/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
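/* The IDA above backs the numeric part of the "hciN" name assigned in
 * hci_register_dev() below: BR/EDR controllers may take index 0, while
 * AMP controllers start at 1 so the index can double as the AMP
 * controller ID.
 */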
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}
static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
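/* DEFINE_SIMPLE_ATTRIBUTE generates the file operations around the
 * simple get/set helpers above; passing NULL as the setter (as
 * voice_setting_fops does) leaves the debugfs file read-only.
 */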
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
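/* Usage sketch for the synchronous helpers above (hypothetical caller;
 * the double-underscore prefix means the request lock must already be
 * held by the caller):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * The Command Complete parameters start at skb->data; free the skb
 * with kfree_skb() when done.
 */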
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
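/* Note the split between the two helpers above: __hci_req_sync()
 * expects the caller to have serialized against other requests, while
 * hci_req_sync() takes and releases the request lock itself.
 */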
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
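/* bredr_init() and amp_init() differ mainly in flow control: BR/EDR
 * controllers account for ACL data per packet, AMP controllers per
 * data block, so each variant reads the matching buffer or block size
 * during initialization.
 */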
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
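/* The returned inquiry modes follow the HCI specification: 0x02 for
 * the extended inquiry result format, 0x01 for inquiry results with
 * RSSI, and 0x00 for the standard format. The manufacturer and
 * revision checks above catch controllers that advertise a capability
 * they do not reliably implement.
 */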
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0xff;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
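/* The four small request builders above back the HCISETSCAN,
 * HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL ioctls dispatched from
 * hci_dev_cmd() further down in this file.
 */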
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
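/* The resolve list is kept ordered by ascending absolute RSSI, so name
 * resolution works through the strongest (and therefore most likely
 * nearby) devices first.
 */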
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
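	/* ir.length is expressed in units of 1.28 s at the HCI level;
	 * budgeting 2000 ms per unit leaves headroom for the controller
	 * to deliver Inquiry Complete before the request times out.
	 */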
	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);

	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
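/* Resetting cmd_cnt to 1 above is what unblocks the command work: the
 * controller never acknowledged the timed-out command, so its credit
 * is restored by hand before the next queued command is pushed.
 */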
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01; /* One IAC support is mandatory */
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
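	/* The defaults above use standard HCI units: sniff intervals in
	 * 0.625 ms slots (800 = 500 ms, 80 = 50 ms), and the LE scan
	 * interval/window likewise (0x0060 = 60 ms, 0x0030 = 30 ms).
	 */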
	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
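/* Reassembly slot convention: packet-based drivers get one slot per
 * H:4 packet type via the (type - 1) index above, while byte-stream
 * drivers (below) funnel everything through the single
 * STREAM_REASSEMBLY slot and recover the packet type from the first
 * byte of each frame.
 */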
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;
2800 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2801 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2802 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2804 queue_work(hdev->workqueue, &hdev->cmd_work);
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
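
/* Send ACL data */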
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
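/* Pick the connection of @type with the fewest outstanding packets and
 * grant it a fair quote of the controller's free buffers (buffer count
 * divided by the number of connections with queued data, minimum 1).
 */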
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
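
/* Pick the HCI channel to service next: among ready channels of the
 * given link type, prefer the highest queued skb priority and, within
 * that priority, the connection with the least data in flight.
 */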
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
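
/* After a scheduling round, promote channels that got no service to
 * priority HCI_PRIO_MAX - 1 so that low priority traffic cannot be
 * starved indefinitely.
 */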
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
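
/* Packet based ACL flow control: every frame consumes exactly one
 * controller buffer, so the quote and hdev->acl_cnt are decremented
 * once per sent skb.
 */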
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
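
/* Schedule SCO */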
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
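
/* Workqueue handler for the TX path: run the per-link-type schedulers
 * and then flush the raw (unknown type) packet queue.
 */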
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
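
/* The request is fully sent once the next command in cmd_q starts a
 * new request; an empty command queue also counts as complete.
 */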
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
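
/* Workqueue handler for the command path: send one queued command at a
 * time, gated by the controller's command credit (hdev->cmd_cnt), and
 * arm cmd_timer to catch an unresponsive controller.
 */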
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}