2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the per-device work handlers (bodies later in
 * this file / outside this excerpt). */
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, protected by the rwlock below. */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
/* IDA allocator handing out the hciN index numbers. */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Notify the HCI socket layer of a device event (e.g. HCI_DEV_UP,
 * HCI_DEV_DOWN) so monitor/user-channel sockets see state changes.
 * The excerpt had lost the braces and line formatting; restored here.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
/* Extract the Command Complete parameters for @opcode from the last
 * received event (hdev->recv_evt), returning the remaining skb payload
 * or an ERR_PTR on mismatch/short data.
 * NOTE(review): several lines are missing from this excerpt (skb
 * assignment from hdev->recv_evt, locking, goto/free paths, closing
 * braces) — do not treat this fragment as compilable.
 */
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 	struct hci_ev_cmd_complete *ev;
86 	struct hci_event_hdr *hdr;
/* Consume the stored event so it is processed at most once. */
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
/* Validate there is at least a full event header before parsing. */
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the caller asked for a specific event, it must match exactly. */
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
/* Success path: opcode in the event must match the command sent. */
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
/* Send one HCI command and sleep (interruptibly, up to @timeout jiffies)
 * until it completes, is cancelled, or times out; on success return the
 * Command Complete payload via hci_get_cmd_complete().
 * NOTE(review): this excerpt is missing the opening brace, `int err`,
 * the hci_req_run() failure path, the timeout case of the switch, and
 * the err-to-ERR_PTR conversion — incomplete as shown.
 */
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
/* Queue the command, tagging the expected completion event. */
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
/* Classic sleep pattern: enqueue on the wait queue, mark interruptible,
 * then schedule with a timeout; hci_req_sync_complete() wakes us. */
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
/* A signal aborts the wait; the caller sees -EINTR. */
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
/* HCI status byte mapped to a negative errno. */
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous caller. */
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
198 /* Execute request and wait for completion. */
/* Build a request via @func, run it, and sleep until the completion
 * callback fires, the wait times out, or a signal arrives. Caller must
 * hold the request lock (see hci_req_sync()).
 * NOTE(review): opening brace, `int err`, the hci_req_run() error
 * handling, the -EINTR return, the timeout switch case, and closing
 * braces are missing from this excerpt.
 */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
/* Request never started: clear pending state before bailing out. */
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
/* Interruptible sleep; hci_req_sync_complete() performs the wake-up. */
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
/* Reset bookkeeping for the next synchronous request. */
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
/* Public wrapper for __hci_req_sync(): refuses when the device is not up
 * and serializes requests with the per-device request lock.
 * NOTE(review): the matching hci_req_lock(hdev) call before
 * __hci_req_sync() is missing from this excerpt (only the unlock is
 * visible) — presumably it sits on one of the elided lines; verify
 * against the full file before relying on this fragment.
 */
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
/* Stage-1 init dispatcher: optionally reset the controller, then run the
 * type-specific init (bredr_init()/amp_init()).
 * NOTE(review): the switch cases (HCI_BREDR/HCI_AMP calling
 * bredr_init()/amp_init()) and closing braces are missing from this
 * excerpt; only the default error path is visible.
 */
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
/* Devices with the RESET_ON_CLOSE quirk were already reset at close
 * time, so skip the redundant reset here. */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
337 BT_ERR("Unknown device type %d", hdev->dev_type);
342 static void bredr_setup(struct hci_request *req)
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry mode the controller supports, with quirk
 * checks for specific manufacturer/revision combinations that advertise
 * a capability they do not actually implement.
 * NOTE(review): all return statements (0x02 for extended inquiry, 0x01
 * for RSSI, 0x00 fallback and the quirk returns) are missing from this
 * excerpt, as are the closing braces.
 */
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
403 if (lmp_inq_rssi_capable(hdev))
/* Quirk: this manufacturer 11 revision misreports RSSI support. */
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
426 static void hci_setup_inquiry_mode(struct hci_request *req)
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the Set Event Mask (and, for LE, LE Set Event Mask)
 * commands based on the controller's capabilities.
 * NOTE(review): the "} else {" between the BR/EDR and LE-only branches,
 * the closing braces, and the lines completing the Remote Host
 * Supported Features comment are missing from this excerpt.
 */
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* The remaining bits are opted in per LMP feature. */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* LE controllers additionally get an LE event mask; events[] is reused
 * (zeroed) for that second command. */
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
/* Stage-2 init: run BR/EDR and LE setup, configure the event mask, and
 * push host-policy settings (SSP mode / EIR, inquiry mode, auth enable).
 * NOTE(review): several lines are elided in this excerpt (calls to
 * bredr_setup()/le_setup(), `u8 mode = 0x01;`, `u8 enable = 1;`,
 * cp.page assignment, closing braces); incomplete as shown.
 */
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
523 if (lmp_le_capable(hdev))
526 hci_setup_event_mask(req);
528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534 if (lmp_ssp_capable(hdev)) {
/* SSP enabled by the host: write SSP mode to the controller;
 * otherwise clear EIR state. */
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
540 struct hci_cp_write_eir cp;
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
549 if (lmp_inq_rssi_capable(hdev))
550 hci_setup_inquiry_mode(req);
552 if (lmp_inq_tx_pwr_capable(hdev))
553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Mirror the host's link-security setting into the controller. */
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
570 static void hci_setup_link_policy(struct hci_request *req)
572 struct hci_dev *hdev = req->hdev;
573 struct hci_cp_write_def_link_policy cp;
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
585 cp.policy = cpu_to_le16(link_policy);
586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
589 static void hci_set_le_support(struct hci_request *req)
591 struct hci_dev *hdev = req->hdev;
592 struct hci_cp_write_le_host_supported cp;
594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
598 memset(&cp, 0, sizeof(cp));
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
602 cp.simul = lmp_le_br_capable(hdev);
605 if (cp.le != lmp_host_le_capable(hdev))
606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Stage-3 init: delete stored link keys (when supported), set link
 * policy, enable LE host support, and read extra feature pages.
 * NOTE(review): `u8 p;`, the hci_update_ad() call after
 * hci_set_le_support(), cp.page assignment and closing braces are
 * missing from this excerpt.
 */
610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
612 struct hci_dev *hdev = req->hdev;
615 /* Some Broadcom based Bluetooth controllers do not support the
616 * Delete Stored Link Key command. They are clearly indicating its
617 * absence in the bit mask of supported commands.
619 * Check the supported commands and only if the the command is marked
620 * as supported send it. If not supported assume that the controller
621 * does not have actual support for stored link keys which makes this
622 * command redundant anyway.
/* commands[6] bit 7 == Delete Stored Link Key supported. */
624 if (hdev->commands[6] & 0x80) {
625 struct hci_cp_delete_stored_link_key cp;
627 bacpy(&cp.bdaddr, BDADDR_ANY);
628 cp.delete_all = 0x01;
629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 4 == Write Default Link Policy supported. */
633 if (hdev->commands[5] & 0x10)
634 hci_setup_link_policy(req);
636 if (lmp_le_capable(hdev)) {
637 hci_set_le_support(req);
641 /* Read features beyond page 1 if available */
642 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
643 struct hci_cp_read_local_ext_features cp;
646 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Run the three synchronous init stages in order; stages 2 and 3 apply
 * only to BR/EDR-type (primary) controllers, not AMP.
 * NOTE(review): `int err;`, the `if (err < 0) return err;` checks after
 * each stage, the `return 0;` for the AMP short-circuit, and closing
 * braces are missing from this excerpt.
 */
651 static int __hci_init(struct hci_dev *hdev)
655 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
659 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
660 * BR/EDR/LE type controllers. AMP controllers only need the
663 if (hdev->dev_type != HCI_BREDR)
666 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
670 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
673 static void hci_scan_req(struct hci_request *req, unsigned long opt)
677 BT_DBG("%s %x", req->hdev->name, scan);
679 /* Inquiry and Page scans */
680 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
683 static void hci_auth_req(struct hci_request *req, unsigned long opt)
687 BT_DBG("%s %x", req->hdev->name, auth);
690 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
693 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
697 BT_DBG("%s %x", req->hdev->name, encrypt);
700 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
703 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
705 __le16 policy = cpu_to_le16(opt);
707 BT_DBG("%s %x", req->hdev->name, policy);
709 /* Default link policy */
710 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
713 /* Get HCI device by index.
714 * Device is held on return. */
/* Walk the global device list under the read lock and take a reference
 * (hci_dev_hold) on the matching entry; caller must hci_dev_put().
 * NOTE(review): the index-validity check, the `break` inside the loop,
 * the `return hdev;` and closing braces are missing from this excerpt.
 */
715 struct hci_dev *hci_dev_get(int index)
717 struct hci_dev *hdev = NULL, *d;
724 read_lock(&hci_dev_list_lock);
725 list_for_each_entry(d, &hci_dev_list, list) {
726 if (d->id == index) {
727 hdev = hci_dev_hold(d);
731 read_unlock(&hci_dev_list_lock);
735 /* ---- Inquiry support ---- */
/* Report whether a discovery procedure is currently in progress (state
 * FINDING or RESOLVING).
 * NOTE(review): the `return true;` / `default: return false;` arms and
 * closing braces are missing from this excerpt.
 */
737 bool hci_discovery_active(struct hci_dev *hdev)
739 struct discovery_state *discov = &hdev->discovery;
741 switch (discov->state) {
742 case DISCOVERY_FINDING:
743 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit mgmt "discovering"
 * notifications on STOPPED/FINDING transitions. No-op if unchanged.
 * NOTE(review): the `switch (state)` line, the per-case `break`s, and
 * closing braces are missing from this excerpt.
 */
751 void hci_discovery_set_state(struct hci_dev *hdev, int state)
753 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
755 if (hdev->discovery.state == state)
759 case DISCOVERY_STOPPED:
/* Only announce "stopped" if discovery actually ran (not when a
 * start attempt was aborted while still STARTING). */
760 if (hdev->discovery.state != DISCOVERY_STARTING)
761 mgmt_discovering(hdev, 0);
763 case DISCOVERY_STARTING:
765 case DISCOVERY_FINDING:
766 mgmt_discovering(hdev, 1);
768 case DISCOVERY_RESOLVING:
770 case DISCOVERY_STOPPING:
774 hdev->discovery.state = state;
/* Free every entry in the inquiry cache and reinitialize its lists.
 * NOTE(review): the loop body (list_del(&p->all); kfree(p);) and the
 * INIT_LIST_HEAD(&cache->all) / closing braces are missing from this
 * excerpt.
 */
777 void hci_inquiry_cache_flush(struct hci_dev *hdev)
779 struct discovery_state *cache = &hdev->discovery;
780 struct inquiry_entry *p, *n;
/* _safe variant: entries are freed while walking. */
782 list_for_each_entry_safe(p, n, &cache->all, all) {
787 INIT_LIST_HEAD(&cache->unknown);
788 INIT_LIST_HEAD(&cache->resolve);
791 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
794 struct discovery_state *cache = &hdev->discovery;
795 struct inquiry_entry *e;
797 BT_DBG("cache %p, %pMR", cache, bdaddr);
799 list_for_each_entry(e, &cache->all, all) {
800 if (!bacmp(&e->data.bdaddr, bdaddr))
807 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
810 struct discovery_state *cache = &hdev->discovery;
811 struct inquiry_entry *e;
813 BT_DBG("cache %p, %pMR", cache, bdaddr);
815 list_for_each_entry(e, &cache->unknown, list) {
816 if (!bacmp(&e->data.bdaddr, bdaddr))
823 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
827 struct discovery_state *cache = &hdev->discovery;
828 struct inquiry_entry *e;
830 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
832 list_for_each_entry(e, &cache->resolve, list) {
833 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
835 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list, kept sorted by descending RSSI
 * magnitude, skipping entries already being resolved (NAME_PENDING).
 * NOTE(review): the list_del(&ie->list) before re-insertion, the
 * `break`/pos update inside the loop, and closing braces are missing
 * from this excerpt.
 */
842 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
843 struct inquiry_entry *ie)
845 struct discovery_state *cache = &hdev->discovery;
846 struct list_head *pos = &cache->resolve;
847 struct inquiry_entry *p;
851 list_for_each_entry(p, &cache->resolve, list) {
852 if (p->name_state != NAME_PENDING &&
853 abs(p->data.rssi) >= abs(ie->data.rssi))
858 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data; returns whether a
 * name request should be issued for this device (visible at the final
 * name_state check). Also reports SSP mode via *ssp.
 * NOTE(review): many lines are missing from this excerpt — the
 * goto/update paths of the existing-entry branch, the NULL check after
 * kzalloc, the `else` of the name_known branch, list_del for the
 * unknown list, and the return statements/braces.
 */
861 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
862 bool name_known, bool *ssp)
864 struct discovery_state *cache = &hdev->discovery;
865 struct inquiry_entry *ie;
867 BT_DBG("cache %p, %pMR", cache, &data->bdaddr)
/* OOB data is stale once the device is seen in an inquiry again. */;
869 hci_remove_remote_oob_data(hdev, &data->bdaddr);
872 *ssp = data->ssp_mode;
874 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
876 if (ie->data.ssp_mode && ssp)
/* Entry already cached: update RSSI and resort the resolve list
 * if we are still waiting to need its name. */
879 if (ie->name_state == NAME_NEEDED &&
880 data->rssi != ie->data.rssi) {
881 ie->data.rssi = data->rssi;
882 hci_inquiry_cache_update_resolve(hdev, ie);
888 /* Entry not in the cache. Add new one. */
889 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
893 list_add(&ie->all, &cache->all);
896 ie->name_state = NAME_KNOWN;
898 ie->name_state = NAME_NOT_KNOWN;
899 list_add(&ie->list, &cache->unknown);
903 if (name_known && ie->name_state != NAME_KNOWN &&
904 ie->name_state != NAME_PENDING) {
905 ie->name_state = NAME_KNOWN;
909 memcpy(&ie->data, data, sizeof(*data));
910 ie->timestamp = jiffies;
911 cache->timestamp = jiffies;
/* A name request is needed only while the name is unknown. */
913 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied.
 * NOTE(review): `int copied = 0;`, the `if (copied >= num) break;`
 * guard, the info++/copied++ advance, the return statement and closing
 * braces are missing from this excerpt.
 */
919 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
921 struct discovery_state *cache = &hdev->discovery;
922 struct inquiry_info *info = (struct inquiry_info *) buf;
923 struct inquiry_entry *e;
926 list_for_each_entry(e, &cache->all, all) {
927 struct inquiry_data *data = &e->data;
/* Flatten the cached entry into the ioctl wire format. */
932 bacpy(&info->bdaddr, &data->bdaddr);
933 info->pscan_rep_mode = data->pscan_rep_mode;
934 info->pscan_period_mode = data->pscan_period_mode;
935 info->pscan_mode = data->pscan_mode;
936 memcpy(info->dev_class, data->dev_class, 3);
937 info->clock_offset = data->clock_offset;
943 BT_DBG("cache %p, copied %d", cache, copied);
947 static void hci_inq_req(struct hci_request *req, unsigned long opt)
949 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
950 struct hci_dev *hdev = req->hdev;
951 struct hci_cp_inquiry cp;
953 BT_DBG("%s", hdev->name);
955 if (test_bit(HCI_INQUIRY, &hdev->flags))
959 memcpy(&cp.lap, &ir->lap, 3);
960 cp.length = ir->length;
961 cp.num_rsp = ir->num_rsp;
962 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
965 static int wait_inquiry(void *word)
968 return signal_pending(current);
/* HCIINQUIRY ioctl handler: optionally flush the cache and run a fresh
 * inquiry, wait for completion, then copy cached results to userspace.
 * NOTE(review): many lines are missing from this excerpt — hci_dev_lock
 * calls, `do_inquiry = 1`, the `if (do_inquiry)` wrapper, error labels
 * (`goto done`), kfree(buf), hci_dev_put and closing braces.
 */
971 int hci_inquiry(void __user *arg)
973 __u8 __user *ptr = arg;
974 struct hci_inquiry_req ir;
975 struct hci_dev *hdev;
976 int err = 0, do_inquiry = 0, max_rsp;
980 if (copy_from_user(&ir, ptr, sizeof(ir)))
983 hdev = hci_dev_get(ir.dev_id);
/* Raw/user-channel devices bypass the kernel stack: refuse ioctls. */
987 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
/* Re-inquire when the cache is stale/empty or the caller forces it. */
993 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
994 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
995 hci_inquiry_cache_flush(hdev);
998 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units upstream; here scaled via 2000ms. */
1000 timeo = ir.length * msecs_to_jiffies(2000);
1003 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1008 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1009 * cleared). If it is interrupted by a signal, return -EINTR.
1011 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1012 TASK_INTERRUPTIBLE))
1016 /* for unlimited number of responses we will use buffer with
1019 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1021 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1022 * copy it to the user space.
1024 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1031 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1032 hci_dev_unlock(hdev);
1034 BT_DBG("num_rsp %d", ir.num_rsp);
/* Two-stage copy-out: header struct first, then the result array. */
1036 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1038 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Assemble LE advertising data (flags, TX power, local name) into @ptr
 * and return the total AD length written.
 * NOTE(review): this excerpt is missing the flags-AD emission block,
 * several ad_len/ptr advances, the name truncation branch bodies and
 * the final return — incomplete as shown.
 */
1051 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1053 u8 ad_len = 0, flags = 0;
1056 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1057 flags |= LE_AD_GENERAL;
/* LE-only controller: advertise that BR/EDR is not supported. */
1059 if (!lmp_bredr_capable(hdev))
1060 flags |= LE_AD_NO_BREDR;
1062 if (lmp_le_br_capable(hdev))
1063 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1065 if (lmp_host_le_br_capable(hdev))
1066 flags |= LE_AD_SIM_LE_BREDR_HOST;
1069 BT_DBG("adv flags 0x%02x", flags);
/* TX power AD element: length, type, then the signed power value. */
1079 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1081 ptr[1] = EIR_TX_POWER;
1082 ptr[2] = (u8) hdev->adv_tx_power;
1088 name_len = strlen(hdev->dev_name);
1090 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
/* Name that doesn't fit is emitted as a shortened name. */
1092 if (name_len > max_len) {
1094 ptr[1] = EIR_NAME_SHORT;
1096 ptr[1] = EIR_NAME_COMPLETE;
1098 ptr[0] = name_len + 1;
1100 memcpy(ptr + 2, hdev->dev_name, name_len);
1102 ad_len += (name_len + 2);
1103 ptr += (name_len + 2);
/* Regenerate the LE advertising data and queue LE Set Advertising Data,
 * skipping the write when the data is unchanged.
 * NOTE(review): `u8 len;`, the early `return;` after the unchanged
 * check, `cp.length = len;` and closing braces are missing from this
 * excerpt.
 */
1109 void hci_update_ad(struct hci_request *req)
1111 struct hci_dev *hdev = req->hdev;
1112 struct hci_cp_le_set_adv_data cp;
1115 if (!lmp_le_capable(hdev))
1118 memset(&cp, 0, sizeof(cp));
1120 len = create_ad(hdev, cp.data);
/* Avoid a redundant controller write if nothing changed. */
1122 if (hdev->adv_data_len == len &&
1123 memcmp(cp.data, hdev->adv_data, len) == 0)
1126 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1127 hdev->adv_data_len = len;
1131 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1134 /* ---- HCI ioctl helpers ---- */
/* Bring a device up: open the transport, run vendor setup and the HCI
 * init sequence, then announce HCI_DEV_UP / mgmt powered. On init
 * failure, flush all work and queues and close the transport again.
 * NOTE(review): this excerpt is missing the request lock/unlock, the
 * `ret = -E...; goto done;` error assignments, the setup-phase guard,
 * the else-branch of the init result, hdev->close()/flags reset in the
 * failure path, hci_dev_put and closing braces.
 */
1136 int hci_dev_open(__u16 dev)
1138 struct hci_dev *hdev;
1141 hdev = hci_dev_get(dev);
1145 BT_DBG("%s %p", hdev->name, hdev);
/* Device being unregistered: refuse to power it up. */
1149 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
/* rfkill-blocked radios must stay off. */
1154 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1159 if (test_bit(HCI_UP, &hdev->flags)) {
1164 if (hdev->open(hdev)) {
/* One free command slot to start with; HCI_INIT gates init-only paths. */
1169 atomic_set(&hdev->cmd_cnt, 1);
1170 set_bit(HCI_INIT, &hdev->flags);
1172 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1173 ret = hdev->setup(hdev);
1176 /* Treat all non BR/EDR controllers as raw devices if
1177 * enable_hs is not set.
1179 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1180 set_bit(HCI_RAW, &hdev->flags);
1182 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1183 set_bit(HCI_RAW, &hdev->flags);
/* Full HCI init only for managed (non-raw, non-user-channel) devices. */
1185 if (!test_bit(HCI_RAW, &hdev->flags) &&
1186 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1187 ret = __hci_init(hdev);
1190 clear_bit(HCI_INIT, &hdev->flags);
1194 set_bit(HCI_UP, &hdev->flags);
1195 hci_notify(hdev, HCI_DEV_UP);
1196 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1197 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1198 mgmt_valid_hdev(hdev)) {
1200 mgmt_powered(hdev, 1);
1201 hci_dev_unlock(hdev);
1204 /* Init failed, cleanup */
1205 flush_work(&hdev->tx_work);
1206 flush_work(&hdev->cmd_work);
1207 flush_work(&hdev->rx_work);
1209 skb_queue_purge(&hdev->cmd_q);
1210 skb_queue_purge(&hdev->rx_q);
/* Drop the in-flight command, if any, before closing the transport. */
1215 if (hdev->sent_cmd) {
1216 kfree_skb(hdev->sent_cmd);
1217 hdev->sent_cmd = NULL;
1225 hci_req_unlock(hdev);
/* Tear a device down: cancel pending work and requests, flush queues,
 * optionally reset the controller (RESET_ON_CLOSE quirk), drop the
 * in-flight command, notify HCI_DEV_DOWN/mgmt and close the transport.
 * NOTE(review): this excerpt is missing the request lock, `return 0;`
 * for the not-up early exit, hci_dev_lock around the cache/conn flush,
 * the hdev->flush()/hdev->close() calls, the flags reset, hci_dev_put
 * and closing braces.
 */
1230 static int hci_dev_do_close(struct hci_dev *hdev)
1232 BT_DBG("%s %p", hdev->name, hdev);
1234 cancel_delayed_work(&hdev->power_off);
/* Abort any synchronous request still waiting for completion. */
1236 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail. */
1239 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1240 del_timer_sync(&hdev->cmd_timer);
1241 hci_req_unlock(hdev);
1245 /* Flush RX and TX works */
1246 flush_work(&hdev->tx_work);
1247 flush_work(&hdev->rx_work);
1249 if (hdev->discov_timeout > 0) {
1250 cancel_delayed_work(&hdev->discov_off);
1251 hdev->discov_timeout = 0;
1252 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1255 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1256 cancel_delayed_work(&hdev->service_cache);
1258 cancel_delayed_work_sync(&hdev->le_scan_disable);
1261 hci_inquiry_cache_flush(hdev);
1262 hci_conn_hash_flush(hdev);
1263 hci_dev_unlock(hdev);
1265 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset controller on close when the quirk demands it (managed
 * devices only). */
1271 skb_queue_purge(&hdev->cmd_q);
1272 atomic_set(&hdev->cmd_cnt, 1);
1273 if (!test_bit(HCI_RAW, &hdev->flags) &&
1274 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1275 set_bit(HCI_INIT, &hdev->flags);
1276 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1277 clear_bit(HCI_INIT, &hdev->flags);
1280 /* flush cmd work */
1281 flush_work(&hdev->cmd_work);
1284 skb_queue_purge(&hdev->rx_q);
1285 skb_queue_purge(&hdev->cmd_q);
1286 skb_queue_purge(&hdev->raw_q);
1288 /* Drop last sent command */
1289 if (hdev->sent_cmd) {
1290 del_timer_sync(&hdev->cmd_timer);
1291 kfree_skb(hdev->sent_cmd);
1292 hdev->sent_cmd = NULL;
1295 kfree_skb(hdev->recv_evt);
1296 hdev->recv_evt = NULL;
1298 /* After this point our queues are empty
1299 * and no tasks are scheduled. */
/* Only non-persistent dev_flags survive a power cycle. */
1304 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1306 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1307 mgmt_valid_hdev(hdev)) {
1309 mgmt_powered(hdev, 0);
1310 hci_dev_unlock(hdev);
1313 /* Controller radio is available but is currently powered down */
1314 hdev->amp_status = 0;
1316 memset(hdev->eir, 0, sizeof(hdev->eir));
1317 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1319 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl handler: reject user-channel devices, cancel a
 * pending auto-power-off, then do the actual close.
 * NOTE(review): the `err = -EBUSY; goto done;` body of the user-channel
 * check, the `done:` label, hci_dev_put, `return err;` and closing
 * braces are missing from this excerpt.
 */
1325 int hci_dev_close(__u16 dev)
1327 struct hci_dev *hdev;
1330 hdev = hci_dev_get(dev);
1334 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1339 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1340 cancel_delayed_work(&hdev->power_off);
1342 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: drop queued traffic, flush caches and
 * connections, reset counters and (for managed devices) send HCI_Reset.
 * NOTE(review): the request lock, the error `goto done` bodies, the
 * hdev->flush() call, hci_dev_lock before the cache flush, the `done:`
 * label/hci_dev_put/return and closing braces are missing from this
 * excerpt.
 */
1349 int hci_dev_reset(__u16 dev)
1351 struct hci_dev *hdev;
1354 hdev = hci_dev_get(dev);
1360 if (!test_bit(HCI_UP, &hdev->flags)) {
1365 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
/* Drop queues */
1371 skb_queue_purge(&hdev->rx_q);
1372 skb_queue_purge(&hdev->cmd_q);
1375 hci_inquiry_cache_flush(hdev);
1376 hci_conn_hash_flush(hdev);
1377 hci_dev_unlock(hdev);
/* Reset flow-control accounting to a clean state. */
1382 atomic_set(&hdev->cmd_cnt, 1);
1383 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1385 if (!test_bit(HCI_RAW, &hdev->flags))
1386 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1389 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl handler: zero the device's byte/error statistics.
 * NOTE(review): the user-channel error assignment, `done:` label,
 * hci_dev_put, `return ret;` and closing braces are missing from this
 * excerpt.
 */
1394 int hci_dev_reset_stat(__u16 dev)
1396 struct hci_dev *hdev;
1399 hdev = hci_dev_get(dev);
1403 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1408 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: run the matching synchronous
 * request builder or set the hdev field directly.
 * NOTE(review): the switch(cmd) line, the per-case `case HCISET...:`
 * labels for AUTH/ENCRYPT/SCAN/LINKPOL, `break`s, the default -EINVAL,
 * the `done:` label/hci_dev_put/return and closing braces are missing
 * from this excerpt.
 */
1415 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1417 struct hci_dev *hdev;
1418 struct hci_dev_req dr;
1421 if (copy_from_user(&dr, arg, sizeof(dr)))
1424 hdev = hci_dev_get(dr.dev_id);
1428 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1435 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1440 if (!lmp_encrypt_capable(hdev)) {
1445 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1446 /* Auth must be enabled first */
1447 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1453 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1458 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1463 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1467 case HCISETLINKMODE:
1468 hdev->link_mode = ((__u16) dr.dev_opt) &
1469 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1473 hdev->pkt_type = (__u16) dr.dev_opt;
/* ACL/SCO MTU ioctls pack mtu in the high and pkt count in the
 * low half of dev_opt. */
1477 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1478 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1482 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1483 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: return (dev_id, flags) pairs for up to
 * dev_num registered devices.
 * NOTE(review): `__u16 dev_num;`, the -ENOMEM check after kzalloc,
 * `dr = dl->dev_req;`, the `if (++n >= dev_num) break;` guard,
 * `dl->dev_num = n;`, kfree(dl) and closing braces are missing from
 * this excerpt.
 */
1496 int hci_get_dev_list(void __user *arg)
1498 struct hci_dev *hdev;
1499 struct hci_dev_list_req *dl;
1500 struct hci_dev_req *dr;
1501 int n = 0, size, err;
1504 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation so userspace can't request a huge buffer. */
1507 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1510 size = sizeof(*dl) + dev_num * sizeof(*dr);
1512 dl = kzalloc(size, GFP_KERNEL);
1518 read_lock(&hci_dev_list_lock);
1519 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device keeps AUTO_OFF devices from powering down. */
1520 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1521 cancel_delayed_work(&hdev->power_off);
1523 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1524 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1526 (dr + n)->dev_id = hdev->id;
1527 (dr + n)->dev_opt = hdev->flags;
1532 read_unlock(&hci_dev_list_lock);
/* Shrink the copy-out to the number actually filled in. */
1535 size = sizeof(*dl) + n * sizeof(*dr);
1537 err = copy_to_user(arg, dl, size);
1540 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot for
 * one device and copy it to userspace.
 * NOTE(review): `int err = 0;`, the `if (!hdev) return -ENODEV;` check,
 * `strcpy`'s surrounding memset of di, the `err = -EFAULT;`,
 * hci_dev_put, `return err;` and closing braces are missing from this
 * excerpt. Also NOTE(review): `strcpy(di.name, hdev->name)` is only
 * safe because hdev->name and di.name are fixed same-size buffers in
 * the UAPI headers — verify sizes if either struct changes.
 */
1543 int hci_get_dev_info(void __user *arg)
1545 struct hci_dev *hdev;
1546 struct hci_dev_info di;
1549 if (copy_from_user(&di, arg, sizeof(di)))
1552 hdev = hci_dev_get(di.dev_id);
/* Querying a device keeps AUTO_OFF devices from powering down. */
1556 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1557 cancel_delayed_work_sync(&hdev->power_off);
1559 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1560 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1562 strcpy(di.name, hdev->name);
1563 di.bdaddr = hdev->bdaddr;
1564 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1565 di.flags = hdev->flags;
1566 di.pkt_type = hdev->pkt_type;
/* Report LE buffer settings in the ACL fields for LE-only devices. */
1567 if (lmp_bredr_capable(hdev)) {
1568 di.acl_mtu = hdev->acl_mtu;
1569 di.acl_pkts = hdev->acl_pkts;
1570 di.sco_mtu = hdev->sco_mtu;
1571 di.sco_pkts = hdev->sco_pkts;
1573 di.acl_mtu = hdev->le_mtu;
1574 di.acl_pkts = hdev->le_pkts;
1578 di.link_policy = hdev->link_policy;
1579 di.link_mode = hdev->link_mode;
1581 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1582 memcpy(&di.features, &hdev->features, sizeof(di.features));
1584 if (copy_to_user(arg, &di, sizeof(di)))
1592 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: close the device when its radio gets blocked.
 * NOTE(review): the `return -EBUSY;` (or similar) for the user-channel
 * case, the `if (blocked)` guard around hci_dev_do_close(), the final
 * `return 0;` and closing braces are missing from this excerpt.
 */
1594 static int hci_rfkill_set_block(void *data, bool blocked)
1596 struct hci_dev *hdev = data;
1598 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1600 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1606 hci_dev_do_close(hdev);
1611 static const struct rfkill_ops hci_rfkill_ops = {
1612 .set_block = hci_rfkill_set_block,
/* hci_power_on - deferred power-on, run from hdev->req_workqueue via
 * the power_on work item. */
1615 static void hci_power_on(struct work_struct *work)
1617 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1620 BT_DBG("%s", hdev->name);
1622 err = hci_dev_open(hdev->id);
/* Report the failure through mgmt so userspace sees power-on failed. */
1624 mgmt_set_powered_failed(hdev, err);
/* Auto-off: schedule a delayed power-off unless something keeps the
 * device in use in the meantime. */
1628 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1629 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1630 HCI_AUTO_OFF_TIMEOUT);
/* First power-on completes setup: announce the index to mgmt. */
1632 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1633 mgmt_index_added(hdev);
/* hci_power_off - delayed-work counterpart: close the device. */
1636 static void hci_power_off(struct work_struct *work)
1638 struct hci_dev *hdev = container_of(work, struct hci_dev,
1641 BT_DBG("%s", hdev->name);
1643 hci_dev_do_close(hdev);
/* hci_discov_off - discoverable timeout expired: write scan enable
 * with SCAN_PAGE only, i.e. drop inquiry scan but stay connectable. */
1646 static void hci_discov_off(struct work_struct *work)
1648 struct hci_dev *hdev;
1649 u8 scan = SCAN_PAGE;
1651 hdev = container_of(work, struct hci_dev, discov_off.work);
1653 BT_DBG("%s", hdev->name);
1657 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1659 hdev->discov_timeout = 0;
1661 hci_dev_unlock(hdev);
/* hci_uuids_clear - free every UUID registered on this controller. */
1664 int hci_uuids_clear(struct hci_dev *hdev)
1666 struct bt_uuid *uuid, *tmp;
1668 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1669 list_del(&uuid->list);
/* hci_link_keys_clear - free all stored BR/EDR link keys. */
1676 int hci_link_keys_clear(struct hci_dev *hdev)
1678 struct list_head *p, *n;
1680 list_for_each_safe(p, n, &hdev->link_keys) {
1681 struct link_key *key;
1683 key = list_entry(p, struct link_key, list);
/* hci_smp_ltks_clear - free all stored SMP long term keys. */
1692 int hci_smp_ltks_clear(struct hci_dev *hdev)
1694 struct smp_ltk *k, *tmp;
1696 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* hci_find_link_key - look up a BR/EDR link key by remote address. */
1704 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1708 list_for_each_entry(k, &hdev->link_keys, list)
1709 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* hci_persistent_key - decide whether a newly created link key should
 * be stored persistently, based on the key type and both sides'
 * authentication requirements. */
1715 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1716 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types are numbered below 0x03. */
1719 if (key_type < 0x03)
1722 /* Debug keys are insecure so don't store them persistently */
1723 if (key_type == HCI_LK_DEBUG_COMBINATION)
1726 /* Changed combination key and there's no previous one */
1727 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1730 /* Security mode 3 case */
1734 /* Neither local nor remote side had no-bonding as requirement */
1735 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1738 /* Local side had dedicated bonding as requirement */
1739 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1742 /* Remote side had dedicated bonding as requirement */
1743 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1746 /* If none of the above criteria match, then don't store the key
/* hci_find_ltk - look up an LTK by encrypted diversifier and random
 * number; both must match. */
1751 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1755 list_for_each_entry(k, &hdev->long_term_keys, list) {
1756 if (k->ediv != ediv ||
1757 memcmp(rand, k->rand, sizeof(k->rand)))
/* hci_find_ltk_by_addr - look up an LTK by remote address + type. */
1766 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1771 list_for_each_entry(k, &hdev->long_term_keys, list)
1772 if (addr_type == k->bdaddr_type &&
1773 bacmp(bdaddr, &k->bdaddr) == 0)
/* hci_add_link_key - store (or update) a BR/EDR link key and notify
 * mgmt whether it should be kept persistently. */
1779 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1780 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1782 struct link_key *key, *old_key;
/* Reuse an existing entry for this address when there is one. */
1786 old_key = hci_find_link_key(hdev, bdaddr);
1788 old_key_type = old_key->type;
/* 0xff means "no previous key type known". */
1791 old_key_type = conn ? conn->key_type : 0xff;
1792 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1795 list_add(&key->list, &hdev->link_keys);
1798 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1800 /* Some buggy controller combinations generate a changed
1801 * combination key for legacy pairing even when there's no
1803 if (type == HCI_LK_CHANGED_COMBINATION &&
1804 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1805 type = HCI_LK_COMBINATION;
1807 conn->key_type = type;
1810 bacpy(&key->bdaddr, bdaddr);
1811 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1812 key->pin_len = pin_len;
/* A changed-combination key inherits the previous key's type. */
1814 if (type == HCI_LK_CHANGED_COMBINATION)
1815 key->type = old_key_type;
1822 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1824 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection drops. */
1827 conn->flush_key = !persistent;
/* hci_add_ltk - store (or update) an SMP key; only STK/LTK types are
 * accepted. Only real LTKs are reported to mgmt. */
1832 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1833 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1836 struct smp_ltk *key, *old_key;
1838 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1841 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1845 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1848 list_add(&key->list, &hdev->long_term_keys);
1851 bacpy(&key->bdaddr, bdaddr);
1852 key->bdaddr_type = addr_type;
1853 memcpy(key->val, tk, sizeof(key->val));
1854 key->authenticated = authenticated;
1856 key->enc_size = enc_size;
1858 memcpy(key->rand, rand, sizeof(key->rand));
/* Short term keys stay local; only LTKs go to mgmt. */
1863 if (type & HCI_SMP_LTK)
1864 mgmt_new_ltk(hdev, key, 1);
/* hci_remove_link_key - delete the stored link key for an address. */
1869 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1871 struct link_key *key;
1873 key = hci_find_link_key(hdev, bdaddr);
1877 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1879 list_del(&key->list);
/* hci_remove_ltk - delete all stored LTKs matching an address. */
1885 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1887 struct smp_ltk *k, *tmp;
1889 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1890 if (bacmp(bdaddr, &k->bdaddr))
1893 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1902 /* HCI command timer function */
/* Fires when the controller fails to answer a command in time; logs the
 * stuck opcode (when known) and restores a command credit so cmd_work
 * can make progress again. */
1903 static void hci_cmd_timeout(unsigned long arg)
1905 struct hci_dev *hdev = (void *) arg;
1907 if (hdev->sent_cmd) {
1908 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1909 u16 opcode = __le16_to_cpu(sent->opcode);
1911 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1913 BT_ERR("%s command tx timeout", hdev->name);
/* Pretend one command credit is available again and kick cmd_work. */
1916 atomic_set(&hdev->cmd_cnt, 1);
1917 queue_work(hdev->workqueue, &hdev->cmd_work);
/* hci_find_remote_oob_data - look up stored out-of-band pairing data
 * by remote address. */
1920 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1923 struct oob_data *data;
1925 list_for_each_entry(data, &hdev->remote_oob_data, list)
1926 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* hci_remove_remote_oob_data - delete OOB data for one address. */
1932 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1934 struct oob_data *data;
1936 data = hci_find_remote_oob_data(hdev, bdaddr);
1940 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1942 list_del(&data->list);
/* hci_remote_oob_data_clear - drop every stored OOB entry. */
1948 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1950 struct oob_data *data, *n;
1952 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1953 list_del(&data->list);
/* hci_add_remote_oob_data - store (or update) hash + randomizer for an
 * address; allocates a new entry only when none exists yet. */
1960 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1963 struct oob_data *data;
1965 data = hci_find_remote_oob_data(hdev, bdaddr);
1968 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1972 bacpy(&data->bdaddr, bdaddr);
1973 list_add(&data->list, &hdev->remote_oob_data);
1976 memcpy(data->hash, hash, sizeof(data->hash));
1977 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1979 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* hci_blacklist_lookup - find a blacklist entry by address. */
1984 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1986 struct bdaddr_list *b;
1988 list_for_each_entry(b, &hdev->blacklist, list)
1989 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* hci_blacklist_clear - free the entire blacklist. */
1995 int hci_blacklist_clear(struct hci_dev *hdev)
1997 struct list_head *p, *n;
1999 list_for_each_safe(p, n, &hdev->blacklist) {
2000 struct bdaddr_list *b;
2002 b = list_entry(p, struct bdaddr_list, list);
/* hci_blacklist_add - block an address; BDADDR_ANY and duplicates are
 * rejected. Notifies mgmt on success. */
2011 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2013 struct bdaddr_list *entry;
2015 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2018 if (hci_blacklist_lookup(hdev, bdaddr))
2021 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2025 bacpy(&entry->bdaddr, bdaddr);
2027 list_add(&entry->list, &hdev->blacklist);
2029 return mgmt_device_blocked(hdev, bdaddr, type);
/* hci_blacklist_del - unblock one address, or clear the whole list
 * when called with BDADDR_ANY. Notifies mgmt on success. */
2032 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2034 struct bdaddr_list *entry;
2036 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2037 return hci_blacklist_clear(hdev);
2039 entry = hci_blacklist_lookup(hdev, bdaddr);
2043 list_del(&entry->list);
2046 return mgmt_device_unblocked(hdev, bdaddr, type);
/* inquiry_complete - request callback: on failure, drop back to the
 * DISCOVERY_STOPPED state. */
2049 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2052 BT_ERR("Failed to start inquiry: status %d", status);
2055 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2056 hci_dev_unlock(hdev);
/* le_scan_disable_work_complete - after LE scanning stops: either end
 * discovery (LE-only) or chain into a BR/EDR inquiry (interleaved
 * discovery). */
2061 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2063 /* General inquiry access code (GIAC) */
2064 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2065 struct hci_request req;
2066 struct hci_cp_inquiry cp;
2070 BT_ERR("Failed to disable LE scanning: status %d", status);
2074 switch (hdev->discovery.type) {
2075 case DISCOV_TYPE_LE:
2077 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2078 hci_dev_unlock(hdev);
2081 case DISCOV_TYPE_INTERLEAVED:
2082 hci_req_init(&req, hdev);
/* Build the follow-up inquiry command. */
2084 memset(&cp, 0, sizeof(cp));
2085 memcpy(&cp.lap, lap, sizeof(cp.lap));
2086 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2087 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2091 hci_inquiry_cache_flush(hdev);
2093 err = hci_req_run(&req, inquiry_complete);
2095 BT_ERR("Inquiry request failed: err %d", err);
2096 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2099 hci_dev_unlock(hdev);
/* le_scan_disable_work - delayed work that sends LE Set Scan Enable
 * (disable); completion is handled by the callback above. */
2104 static void le_scan_disable_work(struct work_struct *work)
2106 struct hci_dev *hdev = container_of(work, struct hci_dev,
2107 le_scan_disable.work);
2108 struct hci_cp_le_set_scan_enable cp;
2109 struct hci_request req;
2112 BT_DBG("%s", hdev->name);
2114 hci_req_init(&req, hdev);
2116 memset(&cp, 0, sizeof(cp));
2117 cp.enable = LE_SCAN_DISABLE;
2118 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2120 err = hci_req_run(&req, le_scan_disable_work_complete);
2122 BT_ERR("Disable LE scanning request failed: err %d", err);
2125 /* Alloc HCI device */
/* hci_alloc_dev - allocate and initialize a struct hci_dev: default
 * parameters, locks, lists, work items, skb queues and the command
 * timer. The matching release path is hci_free_dev(). */
2126 struct hci_dev *hci_alloc_dev(void)
2128 struct hci_dev *hdev;
2130 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative defaults until the controller reports capabilities. */
2134 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2135 hdev->esco_type = (ESCO_HV1);
2136 hdev->link_mode = (HCI_LM_ACCEPT);
2137 hdev->io_capability = 0x03; /* No Input No Output */
2138 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2139 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff intervals; presumably in baseband slots (0.625 ms) — TODO
 * confirm against hci_core.h users. */
2141 hdev->sniff_max_interval = 800;
2142 hdev->sniff_min_interval = 80;
2144 mutex_init(&hdev->lock);
2145 mutex_init(&hdev->req_lock);
2147 INIT_LIST_HEAD(&hdev->mgmt_pending);
2148 INIT_LIST_HEAD(&hdev->blacklist);
2149 INIT_LIST_HEAD(&hdev->uuids);
2150 INIT_LIST_HEAD(&hdev->link_keys);
2151 INIT_LIST_HEAD(&hdev->long_term_keys);
2152 INIT_LIST_HEAD(&hdev->remote_oob_data);
2153 INIT_LIST_HEAD(&hdev->conn_hash.list);
/* Work items servicing the RX/cmd/TX queues and power transitions. */
2155 INIT_WORK(&hdev->rx_work, hci_rx_work);
2156 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2157 INIT_WORK(&hdev->tx_work, hci_tx_work);
2158 INIT_WORK(&hdev->power_on, hci_power_on);
2160 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2161 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2162 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2164 skb_queue_head_init(&hdev->rx_q);
2165 skb_queue_head_init(&hdev->cmd_q);
2166 skb_queue_head_init(&hdev->raw_q);
2168 init_waitqueue_head(&hdev->req_wait_q);
/* Command timeout watchdog (see hci_cmd_timeout). */
2170 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2172 hci_init_sysfs(hdev);
2173 discovery_init(hdev);
2177 EXPORT_SYMBOL(hci_alloc_dev);
2179 /* Free HCI device */
/* hci_free_dev - drop the device reference; the actual free happens in
 * the device core's release callback. */
2180 void hci_free_dev(struct hci_dev *hdev)
2182 /* will free via device release */
2183 put_device(&hdev->dev);
2185 EXPORT_SYMBOL(hci_free_dev);
2187 /* Register HCI device */
/* hci_register_dev - allocate an index (hciN), set up the per-device
 * workqueues, sysfs and rfkill, add the device to the global list and
 * schedule the initial power-on. Error paths unwind in reverse order.
 * NOTE(review): listing is elided; some error labels/returns are not
 * visible here. */
2188 int hci_register_dev(struct hci_dev *hdev)
2192 if (!hdev->open || !hdev->close)
2195 /* Do not allow HCI_AMP devices to register at index 0,
2196 * so the index can be used as the AMP controller ID.
2198 switch (hdev->dev_type) {
2200 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2203 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2212 sprintf(hdev->name, "hci%d", id);
2215 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Two single-threaded high-priority workqueues: one for rx/tx/cmd
 * work, one for request (e.g. power on/off) work. */
2217 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2218 WQ_MEM_RECLAIM, 1, hdev->name);
2219 if (!hdev->workqueue) {
2224 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2225 WQ_MEM_RECLAIM, 1, hdev->name);
2226 if (!hdev->req_workqueue) {
2227 destroy_workqueue(hdev->workqueue);
2232 error = hci_add_sysfs(hdev);
/* rfkill registration is best-effort: failure leaves hdev->rfkill
 * NULL and registration continues. */
2236 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2237 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2240 if (rfkill_register(hdev->rfkill) < 0) {
2241 rfkill_destroy(hdev->rfkill);
2242 hdev->rfkill = NULL;
2246 set_bit(HCI_SETUP, &hdev->dev_flags);
/* AMP controllers are not subject to auto-power-off. */
2248 if (hdev->dev_type != HCI_AMP)
2249 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2251 write_lock(&hci_dev_list_lock);
2252 list_add(&hdev->list, &hci_dev_list);
2253 write_unlock(&hci_dev_list_lock);
2255 hci_notify(hdev, HCI_DEV_REG);
2258 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding: tear down workqueues, release the index. */
2263 destroy_workqueue(hdev->workqueue);
2264 destroy_workqueue(hdev->req_workqueue);
2266 ida_simple_remove(&hci_index_ida, hdev->id);
2270 EXPORT_SYMBOL(hci_register_dev);
2272 /* Unregister HCI device */
/* hci_unregister_dev - undo everything hci_register_dev set up: remove
 * from the global list, close the device, drop pending reassembly
 * buffers and work, notify mgmt/sockets, release rfkill, sysfs,
 * workqueues, stored keys and finally the index. */
2273 void hci_unregister_dev(struct hci_dev *hdev)
2277 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2279 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2283 write_lock(&hci_dev_list_lock);
2284 list_del(&hdev->list);
2285 write_unlock(&hci_dev_list_lock);
2287 hci_dev_do_close(hdev);
2289 for (i = 0; i < NUM_REASSEMBLY; i++)
2290 kfree_skb(hdev->reassembly[i]);
2292 cancel_work_sync(&hdev->power_on);
/* Only announce the removal via mgmt if setup had completed. */
2294 if (!test_bit(HCI_INIT, &hdev->flags) &&
2295 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2297 mgmt_index_removed(hdev);
2298 hci_dev_unlock(hdev);
2301 /* mgmt_index_removed should take care of emptying the
2303 BUG_ON(!list_empty(&hdev->mgmt_pending));
2305 hci_notify(hdev, HCI_DEV_UNREG);
2308 rfkill_unregister(hdev->rfkill);
2309 rfkill_destroy(hdev->rfkill);
2312 hci_del_sysfs(hdev);
2314 destroy_workqueue(hdev->workqueue);
2315 destroy_workqueue(hdev->req_workqueue);
/* Free all per-device persistent state. */
2318 hci_blacklist_clear(hdev);
2319 hci_uuids_clear(hdev);
2320 hci_link_keys_clear(hdev);
2321 hci_smp_ltks_clear(hdev);
2322 hci_remote_oob_data_clear(hdev);
2323 hci_dev_unlock(hdev);
2327 ida_simple_remove(&hci_index_ida, id);
2329 EXPORT_SYMBOL(hci_unregister_dev);
2331 /* Suspend HCI device */
/* hci_suspend_dev - notify listeners the transport is suspending. */
2332 int hci_suspend_dev(struct hci_dev *hdev)
2334 hci_notify(hdev, HCI_DEV_SUSPEND);
2337 EXPORT_SYMBOL(hci_suspend_dev);
2339 /* Resume HCI device */
/* hci_resume_dev - notify listeners the transport has resumed. */
2340 int hci_resume_dev(struct hci_dev *hdev)
2342 hci_notify(hdev, HCI_DEV_RESUME);
2345 EXPORT_SYMBOL(hci_resume_dev);
2347 /* Receive frame from HCI drivers */
/* hci_recv_frame - entry point for drivers handing a complete packet
 * to the core; only accepted while the device is up or initializing.
 * Timestamps the skb, queues it on rx_q and schedules rx_work. */
2348 int hci_recv_frame(struct sk_buff *skb)
2350 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2351 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2352 && !test_bit(HCI_INIT, &hdev->flags))) {
2358 bt_cb(skb)->incoming = 1;
/* Stamp arrival time before queuing. */
2361 __net_timestamp(skb);
2363 skb_queue_tail(&hdev->rx_q, skb);
2364 queue_work(hdev->workqueue, &hdev->rx_work);
2368 EXPORT_SYMBOL(hci_recv_frame);
/* hci_reassembly - incrementally rebuild one HCI packet (ACL, event or
 * SCO) from driver-supplied fragments, using hdev->reassembly[index]
 * as the in-progress buffer; dispatches via hci_recv_frame() once the
 * expected length is reached.
 * NOTE(review): listing is elided; return values and some branches are
 * not visible here. */
2370 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2371 int count, __u8 index)
2376 struct sk_buff *skb;
2377 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range reassembly slots. */
2379 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2380 index >= NUM_REASSEMBLY)
2383 skb = hdev->reassembly[index];
/* No packet in progress: allocate a max-size skb for this type. */
2387 case HCI_ACLDATA_PKT:
2388 len = HCI_MAX_FRAME_SIZE;
2389 hlen = HCI_ACL_HDR_SIZE;
2392 len = HCI_MAX_EVENT_SIZE;
2393 hlen = HCI_EVENT_HDR_SIZE;
2395 case HCI_SCODATA_PKT:
2396 len = HCI_MAX_SCO_SIZE;
2397 hlen = HCI_SCO_HDR_SIZE;
2401 skb = bt_skb_alloc(len, GFP_ATOMIC);
2405 scb = (void *) skb->cb;
2407 scb->pkt_type = type;
2409 skb->dev = (void *) hdev;
2410 hdev->reassembly[index] = skb;
/* Copy as much of the input as we still expect. */
2414 scb = (void *) skb->cb;
2415 len = min_t(uint, scb->expect, count);
2417 memcpy(skb_put(skb, len), data, len);
/* Once the type-specific header is complete, learn the payload length
 * from it; drop the buffer if the payload cannot fit. */
2426 if (skb->len == HCI_EVENT_HDR_SIZE) {
2427 struct hci_event_hdr *h = hci_event_hdr(skb);
2428 scb->expect = h->plen;
2430 if (skb_tailroom(skb) < scb->expect) {
2432 hdev->reassembly[index] = NULL;
2438 case HCI_ACLDATA_PKT:
2439 if (skb->len == HCI_ACL_HDR_SIZE) {
2440 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2441 scb->expect = __le16_to_cpu(h->dlen);
2443 if (skb_tailroom(skb) < scb->expect) {
2445 hdev->reassembly[index] = NULL;
2451 case HCI_SCODATA_PKT:
2452 if (skb->len == HCI_SCO_HDR_SIZE) {
2453 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2454 scb->expect = h->dlen;
2456 if (skb_tailroom(skb) < scb->expect) {
2458 hdev->reassembly[index] = NULL;
/* Nothing further expected: the frame is complete, hand it off. */
2465 if (scb->expect == 0) {
2466 /* Complete frame */
2468 bt_cb(skb)->pkt_type = type;
2469 hci_recv_frame(skb);
2471 hdev->reassembly[index] = NULL;
/* hci_recv_fragment - feed typed fragments into hci_reassembly()
 * until the input buffer is consumed. */
2479 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2483 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2487 rem = hci_reassembly(hdev, type, data, count, type - 1);
2491 data += (count - rem);
2497 EXPORT_SYMBOL(hci_recv_fragment);
2499 #define STREAM_REASSEMBLY 0
/* hci_recv_stream_fragment - like hci_recv_fragment but for a raw byte
 * stream where each packet is prefixed by its type byte; uses the
 * dedicated STREAM_REASSEMBLY slot. */
2501 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2507 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2510 struct { char type; } *pkt;
2512 /* Start of the frame */
2519 type = bt_cb(skb)->pkt_type;
2521 rem = hci_reassembly(hdev, type, data, count,
2526 data += (count - rem);
2532 EXPORT_SYMBOL(hci_recv_stream_fragment);
2534 /* ---- Interface to upper protocols ---- */
/* hci_register_cb - add an upper-protocol callback set to the global
 * list (protected by hci_cb_list_lock). */
2536 int hci_register_cb(struct hci_cb *cb)
2538 BT_DBG("%p name %s", cb, cb->name);
2540 write_lock(&hci_cb_list_lock);
2541 list_add(&cb->list, &hci_cb_list);
2542 write_unlock(&hci_cb_list_lock);
2546 EXPORT_SYMBOL(hci_register_cb);
/* hci_unregister_cb - remove a previously registered callback set. */
2548 int hci_unregister_cb(struct hci_cb *cb)
2550 BT_DBG("%p name %s", cb, cb->name);
2552 write_lock(&hci_cb_list_lock);
2553 list_del(&cb->list);
2554 write_unlock(&hci_cb_list_lock);
2558 EXPORT_SYMBOL(hci_unregister_cb);
/* hci_send_frame - hand one outgoing packet to the driver, after
 * timestamping it and mirroring a copy to the monitor channel (and, in
 * promiscuous mode, to raw sockets). */
2560 static int hci_send_frame(struct sk_buff *skb)
2562 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2569 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2572 __net_timestamp(skb);
2574 /* Send copy to monitor */
2575 hci_send_to_monitor(hdev, skb);
2577 if (atomic_read(&hdev->promisc)) {
2578 /* Send copy to the sockets */
2579 hci_send_to_sock(hdev, skb);
2582 /* Get rid of skb owner, prior to sending to the driver. */
2585 return hdev->send(skb);
/* hci_req_init - prepare an empty HCI request bound to hdev. */
2588 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2590 skb_queue_head_init(&req->cmd_q);
/* hci_req_run - submit a built request: attach the completion callback
 * to the last queued command, splice the whole request onto the
 * device's cmd_q under its lock, and kick cmd_work. Empty or failed
 * requests are rejected. */
2595 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2597 struct hci_dev *hdev = req->hdev;
2598 struct sk_buff *skb;
2599 unsigned long flags;
2601 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2603 /* If an error occured during request building, remove all HCI
2604 * commands queued on the HCI request queue.
2607 skb_queue_purge(&req->cmd_q);
2611 /* Do not allow empty requests */
2612 if (skb_queue_empty(&req->cmd_q))
2615 skb = skb_peek_tail(&req->cmd_q);
2616 bt_cb(skb)->req.complete = complete;
2618 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2619 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2620 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2622 queue_work(hdev->workqueue, &hdev->cmd_work);
/* hci_prepare_cmd - allocate an skb carrying one HCI command (header
 * plus optional parameters), ready for queuing. */
2627 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2628 u32 plen, const void *param)
2630 int len = HCI_COMMAND_HDR_SIZE + plen;
2631 struct hci_command_hdr *hdr;
2632 struct sk_buff *skb;
2634 skb = bt_skb_alloc(len, GFP_ATOMIC);
2638 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2639 hdr->opcode = cpu_to_le16(opcode);
2643 memcpy(skb_put(skb, plen), param, plen);
2645 BT_DBG("skb len %d", skb->len);
2647 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2648 skb->dev = (void *) hdev;
2653 /* Send HCI command */
/* hci_send_cmd - build a stand-alone command, flag it as the start of
 * a single-command request and queue it for cmd_work. */
2654 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2657 struct sk_buff *skb;
2659 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2661 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2663 BT_ERR("%s no memory for command", hdev->name);
2667 /* Stand-alone HCI commands must be flaged as
2668 * single-command requests.
2670 bt_cb(skb)->req.start = true;
2672 skb_queue_tail(&hdev->cmd_q, skb);
2673 queue_work(hdev->workqueue, &hdev->cmd_work);
2678 /* Queue a command to an asynchronous HCI request */
/* hci_req_add_ev - append a command to a request under construction;
 * the first command in the queue is marked as the request start, and
 * an optional expected-event code is recorded for the matcher. */
2679 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2680 const void *param, u8 event)
2682 struct hci_dev *hdev = req->hdev;
2683 struct sk_buff *skb;
2685 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2687 /* If an error occured during request building, there is no point in
2688 * queueing the HCI command. We can simply return.
2693 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2695 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2696 hdev->name, opcode);
2701 if (skb_queue_empty(&req->cmd_q))
2702 bt_cb(skb)->req.start = true;
2704 bt_cb(skb)->req.event = event;
2706 skb_queue_tail(&req->cmd_q, skb);
/* hci_req_add - hci_req_add_ev without an expected event. */
2709 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2712 hci_req_add_ev(req, opcode, plen, param, 0);
2715 /* Get data from the previously sent command */
/* hci_sent_cmd_data - return a pointer to the parameter bytes of the
 * last sent command, but only when its opcode matches. */
2716 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2718 struct hci_command_hdr *hdr;
2720 if (!hdev->sent_cmd)
2723 hdr = (void *) hdev->sent_cmd->data;
2725 if (hdr->opcode != cpu_to_le16(opcode))
2728 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2730 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* hci_add_acl_hdr - prepend an ACL data header (packed handle+flags
 * and payload length) to an outgoing skb. */
2734 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2736 struct hci_acl_hdr *hdr;
2739 skb_push(skb, HCI_ACL_HDR_SIZE);
2740 skb_reset_transport_header(skb);
2741 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2742 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2743 hdr->dlen = cpu_to_le16(len);
/* hci_queue_acl - add ACL headers to an skb (and to any fragments
 * hanging off its frag_list) and queue everything atomically on the
 * channel queue. BR/EDR uses the connection handle, AMP the channel
 * handle. */
2746 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2747 struct sk_buff *skb, __u16 flags)
2749 struct hci_conn *conn = chan->conn;
2750 struct hci_dev *hdev = conn->hdev;
2751 struct sk_buff *list;
2753 skb->len = skb_headlen(skb);
2756 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2758 switch (hdev->dev_type) {
2760 hci_add_acl_hdr(skb, conn->handle, flags);
2763 hci_add_acl_hdr(skb, chan->handle, flags);
2766 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2770 list = skb_shinfo(skb)->frag_list;
2772 /* Non fragmented */
2773 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2775 skb_queue_tail(queue, skb);
2778 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2780 skb_shinfo(skb)->frag_list = NULL;
2782 /* Queue all fragments atomically */
2783 spin_lock(&queue->lock);
2785 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
2787 flags &= ~ACL_START;
2790 skb = list; list = list->next;
2792 skb->dev = (void *) hdev;
2793 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2794 hci_add_acl_hdr(skb, conn->handle, flags);
2796 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2798 __skb_queue_tail(queue, skb);
2801 spin_unlock(&queue->lock);
/* hci_send_acl - queue ACL data on a channel and kick the TX work. */
2805 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2807 struct hci_dev *hdev = chan->conn->hdev;
2809 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2811 skb->dev = (void *) hdev;
2813 hci_queue_acl(chan, &chan->data_q, skb, flags);
2815 queue_work(hdev->workqueue, &hdev->tx_work);
/* hci_send_sco - prepend a SCO header and queue the packet on the
 * connection's data queue, then kick the TX work. */
2819 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2821 struct hci_dev *hdev = conn->hdev;
2822 struct hci_sco_hdr hdr;
2824 BT_DBG("%s len %d", hdev->name, skb->len);
2826 hdr.handle = cpu_to_le16(conn->handle);
2827 hdr.dlen = skb->len;
2829 skb_push(skb, HCI_SCO_HDR_SIZE);
2830 skb_reset_transport_header(skb);
2831 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2833 skb->dev = (void *) hdev;
2834 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2836 skb_queue_tail(&conn->data_q, skb);
2837 queue_work(hdev->workqueue, &hdev->tx_work);
2840 /* ---- HCI TX task (outgoing data) ---- */
2842 /* HCI Connection scheduler */
/* hci_low_sent - pick the connection of the given link type that has
 * queued data and the fewest unacked packets, and compute its
 * fair-share quote from the available controller buffer credits. */
2843 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2846 struct hci_conn_hash *h = &hdev->conn_hash;
2847 struct hci_conn *conn = NULL, *c;
2848 unsigned int num = 0, min = ~0;
2850 /* We don't have to lock device here. Connections are always
2851 * added and removed with TX task disabled. */
2855 list_for_each_entry_rcu(c, &h->list, list) {
2856 if (c->type != type || skb_queue_empty(&c->data_q))
2859 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2864 if (c->sent < min) {
/* All connections of this type visited: stop early. */
2869 if (hci_conn_num(hdev, type) == num)
/* Choose the credit pool matching the winner's link type; LE falls
 * back to the ACL pool when no dedicated LE buffers exist. */
2878 switch (conn->type) {
2880 cnt = hdev->acl_cnt;
2884 cnt = hdev->sco_cnt;
2887 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2891 BT_ERR("Unknown link type");
2899 BT_DBG("conn %p quote %d", conn, *quote);
/* hci_link_tx_to - TX timeout on a link type: disconnect every
 * connection of that type that still has unacked packets. */
2903 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2905 struct hci_conn_hash *h = &hdev->conn_hash;
2908 BT_ERR("%s link tx timeout", hdev->name);
2912 /* Kill stalled connections */
2913 list_for_each_entry_rcu(c, &h->list, list) {
2914 if (c->type == type && c->sent) {
2915 BT_ERR("%s killing stalled connection %pMR",
2916 hdev->name, &c->dst);
2917 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* hci_chan_sent - like hci_low_sent but at channel granularity and
 * priority-aware: among channels with queued data, prefer the highest
 * head-of-queue priority, breaking ties by fewest sent packets. */
2924 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2927 struct hci_conn_hash *h = &hdev->conn_hash;
2928 struct hci_chan *chan = NULL;
2929 unsigned int num = 0, min = ~0, cur_prio = 0;
2930 struct hci_conn *conn;
2931 int cnt, q, conn_num = 0;
2933 BT_DBG("%s", hdev->name);
2937 list_for_each_entry_rcu(conn, &h->list, list) {
2938 struct hci_chan *tmp;
2940 if (conn->type != type)
2943 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2948 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2949 struct sk_buff *skb;
2951 if (skb_queue_empty(&tmp->data_q))
2954 skb = skb_peek(&tmp->data_q);
2955 if (skb->priority < cur_prio)
/* Strictly higher priority found: restart the tie-break. */
2958 if (skb->priority > cur_prio) {
2961 cur_prio = skb->priority;
2966 if (conn->sent < min) {
2972 if (hci_conn_num(hdev, type) == conn_num)
/* Credit pool selection, mirroring hci_low_sent; AMP links draw from
 * the block pool. */
2981 switch (chan->conn->type) {
2983 cnt = hdev->acl_cnt;
2986 cnt = hdev->block_cnt;
2990 cnt = hdev->sco_cnt;
2993 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2997 BT_ERR("Unknown link type");
3002 BT_DBG("chan %p quote %d", chan, *quote);
/* hci_prio_recalculate - age channel priorities after a send round:
 * waiting channels get their head-of-queue skb promoted toward
 * HCI_PRIO_MAX-1 so low-priority traffic is not starved. */
3006 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3008 struct hci_conn_hash *h = &hdev->conn_hash;
3009 struct hci_conn *conn;
3012 BT_DBG("%s", hdev->name);
3016 list_for_each_entry_rcu(conn, &h->list, list) {
3017 struct hci_chan *chan;
3019 if (conn->type != type)
3022 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3027 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3028 struct sk_buff *skb;
3035 if (skb_queue_empty(&chan->data_q))
3038 skb = skb_peek(&chan->data_q);
3039 if (skb->priority >= HCI_PRIO_MAX - 1)
3042 skb->priority = HCI_PRIO_MAX - 1;
3044 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3048 if (hci_conn_num(hdev, type) == num)
/* __get_blocks - number of controller buffer blocks this packet needs
 * under block-based flow control. */
3056 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3058 /* Calculate count of blocks used by this packet */
3059 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* __check_timeout - trigger ACL link TX timeout handling when credits
 * have been exhausted for longer than HCI_ACL_TX_TIMEOUT. */
3062 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3064 if (!test_bit(HCI_RAW, &hdev->flags)) {
3065 /* ACL tx timeout must be longer than maximum
3066 * link supervision timeout (40.9 seconds) */
3067 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3068 HCI_ACL_TX_TIMEOUT))
3069 hci_link_tx_to(hdev, ACL_LINK);
/* hci_sched_acl_pkt - packet-based ACL scheduler: drain channel queues
 * while credits remain, ending a channel's run when its head-of-queue
 * priority drops below the run's starting priority. */
3073 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3075 unsigned int cnt = hdev->acl_cnt;
3076 struct hci_chan *chan;
3077 struct sk_buff *skb;
3080 __check_timeout(hdev, cnt);
3082 while (hdev->acl_cnt &&
3083 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3084 u32 priority = (skb_peek(&chan->data_q))->priority;
3085 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3086 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3087 skb->len, skb->priority);
3089 /* Stop if priority has changed */
3090 if (skb->priority < priority)
3093 skb = skb_dequeue(&chan->data_q);
3095 hci_conn_enter_active_mode(chan->conn,
3096 bt_cb(skb)->force_active);
3098 hci_send_frame(skb);
3099 hdev->acl_last_tx = jiffies;
/* If anything was sent this round, rebalance channel priorities. */
3107 if (cnt != hdev->acl_cnt)
3108 hci_prio_recalculate(hdev, ACL_LINK);
/* hci_sched_acl_blk - block-based ACL scheduler: flow control counted
 * in buffer blocks rather than packets. On AMP controllers this
 * schedules AMP_LINK traffic. */
3111 static void hci_sched_acl_blk(struct hci_dev *hdev)
3113 unsigned int cnt = hdev->block_cnt;
3114 struct hci_chan *chan;
3115 struct sk_buff *skb;
3119 __check_timeout(hdev, cnt);
3121 BT_DBG("%s", hdev->name);
3123 if (hdev->dev_type == HCI_AMP)
3128 while (hdev->block_cnt > 0 &&
3129 (chan = hci_chan_sent(hdev, type, "e))) {
3130 u32 priority = (skb_peek(&chan->data_q))->priority;
3131 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3134 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3135 skb->len, skb->priority);
3137 /* Stop if priority has changed */
3138 if (skb->priority < priority)
3141 skb = skb_dequeue(&chan->data_q);
/* Never send a packet needing more blocks than remain. */
3143 blocks = __get_blocks(hdev, skb);
3144 if (blocks > hdev->block_cnt)
3147 hci_conn_enter_active_mode(chan->conn,
3148 bt_cb(skb)->force_active);
3150 hci_send_frame(skb);
3151 hdev->acl_last_tx = jiffies;
3153 hdev->block_cnt -= blocks;
/* Accounting is done in blocks, not packets. */
3156 chan->sent += blocks;
3157 chan->conn->sent += blocks;
3161 if (cnt != hdev->block_cnt)
3162 hci_prio_recalculate(hdev, type);
/* hci_sched_acl - dispatch to the packet- or block-based ACL scheduler
 * according to the controller's flow control mode. */
3165 static void hci_sched_acl(struct hci_dev *hdev)
3167 BT_DBG("%s", hdev->name);
3169 /* No ACL link over BR/EDR controller */
3170 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3173 /* No AMP link over AMP controller */
3174 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3177 switch (hdev->flow_ctl_mode) {
3178 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3179 hci_sched_acl_pkt(hdev);
3182 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3183 hci_sched_acl_blk(hdev);
/* hci_sched_sco - drain SCO connection queues within the SCO credit
 * budget. */
3189 static void hci_sched_sco(struct hci_dev *hdev)
3191 struct hci_conn *conn;
3192 struct sk_buff *skb;
3195 BT_DBG("%s", hdev->name);
3197 if (!hci_conn_num(hdev, SCO_LINK))
3200 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3201 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3202 BT_DBG("skb %p len %d", skb, skb->len);
3203 hci_send_frame(skb);
/* Guard the per-connection sent counter against wrap. */
3206 if (conn->sent == ~0)
/* hci_sched_esco - same as hci_sched_sco but for eSCO links (shares
 * the SCO credit pool). */
3212 static void hci_sched_esco(struct hci_dev *hdev)
3214 struct hci_conn *conn;
3215 struct sk_buff *skb;
3218 BT_DBG("%s", hdev->name);
3220 if (!hci_conn_num(hdev, ESCO_LINK))
3223 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3225 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3226 BT_DBG("skb %p len %d", skb, skb->len);
3227 hci_send_frame(skb);
3230 if (conn->sent == ~0)
/* hci_sched_le - LE scheduler; uses the dedicated LE credit pool when
 * present, otherwise shares ACL credits. Includes its own TX-timeout
 * check. */
3236 static void hci_sched_le(struct hci_dev *hdev)
3238 struct hci_chan *chan;
3239 struct sk_buff *skb;
3240 int quote, cnt, tmp;
3242 BT_DBG("%s", hdev->name);
3244 if (!hci_conn_num(hdev, LE_LINK))
3247 if (!test_bit(HCI_RAW, &hdev->flags)) {
3248 /* LE tx timeout must be longer than maximum
3249 * link supervision timeout (40.9 seconds) */
3250 if (!hdev->le_cnt && hdev->le_pkts &&
3251 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3252 hci_link_tx_to(hdev, LE_LINK);
3255 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3257 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3258 u32 priority = (skb_peek(&chan->data_q))->priority;
3259 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3260 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3261 skb->len, skb->priority);
3263 /* Stop if priority has changed */
3264 if (skb->priority < priority)
3267 skb = skb_dequeue(&chan->data_q);
3269 hci_send_frame(skb);
3270 hdev->le_last_tx = jiffies;
/* When sharing the ACL pool, write the remaining credits back. */
3281 hdev->acl_cnt = cnt;
3284 hci_prio_recalculate(hdev, LE_LINK);
/* hci_tx_work - workqueue entry point for all outgoing traffic: run
 * the per-type schedulers (unless userspace owns the device via the
 * user channel) and then flush raw packets. */
3287 static void hci_tx_work(struct work_struct *work)
3289 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3290 struct sk_buff *skb;
3292 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3293 hdev->sco_cnt, hdev->le_cnt);
3295 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3296 /* Schedule queues and send stuff to HCI driver */
3297 hci_sched_acl(hdev);
3298 hci_sched_sco(hdev);
3299 hci_sched_esco(hdev);
3303 /* Send next queued raw (unknown type) packet */
3304 while ((skb = skb_dequeue(&hdev->raw_q)))
3305 hci_send_frame(skb);
3308 /* ----- HCI RX task (incoming data processing) ----- */
3310 /* ACL data packet */
/* RX path for one inbound ACL frame: parse the 4-byte ACL header,
 * split the 12-bit connection handle from the 4-bit packet-boundary/
 * broadcast flags, resolve the hci_conn, and hand the payload to
 * L2CAP.  On an unknown handle the frame is logged and dropped
 * (the kfree_skb appears to be on an elided line).
 */
3311 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3313 struct hci_acl_hdr *hdr = (void *) skb->data;
3314 struct hci_conn *conn;
3315 __u16 handle, flags;
/* Strip the ACL header so skb->data points at the L2CAP payload. */
3317 skb_pull(skb, HCI_ACL_HDR_SIZE);
3319 handle = __le16_to_cpu(hdr->handle);
3320 flags = hci_flags(handle);
3321 handle = hci_handle(handle);
3323 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3326 hdev->stat.acl_rx++;
/* Handle -> hci_conn lookup; the matching hci_dev_lock() call is
 * presumably on an elided line just above. */
3329 conn = hci_conn_hash_lookup_handle(hdev, handle);
3330 hci_dev_unlock(hdev);
/* Inbound traffic proves the link is live -- leave sniff mode if the
 * connection policy asks for it. */
3333 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3335 /* Send to upper protocol */
/* L2CAP takes ownership of the skb from here on. */
3336 l2cap_recv_acldata(conn, skb, flags);
3339 BT_ERR("%s ACL packet for unknown connection handle %d",
3340 hdev->name, handle);
3346 /* SCO data packet */
/* RX path for one inbound SCO (voice) frame: parse the 3-byte SCO
 * header, resolve the connection handle, and hand the payload to the
 * SCO socket layer.  Unknown handles are logged and the frame dropped
 * (kfree_skb on an elided line).
 */
3347 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3349 struct hci_sco_hdr *hdr = (void *) skb->data;
3350 struct hci_conn *conn;
/* Strip the SCO header so skb->data points at the audio payload. */
3353 skb_pull(skb, HCI_SCO_HDR_SIZE);
3355 handle = __le16_to_cpu(hdr->handle);
3357 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3359 hdev->stat.sco_rx++;
/* Handle -> hci_conn lookup; the matching hci_dev_lock() call is
 * presumably on an elided line just above. */
3362 conn = hci_conn_hash_lookup_handle(hdev, handle);
3363 hci_dev_unlock(hdev);
3366 /* Send to upper protocol */
/* The SCO layer takes ownership of the skb. */
3367 sco_recv_scodata(conn, skb);
3370 BT_ERR("%s SCO packet for unknown connection handle %d",
3371 hdev->name, handle);
/* Return true when the current synchronous HCI request has no more
 * commands pending in cmd_q.
 *
 * The head of cmd_q starting a NEW request (req.start set) implies the
 * previous request is fully drained.  NOTE(review): the empty-queue
 * check after skb_peek() (treating an empty queue as "complete") is on
 * an elided line -- confirm before relying on the NULL handling.
 */
3377 static bool hci_req_is_complete(struct hci_dev *hdev)
3379 struct sk_buff *skb;
3381 skb = skb_peek(&hdev->cmd_q);
3385 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last command we sent and kick cmd_work.
 *
 * Used to recover from the CSR quirk handled in hci_req_cmd_complete():
 * a spontaneous reset-complete event eats the completion of whatever
 * command was in flight, so that command must be sent again.  A reset
 * itself is never resent.
 */
3388 static void hci_resend_last(struct hci_dev *hdev)
3390 struct hci_command_hdr *sent;
3391 struct sk_buff *skb;
/* Nothing in flight -- nothing to resend. */
3394 if (!hdev->sent_cmd)
3397 sent = (void *) hdev->sent_cmd->data;
3398 opcode = __le16_to_cpu(sent->opcode);
/* Never resend HCI_Reset (the 'return' is presumably elided). */
3399 if (opcode == HCI_OP_RESET)
/* Clone rather than move: sent_cmd must stay valid for later
 * completion matching.  Allocation-failure handling is elided. */
3402 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of the queue so it goes out before anything queued since. */
3406 skb_queue_head(&hdev->cmd_q, skb);
3407 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of one HCI command within a synchronous request.
 *
 * @opcode: opcode the controller reported as complete
 * @status: 0 on success, otherwise an HCI error code
 *
 * Finds the request's completion callback (either on sent_cmd or on a
 * queued command), and on failure flushes the remaining commands of the
 * aborted request from cmd_q.  The callback, if found, is invoked at
 * the end (visible at the last line below).
 */
3410 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3412 hci_req_complete_t req_complete = NULL;
3413 struct sk_buff *skb;
3414 unsigned long flags;
3416 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3418 /* If the completed command doesn't match the last one that was
3419 * sent we need to do special handling of it.
3421 if (!hci_sent_cmd_data(hdev, opcode)) {
3422 /* Some CSR based controllers generate a spontaneous
3423 * reset complete event during init and any pending
3424 * command will never be completed. In such a case we
3425 * need to resend whatever was the last sent
/* Only work around the quirk during controller init; a function-wide
 * 'return' for the mismatch case is presumably elided below. */
3428 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3429 hci_resend_last(hdev);
3434 /* If the command succeeded and there's still more commands in
3435 * this request the request is not yet complete.
3437 if (!status && !hci_req_is_complete(hdev))
3440 /* If this was the last command in a request the complete
3441 * callback would be found in hdev->sent_cmd instead of the
3442 * command queue (hdev->cmd_q).
3444 if (hdev->sent_cmd) {
3445 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3448 /* We must set the complete callback to NULL to
3449 * avoid calling the callback more than once if
3450 * this function gets called again.
3452 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3458 /* Remove all pending commands belonging to this request */
/* cmd_q is touched from IRQ context too, hence the irqsave lock and
 * the unlocked __skb_* queue primitives inside the critical section. */
3459 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3460 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* A req.start marker means we reached the NEXT request: put it back
 * and stop flushing (the 'break' is presumably elided). */
3461 if (bt_cb(skb)->req.start) {
3462 __skb_queue_head(&hdev->cmd_q, skb);
/* Each flushed command may carry the request's completion callback;
 * keep the last one seen (freeing of the skb is elided). */
3466 req_complete = bt_cb(skb)->req.complete;
3469 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* Invoke the request's completion callback, if any was found
 * (the NULL guard is presumably on an elided line). */
3473 req_complete(hdev, status);
/* RX work item: drain hdev->rx_q and dispatch each packet.
 *
 * Per packet: mirror to the monitor socket, optionally mirror to
 * promiscuous raw sockets, drop everything for raw/user-channel
 * devices, suppress data packets while the controller is still
 * initializing, then dispatch by packet type to the event/ACL/SCO
 * handlers.  Several 'continue'/kfree_skb/default-case lines are
 * elided in this view.
 */
3476 static void hci_rx_work(struct work_struct *work)
3478 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3479 struct sk_buff *skb;
3481 BT_DBG("%s", hdev->name);
3483 while ((skb = skb_dequeue(&hdev->rx_q))) {
3484 /* Send copy to monitor */
3485 hci_send_to_monitor(hdev, skb);
3487 if (atomic_read(&hdev->promisc)) {
3488 /* Send copy to the sockets */
3489 hci_send_to_sock(hdev, skb);
/* Raw or user-channel devices: the kernel stack must not process
 * the packet itself (kfree_skb + continue presumably elided). */
3492 if (test_bit(HCI_RAW, &hdev->flags) ||
3493 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3498 if (test_bit(HCI_INIT, &hdev->flags)) {
3499 /* Don't process data packets in this states. */
3500 switch (bt_cb(skb)->pkt_type) {
3501 case HCI_ACLDATA_PKT:
3502 case HCI_SCODATA_PKT:
/* Main dispatch.  Each handler consumes the skb; the HCI_EVENT_PKT
 * case label and the per-case 'break's are elided here. */
3509 switch (bt_cb(skb)->pkt_type) {
3511 BT_DBG("%s Event packet", hdev->name);
3512 hci_event_packet(hdev, skb);
3515 case HCI_ACLDATA_PKT:
3516 BT_DBG("%s ACL data packet", hdev->name);
3517 hci_acldata_packet(hdev, skb);
3520 case HCI_SCODATA_PKT:
3521 BT_DBG("%s SCO data packet", hdev->name);
3522 hci_scodata_packet(hdev, skb);
/* Command work item: send the next queued HCI command if the
 * controller has a free command slot (cmd_cnt > 0).
 *
 * A clone of the outgoing command is kept in hdev->sent_cmd so its
 * completion event can be matched later.  The command timer is armed
 * for every command except HCI_Reset, where it is cancelled instead
 * (a reset may legitimately take longer / kill outstanding state).
 */
3532 static void hci_cmd_work(struct work_struct *work)
3534 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3535 struct sk_buff *skb;
3537 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3538 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3540 /* Send queued commands */
3541 if (atomic_read(&hdev->cmd_cnt)) {
3542 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous in-flight command's reference before replacing
 * it (the empty-queue 'return' is presumably elided above). */
3546 kfree_skb(hdev->sent_cmd);
3548 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3549 if (hdev->sent_cmd) {
/* Consume one controller command credit and transmit. */
3550 atomic_dec(&hdev->cmd_cnt);
3551 hci_send_frame(skb);
3552 if (test_bit(HCI_RESET, &hdev->flags))
3553 del_timer(&hdev->cmd_timer);
/* Non-reset command: (re)arm the command timeout ('else' on an
 * elided line pairs this with the del_timer above). */
3555 mod_timer(&hdev->cmd_timer,
3556 jiffies + HCI_CMD_TIMEOUT);
/* skb_clone() failed: put the command back and retry later. */
3558 skb_queue_head(&hdev->cmd_q, skb);
3559 queue_work(hdev->workqueue, &hdev->cmd_work);
3564 u8 bdaddr_to_le(u8 bdaddr_type)
3566 switch (bdaddr_type) {
3567 case BDADDR_LE_PUBLIC:
3568 return ADDR_LE_DEV_PUBLIC;
3571 /* Fallback to LE Random address type */
3572 return ADDR_LE_DEV_RANDOM;