2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the per-device workqueue handlers (bodies are
 * outside this excerpt): RX event processing, command submission and TX
 * scheduling.
 */
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, protected by an rwlock. */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device state event (e.g. HCI_DEV_UP / HCI_DEV_DOWN, see the
 * open/close paths below) to the HCI socket layer.
 */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: record the controller's
 * status byte, mark the request done and wake the sleeping submitter on
 * req_wait_q. Only acts if a request is actually pending.
 */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with a (positive) errno value and
 * wake the waiter; the waiter negates req_result on the CANCELED path.
 */
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
/* Validate the last received event (taken from hdev->recv_evt) and return
 * it if it is the Command Complete (or caller-requested event) matching
 * @opcode; ERR_PTR(-ENODATA) otherwise.
 *
 * NOTE(review): this excerpt is missing lines — the skb local, the
 * assignment from hdev->recv_evt and several early-return branches are
 * absent; verify against the upstream file.
 */
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
/* Consume the stored event so the next command starts fresh. */
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific event was requested, it must match exactly. */
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
/* Opcode in the event is little-endian on the wire. */
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until it completes, is cancelled, or times out. On success returns the
 * matching Command Complete skb via hci_get_cmd_complete(); on failure an
 * ERR_PTR. Caller must hold the request lock.
 *
 * NOTE(review): several lines (err declaration, hci_req_run error path,
 * switch default/timeout case, ERR_PTR(err) return) are missing from this
 * excerpt.
 */
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
/* Queue ourselves before sleeping so the completion wake-up is not lost. */
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
/* Controller returned a status byte: map it to a negative errno. */
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
/* Reset bookkeeping for the next synchronous request. */
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command that waits for the default
 * Command Complete event (event == 0).
 */
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
/* Build a request via the caller-supplied @func(req, opt), run it, and
 * sleep until hci_req_sync_complete() fires or @timeout expires. Returns
 * 0 or a negative errno. Caller must hold the request lock.
 *
 * NOTE(review): lines are missing from this excerpt (func invocation,
 * err declaration, timeout/default switch arms, return statement).
 */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
/* Register on the wait queue before sleeping to avoid a lost wake-up. */
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
/* Clear state for the next request. */
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
/* Public-facing synchronous request helper: refuses when the device is
 * not HCI_UP, then serializes all requests around __hci_req_sync().
 *
 * NOTE(review): the matching hci_req_lock(hdev) call is missing from this
 * excerpt (only the unlock is visible) — verify against upstream.
 */
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
/* Queue an HCI_Reset; HCI_RESET flag is cleared when the reset completes
 * (handled in the event path, outside this excerpt).
 */
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR (packet-based flow control) controllers: query
 * the basic identity information needed before any further setup.
 */
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific info/parameter reads. AMP controllers skip stages 2-4
 * (see __hci_init below).
 */
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local Supported Commands */
311 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
313 /* Read Local Supported Features */
314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
316 /* Read Local AMP Info */
317 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
319 /* Read Data Blk size */
320 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
322 /* Read Flow Control Mode */
323 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
325 /* Read Location Data */
326 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Init stage 1: optional reset, then dispatch to bredr_init()/amp_init()
 * based on the controller type.
 *
 * NOTE(review): the switch's case labels and the init-function calls are
 * missing from this excerpt; only the error default is visible.
 */
329 static void hci_init1_req(struct hci_request *req, unsigned long opt)
331 struct hci_dev *hdev = req->hdev;
333 BT_DBG("%s %ld", hdev->name, opt);
/* Controllers that are reset on close don't need another reset here. */
336 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
337 hci_reset_req(req, 0);
339 switch (hdev->dev_type) {
349 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 setup common to BR/EDR-capable controllers: basic parameter
 * reads, clear event filters and set the connection-accept timeout.
 *
 * NOTE(review): the local declarations of "param" and "flt_type" are
 * missing from this excerpt, and "¶m" below is mojibake for "&param"
 * ('&para;' HTML-entity corruption) — must be restored before this
 * compiles.
 */
354 static void bredr_setup(struct hci_request *req)
359 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
360 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
362 /* Read Class of Device */
363 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
365 /* Read Local Name */
366 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
368 /* Read Voice Setting */
369 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
371 /* Clear Event Filters */
372 flt_type = HCI_FLT_CLEAR_ALL;
373 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
375 /* Connection accept timeout ~20 secs */
376 param = __constant_cpu_to_le16(0x7d00);
377 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
379 /* Read page scan parameters */
/* Page-scan activity/type reads only exist from Bluetooth 1.2 onward. */
380 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
381 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
382 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 setup for LE-capable controllers: read LE buffer sizes,
 * features, TX power, white-list size and supported states.
 */
386 static void le_setup(struct hci_request *req)
388 struct hci_dev *hdev = req->hdev;
390 /* Read LE Buffer Size */
391 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
393 /* Read LE Local Supported Features */
394 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
396 /* Read LE Advertising Channel TX Power */
397 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
399 /* Read LE White List Size */
400 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
402 /* Read LE Supported States */
403 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
405 /* LE-only controllers have LE implicitly enabled */
406 if (!lmp_bredr_capable(hdev))
407 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the inquiry mode to program into the controller: prefer extended
 * inquiry, then RSSI, unless the controller matches one of the
 * manufacturer/revision quirk entries below (known-broken firmware).
 *
 * NOTE(review): the return statements for each branch are missing from
 * this excerpt — the returned mode values cannot be confirmed here.
 */
410 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
412 if (lmp_ext_inq_capable(hdev))
415 if (lmp_inq_rssi_capable(hdev))
/* Quirk tables keyed on manufacturer / hci_rev / lmp_subver. */
418 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
419 hdev->lmp_subver == 0x0757)
422 if (hdev->manufacturer == 15) {
423 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
425 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
427 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
431 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
432 hdev->lmp_subver == 0x1805)
/* Program the inquiry mode chosen by hci_get_inquiry_mode(). */
438 static void hci_setup_inquiry_mode(struct hci_request *req)
442 mode = hci_get_inquiry_mode(req->hdev);
444 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the HCI event mask based on controller capabilities:
 * a BR/EDR default mask extended per LMP feature bits, or a minimal
 * LE-only mask. Also sends the LE event mask when LE is supported.
 */
447 static void hci_setup_event_mask(struct hci_dev *hdev)
449 struct hci_dev *hdev = req->hdev;
451 /* The second byte is 0xff instead of 0x9f (two reserved bits
452 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
455 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
457 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
458 * any event mask for pre 1.2 devices.
460 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
463 if (lmp_bredr_capable(hdev)) {
464 events[4] |= 0x01; /* Flow Specification Complete */
465 events[4] |= 0x02; /* Inquiry Result with RSSI */
466 events[4] |= 0x04; /* Read Remote Extended Features Complete */
467 events[5] |= 0x08; /* Synchronous Connection Complete */
468 events[5] |= 0x10; /* Synchronous Connection Changed */
470 /* Use a different default for LE-only devices */
471 memset(events, 0, sizeof(events));
472 events[0] |= 0x10; /* Disconnection Complete */
473 events[0] |= 0x80; /* Encryption Change */
474 events[1] |= 0x08; /* Read Remote Version Information Complete */
475 events[1] |= 0x20; /* Command Complete */
476 events[1] |= 0x40; /* Command Status */
477 events[1] |= 0x80; /* Hardware Error */
478 events[2] |= 0x04; /* Number of Completed Packets */
479 events[3] |= 0x02; /* Data Buffer Overflow */
480 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Per-feature additions on top of the base mask. */
483 if (lmp_inq_rssi_capable(hdev))
484 events[4] |= 0x02; /* Inquiry Result with RSSI */
486 if (lmp_sniffsubr_capable(hdev))
487 events[5] |= 0x20; /* Sniff Subrating */
489 if (lmp_pause_enc_capable(hdev))
490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
492 if (lmp_ext_inq_capable(hdev))
493 events[5] |= 0x40; /* Extended Inquiry Result */
495 if (lmp_no_flush_capable(hdev))
496 events[7] |= 0x01; /* Enhanced Flush Complete */
498 if (lmp_lsto_capable(hdev))
499 events[6] |= 0x80; /* Link Supervision Timeout Changed */
501 if (lmp_ssp_capable(hdev)) {
502 events[6] |= 0x01; /* IO Capability Request */
503 events[6] |= 0x02; /* IO Capability Response */
504 events[6] |= 0x04; /* User Confirmation Request */
505 events[6] |= 0x08; /* User Passkey Request */
506 events[6] |= 0x10; /* Remote OOB Data Request */
507 events[6] |= 0x20; /* Simple Pairing Complete */
508 events[7] |= 0x04; /* User Passkey Notification */
509 events[7] |= 0x08; /* Keypress Notification */
510 events[7] |= 0x10; /* Remote Host Supported
511 * Features Notification
515 if (lmp_le_capable(hdev))
516 events[7] |= 0x20; /* LE Meta-Event */
518 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
520 if (lmp_le_capable(hdev)) {
/* Reuse the buffer for the LE event mask.
 * NOTE(review): the lines that set the LE mask bits are missing
 * from this excerpt — as shown, an all-zero mask would be sent.
 */
521 memset(events, 0, sizeof(events));
523 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
524 sizeof(events), events);
/* Init stage 2: run bredr_setup()/le_setup() per capability, program
 * the event mask, and configure SSP, inquiry mode, extended features
 * and link-level security.
 *
 * NOTE(review): several lines are missing from this excerpt — the
 * bredr_setup()/le_setup() calls, the "mode" / "enable" locals and
 * parts of the SSP branch; verify against upstream.
 */
528 static void hci_init2_req(struct hci_request *req, unsigned long opt)
530 struct hci_dev *hdev = req->hdev;
532 if (lmp_bredr_capable(hdev))
535 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
537 if (lmp_le_capable(hdev))
540 hci_setup_event_mask(req);
542 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
543 * local supported commands HCI command.
545 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
546 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
548 if (lmp_ssp_capable(hdev)) {
549 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
551 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
552 sizeof(mode), &mode);
/* SSP disabled: clear the EIR data on the controller. */
554 struct hci_cp_write_eir cp;
556 memset(hdev->eir, 0, sizeof(hdev->eir));
557 memset(&cp, 0, sizeof(cp));
559 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
563 if (lmp_inq_rssi_capable(hdev))
564 hci_setup_inquiry_mode(req);
566 if (lmp_inq_tx_pwr_capable(hdev))
567 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
569 if (lmp_ext_feat_capable(hdev)) {
570 struct hci_cp_read_local_ext_features cp;
573 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
577 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
579 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Program the default link policy from the controller's LMP features
 * (role switch, hold, sniff, park).
 *
 * NOTE(review): the "link_policy" local declaration/initialization is
 * missing from this excerpt.
 */
584 static void hci_setup_link_policy(struct hci_request *req)
586 struct hci_dev *hdev = req->hdev;
587 struct hci_cp_write_def_link_policy cp;
590 if (lmp_rswitch_capable(hdev))
591 link_policy |= HCI_LP_RSWITCH;
592 if (lmp_hold_capable(hdev))
593 link_policy |= HCI_LP_HOLD;
594 if (lmp_sniff_capable(hdev))
595 link_policy |= HCI_LP_SNIFF;
596 if (lmp_park_capable(hdev))
597 link_policy |= HCI_LP_PARK;
599 cp.policy = cpu_to_le16(link_policy);
600 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Write the LE Host Supported setting, but only on dual-mode (BR/EDR)
 * controllers and only when it would actually change the host state.
 */
603 static void hci_set_le_support(struct hci_request *req)
605 struct hci_dev *hdev = req->hdev;
606 struct hci_cp_write_le_host_supported cp;
608 /* LE-only devices do not support explicit enablement */
609 if (!lmp_bredr_capable(hdev))
612 memset(&cp, 0, sizeof(cp));
614 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
616 cp.simul = lmp_le_br_capable(hdev);
/* Skip the command if the controller already matches. */
619 if (cp.le != lmp_host_le_capable(hdev))
620 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and send event mask page 2: enables Connectionless Slave
 * Broadcast events according to the master/slave role bits in
 * features page 2.
 */
624 static void hci_set_event_mask_page_2(struct hci_request *req)
626 struct hci_dev *hdev = req->hdev;
627 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
629 /* If Connectionless Slave Broadcast master role is supported
630 * enable all necessary events for it.
632 if (hdev->features[2][0] & 0x01) {
633 events[1] |= 0x40; /* Triggered Clock Capture */
634 events[1] |= 0x80; /* Synchronization Train Complete */
635 events[2] |= 0x10; /* Slave Page Response Timeout */
636 events[2] |= 0x20; /* CSB Channel Map Change */
639 /* If Connectionless Slave Broadcast slave role is supported
640 * enable all necessary events for it.
642 if (hdev->features[2][0] & 0x02) {
643 events[2] |= 0x01; /* Synchronization Train Received */
644 events[2] |= 0x02; /* CSB Receive */
645 events[2] |= 0x04; /* CSB Timeout */
646 events[2] |= 0x08; /* Truncated Page Complete */
649 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Init stage 3: delete stored link keys (if the command is supported),
 * set the default link policy, LE host support, and read any feature
 * pages beyond page 1.
 */
652 static void hci_init3_req(struct hci_request *req, unsigned long opt)
654 struct hci_dev *hdev = req->hdev;
657 /* Some Broadcom based Bluetooth controllers do not support the
658 * Delete Stored Link Key command. They are clearly indicating its
659 * absence in the bit mask of supported commands.
661 * Check the supported commands and only if the command is marked
662 * as supported send it. If not supported assume that the controller
663 * does not have actual support for stored link keys which makes this
664 * command redundant anyway.
666 if (hdev->commands[6] & 0x80) {
667 struct hci_cp_delete_stored_link_key cp;
669 bacpy(&cp.bdaddr, BDADDR_ANY);
670 cp.delete_all = 0x01;
671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Bit 4 of octet 5 = Write Default Link Policy Settings supported. */
675 if (hdev->commands[5] & 0x10)
676 hci_setup_link_policy(req);
678 if (lmp_le_capable(hdev)) {
679 hci_set_le_support(req);
683 /* Read features beyond page 1 if available */
684 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
685 struct hci_cp_read_local_ext_features cp;
688 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Init stage 4: optional extras — event mask page 2 and synchronization
 * train parameters, each gated on controller support bits.
 */
693 static void hci_init4_req(struct hci_request *req, unsigned long opt)
695 struct hci_dev *hdev = req->hdev;
697 /* Set event mask page 2 if the HCI command for it is supported */
698 if (hdev->commands[22] & 0x04)
699 hci_set_event_mask_page_2(req);
701 /* Check for Synchronization Train support */
702 if (hdev->features[2][0] & 0x04)
703 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Run the four-stage controller initialization synchronously. AMP
 * controllers only get stage 1; BR/EDR/LE controllers run all four.
 * Returns 0 or the first stage's negative errno.
 */
706 static int __hci_init(struct hci_dev *hdev)
710 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
714 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
715 * BR/EDR/LE type controllers. AMP controllers only need the
718 if (hdev->dev_type != HCI_BREDR)
721 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
725 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
729 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
/* Request builder for HCISETSCAN: write the scan-enable value passed in
 * "opt" (local "scan" assignment is missing from this excerpt).
 */
732 static void hci_scan_req(struct hci_request *req, unsigned long opt)
736 BT_DBG("%s %x", req->hdev->name, scan);
738 /* Inquiry and Page scans */
739 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder for HCISETAUTH: write authentication enable (local
 * "auth" assignment is missing from this excerpt).
 */
742 static void hci_auth_req(struct hci_request *req, unsigned long opt)
746 BT_DBG("%s %x", req->hdev->name, auth);
749 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder for HCISETENCRYPT: write encryption mode (local
 * "encrypt" assignment is missing from this excerpt).
 */
752 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
756 BT_DBG("%s %x", req->hdev->name, encrypt);
759 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder for HCISETLINKPOL: write the default link policy,
 * converting the ioctl argument to little-endian wire format.
 */
762 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
764 __le16 policy = cpu_to_le16(opt);
766 BT_DBG("%s %x", req->hdev->name, policy);
768 /* Default link policy */
769 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
772 /* Get HCI device by index.
773 * Device is held on return. */
/* Walks hci_dev_list under the read lock; the returned device carries an
 * extra reference (hci_dev_hold) that the caller must drop with
 * hci_dev_put. Returns NULL if no device has this index.
 */
774 struct hci_dev *hci_dev_get(int index)
776 struct hci_dev *hdev = NULL, *d;
783 read_lock(&hci_dev_list_lock);
784 list_for_each_entry(d, &hci_dev_list, list) {
785 if (d->id == index) {
786 hdev = hci_dev_hold(d);
790 read_unlock(&hci_dev_list_lock);
794 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state (the return
 * statements for each branch are missing from this excerpt).
 */
796 bool hci_discovery_active(struct hci_dev *hdev)
798 struct discovery_state *discov = &hdev->discovery;
800 switch (discov->state) {
801 case DISCOVERY_FINDING:
802 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and notify the management
 * interface (mgmt_discovering) on the STOPPED and FINDING transitions.
 * No-op when the state does not change.
 */
810 void hci_discovery_set_state(struct hci_dev *hdev, int state)
812 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
814 if (hdev->discovery.state == state)
818 case DISCOVERY_STOPPED:
/* A STARTING->STOPPED bounce never reported "discovering", so
 * don't report its end either.
 */
819 if (hdev->discovery.state != DISCOVERY_STARTING)
820 mgmt_discovering(hdev, 0);
822 case DISCOVERY_STARTING:
824 case DISCOVERY_FINDING:
825 mgmt_discovering(hdev, 1);
827 case DISCOVERY_RESOLVING:
829 case DISCOVERY_STOPPING:
833 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reset the per-state lists
 * (the per-entry free inside the loop is missing from this excerpt).
 */
836 void hci_inquiry_cache_flush(struct hci_dev *hdev)
838 struct discovery_state *cache = &hdev->discovery;
839 struct inquiry_entry *p, *n;
841 list_for_each_entry_safe(p, n, &cache->all, all) {
846 INIT_LIST_HEAD(&cache->unknown);
847 INIT_LIST_HEAD(&cache->resolve);
/* Linear search of the full inquiry cache by Bluetooth address; returns
 * the matching entry or (per the missing tail of this excerpt) NULL.
 */
850 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
853 struct discovery_state *cache = &hdev->discovery;
854 struct inquiry_entry *e;
856 BT_DBG("cache %p, %pMR", cache, bdaddr);
858 list_for_each_entry(e, &cache->all, all) {
859 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Same as hci_inquiry_cache_lookup() but restricted to entries whose
 * remote name is still unknown (the "unknown" sub-list).
 */
866 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
869 struct discovery_state *cache = &hdev->discovery;
870 struct inquiry_entry *e;
872 BT_DBG("cache %p, %pMR", cache, bdaddr);
874 list_for_each_entry(e, &cache->unknown, list) {
875 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the name-resolve sub-list: BDADDR_ANY matches any entry in the
 * given name_state, otherwise match on the exact address.
 */
882 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
886 struct discovery_state *cache = &hdev->discovery;
887 struct inquiry_entry *e;
889 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
891 list_for_each_entry(e, &cache->resolve, list) {
892 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
894 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert @ie into the resolve list, keeping it sorted by RSSI
 * (strongest first) so name resolution prioritizes nearby devices;
 * NAME_PENDING entries keep their position at the front.
 */
901 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
902 struct inquiry_entry *ie)
904 struct discovery_state *cache = &hdev->discovery;
905 struct list_head *pos = &cache->resolve;
906 struct inquiry_entry *p;
910 list_for_each_entry(p, &cache->resolve, list) {
911 if (p->name_state != NAME_PENDING &&
912 abs(p->data.rssi) >= abs(ie->data.rssi))
917 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-result entry in the discovery cache.
 * Reports the device's SSP mode through *ssp and maintains the
 * unknown/resolve sub-lists and name_state as information arrives.
 * The boolean return appears to indicate whether a name request is
 * still needed (NAME_NOT_KNOWN at the end) — NOTE(review): the return
 * statements are missing from this excerpt, verify against upstream.
 */
920 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
921 bool name_known, bool *ssp)
923 struct discovery_state *cache = &hdev->discovery;
924 struct inquiry_entry *ie;
926 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data. */
928 hci_remove_remote_oob_data(hdev, &data->bdaddr);
931 *ssp = data->ssp_mode;
933 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
935 if (ie->data.ssp_mode && ssp)
/* RSSI change for a name-needed entry: re-sort the resolve list. */
938 if (ie->name_state == NAME_NEEDED &&
939 data->rssi != ie->data.rssi) {
940 ie->data.rssi = data->rssi;
941 hci_inquiry_cache_update_resolve(hdev, ie);
947 /* Entry not in the cache. Add new one. */
948 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
952 list_add(&ie->all, &cache->all);
955 ie->name_state = NAME_KNOWN;
957 ie->name_state = NAME_NOT_KNOWN;
958 list_add(&ie->list, &cache->unknown);
/* Name became known for an entry that was still unresolved. */
962 if (name_known && ie->name_state != NAME_KNOWN &&
963 ie->name_state != NAME_PENDING) {
964 ie->name_state = NAME_KNOWN;
968 memcpy(&ie->data, data, sizeof(*data));
969 ie->timestamp = jiffies;
970 cache->timestamp = jiffies;
972 if (ie->name_state == NAME_NOT_KNOWN)
/* Serialize up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info for the HCIINQUIRY ioctl. Returns the count copied
 * (the "copied" local, loop bound check and info++ advance are missing
 * from this excerpt).
 */
978 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
980 struct discovery_state *cache = &hdev->discovery;
981 struct inquiry_info *info = (struct inquiry_info *) buf;
982 struct inquiry_entry *e;
985 list_for_each_entry(e, &cache->all, all) {
986 struct inquiry_data *data = &e->data;
991 bacpy(&info->bdaddr, &data->bdaddr);
992 info->pscan_rep_mode = data->pscan_rep_mode;
993 info->pscan_period_mode = data->pscan_period_mode;
994 info->pscan_mode = data->pscan_mode;
995 memcpy(info->dev_class, data->dev_class, 3);
996 info->clock_offset = data->clock_offset;
1002 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for HCIINQUIRY: start an inquiry with the LAP, length
 * and max-responses from the user's hci_inquiry_req, unless an inquiry
 * is already running.
 */
1006 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1008 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1009 struct hci_dev *hdev = req->hdev;
1010 struct hci_cp_inquiry cp;
1012 BT_DBG("%s", hdev->name);
1014 if (test_bit(HCI_INQUIRY, &hdev->flags))
1018 memcpy(&cp.lap, &ir->lap, 3);
1019 cp.length = ir->length;
1020 cp.num_rsp = ir->num_rsp;
1021 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: sleep is interruptible, so report pending
 * signals as the abort condition (the schedule() call is missing from
 * this excerpt).
 */
1024 static int wait_inquiry(void *word)
1027 return signal_pending(current);
/* HCIINQUIRY ioctl: validate the device (BR/EDR, enabled, not claimed by
 * a user channel), optionally flush a stale cache and run a new inquiry,
 * wait for it to finish, then copy the cached results back to userspace.
 *
 * NOTE(review): multiple lines are missing from this excerpt — locks
 * around the cache access, the hci_dev_put, several error labels and the
 * "buf"/"timeo" declarations; verify against upstream.
 */
1030 int hci_inquiry(void __user *arg)
1032 __u8 __user *ptr = arg;
1033 struct hci_inquiry_req ir;
1034 struct hci_dev *hdev;
1035 int err = 0, do_inquiry = 0, max_rsp;
1039 if (copy_from_user(&ir, ptr, sizeof(ir)))
1042 hdev = hci_dev_get(ir.dev_id);
1046 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1051 if (hdev->dev_type != HCI_BREDR) {
1056 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run the inquiry when the cache is stale/empty or a flush
 * was explicitly requested.
 */
1062 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1063 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1064 hci_inquiry_cache_flush(hdev);
1067 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per the HCI spec; 2000ms per unit is
 * a deliberate over-allowance for the timeout.
 */
1069 timeo = ir.length * msecs_to_jiffies(2000);
1072 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1077 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1078 * cleared). If it is interrupted by a signal, return -EINTR.
1080 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1081 TASK_INTERRUPTIBLE))
1085 /* for unlimited number of responses we will use buffer with
1088 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1090 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1091 * copy it to the user space.
1093 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1100 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1101 hci_dev_unlock(hdev);
1103 BT_DBG("num_rsp %d", ir.num_rsp);
1105 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1107 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Build LE advertising data into @ptr: flags (general discoverable /
 * BR-EDR-capability bits), TX power if valid, and the (possibly
 * shortened) local name. Returns the total AD length.
 *
 * NOTE(review): lines are missing from this excerpt — the flags AD
 * structure emission, the name_len local and the shortened-name length
 * clamp; verify against upstream before relying on exact layout.
 */
1120 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1122 u8 ad_len = 0, flags = 0;
1125 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1126 flags |= LE_AD_GENERAL;
1128 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1129 if (lmp_le_br_capable(hdev))
1130 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1131 if (lmp_host_le_br_capable(hdev))
1132 flags |= LE_AD_SIM_LE_BREDR_HOST;
1134 flags |= LE_AD_NO_BREDR;
1138 BT_DBG("adv flags 0x%02x", flags);
/* HCI_TX_POWER_INVALID means the controller didn't report a value. */
1148 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1150 ptr[1] = EIR_TX_POWER;
1151 ptr[2] = (u8) hdev->adv_tx_power;
1157 name_len = strlen(hdev->dev_name);
1159 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1161 if (name_len > max_len) {
1163 ptr[1] = EIR_NAME_SHORT;
1165 ptr[1] = EIR_NAME_COMPLETE;
/* AD structure length byte counts the type byte plus the data. */
1167 ptr[0] = name_len + 1;
1169 memcpy(ptr + 2, hdev->dev_name, name_len);
1171 ad_len += (name_len + 2);
1172 ptr += (name_len + 2);
/* Regenerate the LE advertising data and queue an LE_Set_Advertising_Data
 * command, skipping the write when the data is unchanged from what was
 * last programmed (cached in hdev->adv_data).
 */
1178 void hci_update_ad(struct hci_request *req)
1180 struct hci_dev *hdev = req->hdev;
1181 struct hci_cp_le_set_adv_data cp;
1184 if (!lmp_le_capable(hdev))
1187 memset(&cp, 0, sizeof(cp));
1189 len = create_ad(hdev, cp.data);
1191 if (hdev->adv_data_len == len &&
1192 memcmp(cp.data, hdev->adv_data, len) == 0)
1195 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1196 hdev->adv_data_len = len;
1200 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Core power-on path: validate preconditions (not unregistering, not
 * rfkilled, address available), call the driver's open() and setup(),
 * run __hci_init(), then mark the device HCI_UP and notify listeners.
 * On init failure, all work items and queues are flushed/purged.
 *
 * NOTE(review): this excerpt is missing lines — hci_req_lock, several
 * goto labels ("done"), the failure-path close() call and the final
 * return; verify against upstream.
 */
1203 static int hci_dev_do_open(struct hci_dev *hdev)
1207 BT_DBG("%s %p", hdev->name, hdev);
1211 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1216 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1217 /* Check for rfkill but allow the HCI setup stage to
1218 * proceed (which in itself doesn't cause any RF activity).
1220 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1225 /* Check for valid public address or a configured static
1226 * random address, but let the HCI setup proceed to
1227 * be able to determine if there is a public address
1230 * This check is only valid for BR/EDR controllers
1231 * since AMP controllers do not have an address.
1233 if (hdev->dev_type == HCI_BREDR &&
1234 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1235 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1236 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do. */
1241 if (test_bit(HCI_UP, &hdev->flags)) {
1246 if (hdev->open(hdev)) {
/* cmd_cnt of 1 allows exactly one outstanding command. */
1251 atomic_set(&hdev->cmd_cnt, 1);
1252 set_bit(HCI_INIT, &hdev->flags);
1254 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1255 ret = hdev->setup(hdev);
1258 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1259 set_bit(HCI_RAW, &hdev->flags);
/* Raw and user-channel devices skip the stack's own init sequence. */
1261 if (!test_bit(HCI_RAW, &hdev->flags) &&
1262 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1263 ret = __hci_init(hdev);
1266 clear_bit(HCI_INIT, &hdev->flags);
1270 set_bit(HCI_UP, &hdev->flags);
1271 hci_notify(hdev, HCI_DEV_UP);
1272 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1273 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1274 hdev->dev_type == HCI_BREDR) {
1276 mgmt_powered(hdev, 1);
1277 hci_dev_unlock(hdev);
1280 /* Init failed, cleanup */
1281 flush_work(&hdev->tx_work);
1282 flush_work(&hdev->cmd_work);
1283 flush_work(&hdev->rx_work);
1285 skb_queue_purge(&hdev->cmd_q);
1286 skb_queue_purge(&hdev->rx_q);
1291 if (hdev->sent_cmd) {
1292 kfree_skb(hdev->sent_cmd);
1293 hdev->sent_cmd = NULL;
1301 hci_req_unlock(hdev);
1305 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: look up the device, cancel any pending
 * auto-power-off, wait for setup to finish, then do the real open.
 * (The hci_dev_put and return are missing from this excerpt.)
 */
1307 int hci_dev_open(__u16 dev)
1309 struct hci_dev *hdev;
1312 hdev = hci_dev_get(dev);
1316 /* We need to ensure that no other power on/off work is pending
1317 * before proceeding to call hci_dev_do_open. This is
1318 * particularly important if the setup procedure has not yet
1321 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1322 cancel_delayed_work(&hdev->power_off);
1324 /* After this call it is guaranteed that the setup procedure
1325 * has finished. This means that error conditions like RFKILL
1326 * or no valid public or static random address apply.
1328 flush_workqueue(hdev->req_workqueue);
1330 err = hci_dev_do_open(hdev);
/* Core power-off path: cancel pending work, flush RX/TX, tear down
 * discovery/connection state, optionally reset the controller
 * (HCI_QUIRK_RESET_ON_CLOSE), drain all queues, drop the last sent
 * command and stored event, and notify mgmt that power is off.
 *
 * NOTE(review): several lines are missing from this excerpt —
 * hci_req_lock, hci_dev_lock around the cache/conn flush, the driver
 * flush()/close() calls and the final return; verify against upstream.
 */
1337 static int hci_dev_do_close(struct hci_dev *hdev)
1339 BT_DBG("%s %p", hdev->name, hdev);
1341 cancel_delayed_work(&hdev->power_off);
/* Wake anyone blocked in a synchronous request before tearing down. */
1343 hci_req_cancel(hdev, ENODEV);
1346 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1347 del_timer_sync(&hdev->cmd_timer);
1348 hci_req_unlock(hdev);
1352 /* Flush RX and TX works */
1353 flush_work(&hdev->tx_work);
1354 flush_work(&hdev->rx_work);
1356 if (hdev->discov_timeout > 0) {
1357 cancel_delayed_work(&hdev->discov_off);
1358 hdev->discov_timeout = 0;
1359 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1362 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1363 cancel_delayed_work(&hdev->service_cache);
1365 cancel_delayed_work_sync(&hdev->le_scan_disable);
1368 hci_inquiry_cache_flush(hdev);
1369 hci_conn_hash_flush(hdev);
1370 hci_dev_unlock(hdev);
1372 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller on close unless it's a raw device or was only
 * auto-powered (quirk-gated).
 */
1378 skb_queue_purge(&hdev->cmd_q);
1379 atomic_set(&hdev->cmd_cnt, 1);
1380 if (!test_bit(HCI_RAW, &hdev->flags) &&
1381 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1382 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1383 set_bit(HCI_INIT, &hdev->flags);
1384 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1385 clear_bit(HCI_INIT, &hdev->flags);
1388 /* flush cmd work */
1389 flush_work(&hdev->cmd_work);
1392 skb_queue_purge(&hdev->rx_q);
1393 skb_queue_purge(&hdev->cmd_q);
1394 skb_queue_purge(&hdev->raw_q);
1396 /* Drop last sent command */
1397 if (hdev->sent_cmd) {
1398 del_timer_sync(&hdev->cmd_timer);
1399 kfree_skb(hdev->sent_cmd);
1400 hdev->sent_cmd = NULL;
1403 kfree_skb(hdev->recv_evt);
1404 hdev->recv_evt = NULL;
1406 /* After this point our queues are empty
1407 * and no tasks are scheduled. */
/* Keep only flags that persist across power cycles. */
1412 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1414 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1415 if (hdev->dev_type == HCI_BREDR) {
1417 mgmt_powered(hdev, 0);
1418 hci_dev_unlock(hdev);
1422 /* Controller radio is available but is currently powered down */
1423 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1425 memset(hdev->eir, 0, sizeof(hdev->eir));
1426 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1428 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject devices claimed by a user
 * channel, cancel auto-power-off, then run the real close path.
 * (The hci_dev_put and return are missing from this excerpt.)
 */
1434 int hci_dev_close(__u16 dev)
1436 struct hci_dev *hdev;
1439 hdev = hci_dev_get(dev);
1443 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1448 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1449 cancel_delayed_work(&hdev->power_off);
1451 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop all queued traffic, flush discovery and
 * connection state, reset the data counters and (for non-raw devices)
 * send an HCI_Reset.
 *
 * NOTE(review): the hci_req_lock, driver flush() call, hci_dev_lock and
 * the done label/return are missing from this excerpt.
 */
1458 int hci_dev_reset(__u16 dev)
1460 struct hci_dev *hdev;
1463 hdev = hci_dev_get(dev);
1469 if (!test_bit(HCI_UP, &hdev->flags)) {
1474 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1480 skb_queue_purge(&hdev->rx_q);
1481 skb_queue_purge(&hdev->cmd_q);
1484 hci_inquiry_cache_flush(hdev);
1485 hci_conn_hash_flush(hdev);
1486 hci_dev_unlock(hdev);
1491 atomic_set(&hdev->cmd_cnt, 1);
1492 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1494 if (!test_bit(HCI_RAW, &hdev->flags))
1495 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1498 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's byte/packet statistics.
 * Refused on user-channel devices.
 */
1503 int hci_dev_reset_stat(__u16 dev)
1505 struct hci_dev *hdev;
1508 hdev = hci_dev_get(dev);
1512 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1517 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1524 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1526 struct hci_dev *hdev;
1527 struct hci_dev_req dr;
1530 if (copy_from_user(&dr, arg, sizeof(dr)))
1533 hdev = hci_dev_get(dr.dev_id);
1537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1542 if (hdev->dev_type != HCI_BREDR) {
1547 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1554 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1559 if (!lmp_encrypt_capable(hdev)) {
1564 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1565 /* Auth must be enabled first */
1566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1572 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1577 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1582 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1586 case HCISETLINKMODE:
1587 hdev->link_mode = ((__u16) dr.dev_opt) &
1588 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1592 hdev->pkt_type = (__u16) dr.dev_opt;
1596 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1597 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1601 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1602 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* NOTE(review): elided extract — several lines (error handling, kfree,
 * returns) are missing; verify against the full source. */

/* ioctl helper: copy the list of registered HCI devices (id + flags)
 * to userspace.  dev_num is read first to size the allocation and is
 * bounded to keep the kzalloc below two pages. */
1615 int hci_get_dev_list(void __user *arg)
1617 struct hci_dev *hdev;
1618 struct hci_dev_list_req *dl;
1619 struct hci_dev_req *dr;
1620 int n = 0, size, err;
1623 if (get_user(dev_num, (__u16 __user *) arg))
/* Reject zero and oversized requests (cap: 2 pages worth of entries). */
1626 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1629 size = sizeof(*dl) + dev_num * sizeof(*dr);
1631 dl = kzalloc(size, GFP_KERNEL);
1637 read_lock(&hci_dev_list_lock);
1638 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device counts as userspace interest: cancel auto-off. */
1639 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1640 cancel_delayed_work(&hdev->power_off);
/* Legacy (non-mgmt) userspace expects devices to be pairable. */
1642 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1643 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1645 (dr + n)->dev_id = hdev->id;
1646 (dr + n)->dev_opt = hdev->flags;
1651 read_unlock(&hci_dev_list_lock);
/* Shrink copy size to the number of devices actually found. */
1654 size = sizeof(*dl) + n * sizeof(*dr);
1656 err = copy_to_user(arg, dl, size);
1659 return err ? -EFAULT : 0;

/* ioctl helper: fill a hci_dev_info for one device and copy it to
 * userspace.  For LE-only controllers the ACL fields carry LE values. */
1662 int hci_get_dev_info(void __user *arg)
1664 struct hci_dev *hdev;
1665 struct hci_dev_info di;
1668 if (copy_from_user(&di, arg, sizeof(di)))
1671 hdev = hci_dev_get(di.dev_id);
1675 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1676 cancel_delayed_work_sync(&hdev->power_off);
1678 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1679 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1681 strcpy(di.name, hdev->name);
1682 di.bdaddr = hdev->bdaddr;
/* Pack bus type in the low nibble and dev_type in bits 4-5. */
1683 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1684 di.flags = hdev->flags;
1685 di.pkt_type = hdev->pkt_type;
1686 if (lmp_bredr_capable(hdev)) {
1687 di.acl_mtu = hdev->acl_mtu;
1688 di.acl_pkts = hdev->acl_pkts;
1689 di.sco_mtu = hdev->sco_mtu;
1690 di.sco_pkts = hdev->sco_pkts;
/* else branch (LE-only): reuse the ACL fields for LE buffer info. */
1692 di.acl_mtu = hdev->le_mtu;
1693 di.acl_pkts = hdev->le_pkts;
1697 di.link_policy = hdev->link_policy;
1698 di.link_mode = hdev->link_mode;
1700 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1701 memcpy(&di.features, &hdev->features, sizeof(di.features));
1703 if (copy_to_user(arg, &di, sizeof(di)))
1711 /* ---- Interface to HCI drivers ---- */
/* NOTE(review): elided extract — braces/returns missing; verify against
 * the full source. */

/* rfkill callback: reflect the rfkill soft-block state into
 * HCI_RFKILLED and power the device down when it becomes blocked
 * (unless it is still in the HCI_SETUP phase). */
1713 static int hci_rfkill_set_block(void *data, bool blocked)
1715 struct hci_dev *hdev = data;
1717 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User-channel devices are controlled exclusively by their owner. */
1719 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1723 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725 hci_dev_do_close(hdev);
/* else branch: unblocked — clear the flag. */
1727 clear_bit(HCI_RFKILLED, &hdev->dev_flags);

1733 static const struct rfkill_ops hci_rfkill_ops = {
1734 .set_block = hci_rfkill_set_block,

/* Work item: power on the controller.  After a successful open, a few
 * conditions ignored during setup (rfkill, missing address) are
 * re-checked and may force the device straight back off. */
1737 static void hci_power_on(struct work_struct *work)
1739 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1742 BT_DBG("%s", hdev->name);
1744 err = hci_dev_do_open(hdev);
1746 mgmt_set_powered_failed(hdev, err);
1750 /* During the HCI setup phase, a few error conditions are
1751 * ignored and they need to be checked now. If they are still
1752 * valid, it is important to turn the device back off.
/* Power back off if rfkilled, or if a BR/EDR controller has neither a
 * public nor a static address configured. */
1754 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1755 (hdev->dev_type == HCI_BREDR &&
1756 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1757 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1758 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1759 hci_dev_do_close(hdev);
1760 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
/* Auto-powered devices get switched off again after a grace period. */
1761 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1762 HCI_AUTO_OFF_TIMEOUT);
/* Leaving setup: announce the controller index to mgmt. */
1765 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1766 mgmt_index_added(hdev);

/* Delayed work item: power the controller off. */
1769 static void hci_power_off(struct work_struct *work)
1771 struct hci_dev *hdev = container_of(work, struct hci_dev,
1774 BT_DBG("%s", hdev->name);
1776 hci_dev_do_close(hdev);

/* Delayed work item: end the discoverable period by restoring
 * page-scan-only mode and clearing the discoverable timeout. */
1779 static void hci_discov_off(struct work_struct *work)
1781 struct hci_dev *hdev;
1782 u8 scan = SCAN_PAGE;
1784 hdev = container_of(work, struct hci_dev, discov_off.work);
1786 BT_DBG("%s", hdev->name);
1790 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1792 hdev->discov_timeout = 0;
1794 hci_dev_unlock(hdev);
/* NOTE(review): elided extract — kfree/return lines are missing from
 * these list-clear helpers; verify against the full source. */

/* Free every UUID registered on this device. */
1797 int hci_uuids_clear(struct hci_dev *hdev)
1799 struct bt_uuid *uuid, *tmp;
1801 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1802 list_del(&uuid->list);

/* Free every stored BR/EDR link key. */
1809 int hci_link_keys_clear(struct hci_dev *hdev)
1811 struct list_head *p, *n;
1813 list_for_each_safe(p, n, &hdev->link_keys) {
1814 struct link_key *key;
1816 key = list_entry(p, struct link_key, list);

/* Free every stored SMP long-term key. */
1825 int hci_smp_ltks_clear(struct hci_dev *hdev)
1827 struct smp_ltk *k, *tmp;
1829 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {

/* Look up the stored link key for a given remote address, or NULL. */
1837 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1841 list_for_each_entry(k, &hdev->link_keys, list)
1842 if (bacmp(bdaddr, &k->bdaddr) == 0)

/* Decide whether a newly created link key should be stored
 * persistently, based on key type and both sides' authentication
 * requirements (Bluetooth Core Spec bonding rules). */
1848 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1849 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) — decision elided in this extract. */
1852 if (key_type < 0x03)
1855 /* Debug keys are insecure so don't store them persistently */
1856 if (key_type == HCI_LK_DEBUG_COMBINATION)
1859 /* Changed combination key and there's no previous one */
1860 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1863 /* Security mode 3 case */
1867 /* Neither local nor remote side had no-bonding as requirement */
1868 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1871 /* Local side had dedicated bonding as requirement */
1872 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1875 /* Remote side had dedicated bonding as requirement */
1876 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1879 /* If none of the above criteria match, then don't store the key

/* Find an LTK matching the given EDiv and 8-byte Rand, or NULL. */
1884 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1888 list_for_each_entry(k, &hdev->long_term_keys, list) {
1889 if (k->ediv != ediv ||
1890 memcmp(rand, k->rand, sizeof(k->rand)))

/* Find an LTK by remote address and address type, or NULL. */
1899 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1904 list_for_each_entry(k, &hdev->long_term_keys, list)
1905 if (addr_type == k->bdaddr_type &&
1906 bacmp(bdaddr, &k->bdaddr) == 0)
/* NOTE(review): elided extract — allocation-failure handling, returns and
 * some branches are missing; verify against the full source. */

/* Store (or update) a BR/EDR link key for bdaddr.  Decides persistence
 * via hci_persistent_key() and, for new keys, notifies mgmt. */
1912 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1913 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1915 struct link_key *key, *old_key;
1919 old_key = hci_find_link_key(hdev, bdaddr);
1921 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key type". */
1924 old_key_type = conn ? conn->key_type : 0xff;
1925 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1928 list_add(&key->list, &hdev->link_keys);
1931 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1933 /* Some buggy controller combinations generate a changed
1934 * combination key for legacy pairing even when there's no
/* ... previous key to change (comment truncated in this extract). */
1936 if (type == HCI_LK_CHANGED_COMBINATION &&
1937 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1938 type = HCI_LK_COMBINATION;
1940 conn->key_type = type;
1943 bacpy(&key->bdaddr, bdaddr);
1944 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1945 key->pin_len = pin_len;
/* A "changed" key keeps its original type on record. */
1947 if (type == HCI_LK_CHANGED_COMBINATION)
1948 key->type = old_key_type;
1955 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1957 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection drops. */
1960 conn->flush_key = !persistent;

/* Store (or update) an SMP STK/LTK for bdaddr/addr_type and notify
 * mgmt for LTKs. */
1965 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1966 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1969 struct smp_ltk *key, *old_key;
/* Only STK or LTK types are accepted. */
1971 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1974 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1978 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1981 list_add(&key->list, &hdev->long_term_keys);
1984 bacpy(&key->bdaddr, bdaddr);
1985 key->bdaddr_type = addr_type;
1986 memcpy(key->val, tk, sizeof(key->val));
1987 key->authenticated = authenticated;
1989 key->enc_size = enc_size;
1991 memcpy(key->rand, rand, sizeof(key->rand));
1996 if (type & HCI_SMP_LTK)
1997 mgmt_new_ltk(hdev, key, 1);

/* Remove the stored link key for bdaddr, if any. */
2002 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2004 struct link_key *key;
2006 key = hci_find_link_key(hdev, bdaddr);
2010 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2012 list_del(&key->list);

/* Remove all LTKs stored for bdaddr. */
2018 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2020 struct smp_ltk *k, *tmp;
2022 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2023 if (bacmp(bdaddr, &k->bdaddr))
2026 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
/* NOTE(review): elided extract — returns and error paths missing; verify
 * against the full source. */

2035 /* HCI command timer function */
/* Fires when a sent HCI command gets no completion in time: log the
 * stuck opcode, then release the command credit and re-kick cmd_work
 * so the command queue does not stall forever. */
2036 static void hci_cmd_timeout(unsigned long arg)
2038 struct hci_dev *hdev = (void *) arg;
2040 if (hdev->sent_cmd) {
2041 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2042 u16 opcode = __le16_to_cpu(sent->opcode);
2044 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
/* else branch: no record of which command stalled. */
2046 BT_ERR("%s command tx timeout", hdev->name);
2049 atomic_set(&hdev->cmd_cnt, 1);
2050 queue_work(hdev->workqueue, &hdev->cmd_work);

/* Look up stored remote out-of-band pairing data for bdaddr, or NULL. */
2053 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2056 struct oob_data *data;
2058 list_for_each_entry(data, &hdev->remote_oob_data, list)
2059 if (bacmp(bdaddr, &data->bdaddr) == 0)

/* Remove stored remote OOB data for bdaddr, if any. */
2065 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2067 struct oob_data *data;
2069 data = hci_find_remote_oob_data(hdev, bdaddr);
2073 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2075 list_del(&data->list);

/* Free all stored remote OOB data entries. */
2081 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2083 struct oob_data *data, *n;
2085 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2086 list_del(&data->list);

/* Store (or update) remote OOB hash + randomizer for bdaddr. */
2093 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2096 struct oob_data *data;
2098 data = hci_find_remote_oob_data(hdev, bdaddr);
/* Not found: allocate a fresh entry and link it in. */
2101 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2105 bacpy(&data->bdaddr, bdaddr);
2106 list_add(&data->list, &hdev->remote_oob_data);
2109 memcpy(data->hash, hash, sizeof(data->hash));
2110 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2112 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* NOTE(review): elided extract — returns/kfree lines missing; verify
 * against the full source. */

/* Find a blacklist entry matching bdaddr, or NULL. */
2117 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2119 struct bdaddr_list *b;
2121 list_for_each_entry(b, &hdev->blacklist, list)
2122 if (bacmp(bdaddr, &b->bdaddr) == 0)

/* Free every entry on the device blacklist. */
2128 int hci_blacklist_clear(struct hci_dev *hdev)
2130 struct list_head *p, *n;
2132 list_for_each_safe(p, n, &hdev->blacklist) {
2133 struct bdaddr_list *b;
2135 b = list_entry(p, struct bdaddr_list, list);

/* Add bdaddr to the blacklist (rejecting BDADDR_ANY and duplicates)
 * and notify mgmt that the device is now blocked. */
2144 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2146 struct bdaddr_list *entry;
2148 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2151 if (hci_blacklist_lookup(hdev, bdaddr))
2154 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL)
2158 bacpy(&entry->bdaddr, bdaddr);
2160 list_add(&entry->list, &hdev->blacklist);
2162 return mgmt_device_blocked(hdev, bdaddr, type);

/* Remove bdaddr from the blacklist; BDADDR_ANY clears the whole list.
 * Notifies mgmt that the device is unblocked. */
2165 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2167 struct bdaddr_list *entry;
2169 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2170 return hci_blacklist_clear(hdev);
2172 entry = hci_blacklist_lookup(hdev, bdaddr);
2176 list_del(&entry->list);
2179 return mgmt_device_unblocked(hdev, bdaddr, type);
/* NOTE(review): elided extract — braces/returns and some case labels are
 * missing; verify against the full source. */

/* Request-complete callback: if inquiry failed to start, reset the
 * discovery state machine to STOPPED. */
2182 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2185 BT_ERR("Failed to start inquiry: status %d", status);
2188 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2189 hci_dev_unlock(hdev);

/* Completion handler for the LE-scan-disable request.  For LE-only
 * discovery we are done; for interleaved discovery, follow up with a
 * classic inquiry. */
2194 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2196 /* General inquiry access code (GIAC) */
2197 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2198 struct hci_request req;
2199 struct hci_cp_inquiry cp;
2203 BT_ERR("Failed to disable LE scanning: status %d", status);
2207 switch (hdev->discovery.type) {
2208 case DISCOV_TYPE_LE:
2210 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2211 hci_dev_unlock(hdev);
2214 case DISCOV_TYPE_INTERLEAVED:
2215 hci_req_init(&req, hdev);
2217 memset(&cp, 0, sizeof(cp));
2218 memcpy(&cp.lap, lap, sizeof(cp.lap));
2219 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2220 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2224 hci_inquiry_cache_flush(hdev);
2226 err = hci_req_run(&req, inquiry_complete);
2228 BT_ERR("Inquiry request failed: err %d", err);
2229 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2232 hci_dev_unlock(hdev);

/* Delayed work item: stop an ongoing LE scan by sending
 * LE Set Scan Enable (disable) as an async request. */
2237 static void le_scan_disable_work(struct work_struct *work)
2239 struct hci_dev *hdev = container_of(work, struct hci_dev,
2240 le_scan_disable.work);
2241 struct hci_cp_le_set_scan_enable cp;
2242 struct hci_request req;
2245 BT_DBG("%s", hdev->name);
2247 hci_req_init(&req, hdev);
2249 memset(&cp, 0, sizeof(cp));
2250 cp.enable = LE_SCAN_DISABLE;
2251 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2253 err = hci_req_run(&req, le_scan_disable_work_complete);
2255 BT_ERR("Disable LE scanning request failed: err %d", err);
/* NOTE(review): elided extract — allocation-failure check and return
 * statement missing; verify against the full source. */

2258 /* Alloc HCI device */
/* Allocate and initialize a hci_dev: default parameters, locks, the
 * various key/data lists, work items, packet queues, the command
 * timer, sysfs and discovery state.  Caller registers it later via
 * hci_register_dev(). */
2259 struct hci_dev *hci_alloc_dev(void)
2261 struct hci_dev *hdev;
2263 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative defaults until the controller reports its features. */
2267 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2268 hdev->esco_type = (ESCO_HV1);
2269 hdev->link_mode = (HCI_LM_ACCEPT);
2270 hdev->io_capability = 0x03; /* No Input No Output */
2271 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2272 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff intervals in slots (0.625 ms units). */
2274 hdev->sniff_max_interval = 800;
2275 hdev->sniff_min_interval = 80;
2277 hdev->le_scan_interval = 0x0060;
2278 hdev->le_scan_window = 0x0030;
2280 mutex_init(&hdev->lock);
2281 mutex_init(&hdev->req_lock);
2283 INIT_LIST_HEAD(&hdev->mgmt_pending);
2284 INIT_LIST_HEAD(&hdev->blacklist);
2285 INIT_LIST_HEAD(&hdev->uuids);
2286 INIT_LIST_HEAD(&hdev->link_keys);
2287 INIT_LIST_HEAD(&hdev->long_term_keys);
2288 INIT_LIST_HEAD(&hdev->remote_oob_data);
2289 INIT_LIST_HEAD(&hdev->conn_hash.list);
2291 INIT_WORK(&hdev->rx_work, hci_rx_work);
2292 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2293 INIT_WORK(&hdev->tx_work, hci_tx_work);
2294 INIT_WORK(&hdev->power_on, hci_power_on);
2296 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2297 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2298 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2300 skb_queue_head_init(&hdev->rx_q);
2301 skb_queue_head_init(&hdev->cmd_q);
2302 skb_queue_head_init(&hdev->raw_q);
2304 init_waitqueue_head(&hdev->req_wait_q);
2306 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2308 hci_init_sysfs(hdev);
2309 discovery_init(hdev);
2313 EXPORT_SYMBOL(hci_alloc_dev);

2315 /* Free HCI device */
/* Drop the device reference; the struct is freed by the device-model
 * release callback once the last reference is gone. */
2316 void hci_free_dev(struct hci_dev *hdev)
2318 /* will free via device release */
2319 put_device(&hdev->dev);
2321 EXPORT_SYMBOL(hci_free_dev);
/* NOTE(review): elided extract — several error-path labels, returns and
 * braces are missing; verify against the full source. */

2323 /* Register HCI device */
/* Register a previously allocated hci_dev: pick an index via the IDA,
 * create the per-device workqueues, sysfs entries and rfkill switch,
 * add it to the global device list and schedule initial power-on. */
2324 int hci_register_dev(struct hci_dev *hdev)
/* A driver must provide at least open() and close(). */
2328 if (!hdev->open || !hdev->close)
2331 /* Do not allow HCI_AMP devices to register at index 0,
2332 * so the index can be used as the AMP controller ID.
2334 switch (hdev->dev_type) {
2336 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
/* AMP case: indices start at 1 (see comment above). */
2339 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2348 sprintf(hdev->name, "hci%d", id);
2351 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2353 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2354 WQ_MEM_RECLAIM, 1, hdev->name);
2355 if (!hdev->workqueue) {
/* Separate ordered workqueue for synchronous HCI requests. */
2360 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2361 WQ_MEM_RECLAIM, 1, hdev->name);
2362 if (!hdev->req_workqueue) {
2363 destroy_workqueue(hdev->workqueue);
2368 error = hci_add_sysfs(hdev);
2372 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2373 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: run without rfkill. */
2376 if (rfkill_register(hdev->rfkill) < 0) {
2377 rfkill_destroy(hdev->rfkill);
2378 hdev->rfkill = NULL;
/* Honor an already-blocked switch at registration time. */
2382 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2383 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2385 set_bit(HCI_SETUP, &hdev->dev_flags);
2386 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2388 if (hdev->dev_type == HCI_BREDR) {
2389 /* Assume BR/EDR support until proven otherwise (such as
2390 * through reading supported features during init.
2392 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2395 write_lock(&hci_dev_list_lock);
2396 list_add(&hdev->list, &hci_dev_list);
2397 write_unlock(&hci_dev_list_lock);
2399 hci_notify(hdev, HCI_DEV_REG);
2402 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path (labels elided in this extract). */
2407 destroy_workqueue(hdev->workqueue);
2408 destroy_workqueue(hdev->req_workqueue);
2410 ida_simple_remove(&hci_index_ida, hdev->id);
2414 EXPORT_SYMBOL(hci_register_dev);
/* NOTE(review): elided extract — some statements (id capture, braces)
 * are missing; verify against the full source. */

2416 /* Unregister HCI device */
/* Tear down a registered device: mark it unregistering, unlink it from
 * the global list, close it, flush pending work, notify mgmt, remove
 * rfkill/sysfs, destroy workqueues, free all stored key/data lists
 * and release the index back to the IDA. */
2417 void hci_unregister_dev(struct hci_dev *hdev)
2421 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2423 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2427 write_lock(&hci_dev_list_lock);
2428 list_del(&hdev->list);
2429 write_unlock(&hci_dev_list_lock);
2431 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets. */
2433 for (i = 0; i < NUM_REASSEMBLY; i++)
2434 kfree_skb(hdev->reassembly[i]);
2436 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt if the device finished setup. */
2438 if (!test_bit(HCI_INIT, &hdev->flags) &&
2439 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2441 mgmt_index_removed(hdev);
2442 hci_dev_unlock(hdev);
2445 /* mgmt_index_removed should take care of emptying the
/* ... pending-command list (comment truncated in this extract). */
2447 BUG_ON(!list_empty(&hdev->mgmt_pending));
2449 hci_notify(hdev, HCI_DEV_UNREG);
2452 rfkill_unregister(hdev->rfkill);
2453 rfkill_destroy(hdev->rfkill);
2456 hci_del_sysfs(hdev);
2458 destroy_workqueue(hdev->workqueue);
2459 destroy_workqueue(hdev->req_workqueue);
/* Free all persisted security material and block lists. */
2462 hci_blacklist_clear(hdev);
2463 hci_uuids_clear(hdev);
2464 hci_link_keys_clear(hdev);
2465 hci_smp_ltks_clear(hdev);
2466 hci_remote_oob_data_clear(hdev);
2467 hci_dev_unlock(hdev);
2471 ida_simple_remove(&hci_index_ida, id);
2473 EXPORT_SYMBOL(hci_unregister_dev);
/* NOTE(review): elided extract — returns and error handling missing;
 * verify against the full source. */

2475 /* Suspend HCI device */
/* Notify listeners (HCI sockets) that the device is suspending. */
2476 int hci_suspend_dev(struct hci_dev *hdev)
2478 hci_notify(hdev, HCI_DEV_SUSPEND);
2481 EXPORT_SYMBOL(hci_suspend_dev);

2483 /* Resume HCI device */
/* Notify listeners that the device has resumed. */
2484 int hci_resume_dev(struct hci_dev *hdev)
2486 hci_notify(hdev, HCI_DEV_RESUME);
2489 EXPORT_SYMBOL(hci_resume_dev);

2491 /* Receive frame from HCI drivers */
/* Driver RX entry point: timestamp the frame, mark it incoming, queue
 * it on rx_q and kick rx_work.  Frames are only accepted while the
 * device is up or still initializing. */
2492 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2494 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2495 && !test_bit(HCI_INIT, &hdev->flags))) {
2501 bt_cb(skb)->incoming = 1;
2504 __net_timestamp(skb);
2506 skb_queue_tail(&hdev->rx_q, skb);
2507 queue_work(hdev->workqueue, &hdev->rx_work);
2511 EXPORT_SYMBOL(hci_recv_frame);
/* NOTE(review): elided extract — the surrounding while-loop, several
 * returns and break statements are missing; verify against the full
 * source before trusting the control flow described here. */

/* Incrementally reassemble an HCI packet of the given type from a raw
 * byte stream into hdev->reassembly[index].  Allocates the skb on
 * first call, learns the expected payload length from the packet
 * header once enough bytes arrived, and hands the completed frame to
 * hci_recv_frame().  Returns the number of unconsumed bytes (per the
 * callers below) or a negative error. */
2513 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2514 int count, __u8 index)
2519 struct sk_buff *skb;
2520 struct bt_skb_cb *scb;
2522 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2523 index >= NUM_REASSEMBLY)
2526 skb = hdev->reassembly[index];
/* No skb yet: size a new one for the worst case of this packet type. */
2530 case HCI_ACLDATA_PKT:
2531 len = HCI_MAX_FRAME_SIZE;
2532 hlen = HCI_ACL_HDR_SIZE;
2535 len = HCI_MAX_EVENT_SIZE;
2536 hlen = HCI_EVENT_HDR_SIZE;
2538 case HCI_SCODATA_PKT:
2539 len = HCI_MAX_SCO_SIZE;
2540 hlen = HCI_SCO_HDR_SIZE;
2544 skb = bt_skb_alloc(len, GFP_ATOMIC);
2548 scb = (void *) skb->cb;
2550 scb->pkt_type = type;
2552 hdev->reassembly[index] = skb;
/* Copy as many bytes as we still expect (header or payload). */
2556 scb = (void *) skb->cb;
2557 len = min_t(uint, scb->expect, count);
2559 memcpy(skb_put(skb, len), data, len);
/* Full event header received: payload length is in h->plen. */
2568 if (skb->len == HCI_EVENT_HDR_SIZE) {
2569 struct hci_event_hdr *h = hci_event_hdr(skb);
2570 scb->expect = h->plen;
/* Advertised payload would overflow the skb: drop and resync. */
2572 if (skb_tailroom(skb) < scb->expect) {
2574 hdev->reassembly[index] = NULL;
2580 case HCI_ACLDATA_PKT:
2581 if (skb->len == HCI_ACL_HDR_SIZE) {
2582 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2583 scb->expect = __le16_to_cpu(h->dlen);
2585 if (skb_tailroom(skb) < scb->expect) {
2587 hdev->reassembly[index] = NULL;
2593 case HCI_SCODATA_PKT:
2594 if (skb->len == HCI_SCO_HDR_SIZE) {
2595 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2596 scb->expect = h->dlen;
2598 if (skb_tailroom(skb) < scb->expect) {
2600 hdev->reassembly[index] = NULL;
2607 if (scb->expect == 0) {
2608 /* Complete frame */
2610 bt_cb(skb)->pkt_type = type;
2611 hci_recv_frame(hdev, skb);
2613 hdev->reassembly[index] = NULL;

/* Driver helper: feed a fragment of a typed packet into the
 * per-type reassembly slot (index = type - 1). */
2621 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2625 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2629 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the bytes hci_reassembly() consumed. */
2633 data += (count - rem);
2639 EXPORT_SYMBOL(hci_recv_fragment);

2641 #define STREAM_REASSEMBLY 0

/* Driver helper for UART-style byte streams: the first byte of each
 * frame is the packet-type indicator, then the typed reassembly above
 * takes over using the dedicated STREAM_REASSEMBLY slot. */
2643 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2649 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2652 struct { char type; } *pkt;
2654 /* Start of the frame */
2661 type = bt_cb(skb)->pkt_type;
2663 rem = hci_reassembly(hdev, type, data, count,
2668 data += (count - rem);
2674 EXPORT_SYMBOL(hci_recv_stream_fragment);
/* NOTE(review): elided extract — return statements missing; verify
 * against the full source. */

2676 /* ---- Interface to upper protocols ---- */

/* Add an upper-protocol callback set to the global cb list. */
2678 int hci_register_cb(struct hci_cb *cb)
2680 BT_DBG("%p name %s", cb, cb->name);
2682 write_lock(&hci_cb_list_lock);
2683 list_add(&cb->list, &hci_cb_list);
2684 write_unlock(&hci_cb_list_lock);
2688 EXPORT_SYMBOL(hci_register_cb);

/* Remove an upper-protocol callback set from the global cb list. */
2690 int hci_unregister_cb(struct hci_cb *cb)
2692 BT_DBG("%p name %s", cb, cb->name);
2694 write_lock(&hci_cb_list_lock);
2695 list_del(&cb->list);
2696 write_unlock(&hci_cb_list_lock);
2700 EXPORT_SYMBOL(hci_unregister_cb);

/* Hand one frame to the driver: timestamp it, mirror a copy to the
 * monitor socket (and to raw sockets when in promiscuous mode), then
 * call the driver's send() hook. */
2702 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2704 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2707 __net_timestamp(skb);
2709 /* Send copy to monitor */
2710 hci_send_to_monitor(hdev, skb);
2712 if (atomic_read(&hdev->promisc)) {
2713 /* Send copy to the sockets */
2714 hci_send_to_sock(hdev, skb);
2717 /* Get rid of skb owner, prior to sending to the driver. */
2720 if (hdev->send(hdev, skb) < 0)
2721 BT_ERR("%s sending frame failed", hdev->name);
/* NOTE(review): elided extract — returns, error checks and braces are
 * missing throughout; verify against the full source. */

/* Initialize an async HCI request: empty command queue bound to hdev. */
2724 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2726 skb_queue_head_init(&req->cmd_q);

/* Submit a built request: tag the last queued command with the
 * completion callback, splice the request's commands onto the device
 * command queue under its lock, and kick cmd_work. */
2731 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2733 struct hci_dev *hdev = req->hdev;
2734 struct sk_buff *skb;
2735 unsigned long flags;
2737 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2739 /* If an error occured during request building, remove all HCI
2740 * commands queued on the HCI request queue.
2743 skb_queue_purge(&req->cmd_q);
2747 /* Do not allow empty requests */
2748 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the final command of the request. */
2751 skb = skb_peek_tail(&req->cmd_q);
2752 bt_cb(skb)->req.complete = complete;
2754 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2755 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2756 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2758 queue_work(hdev->workqueue, &hdev->cmd_work);

/* Build an skb holding one HCI command: header (opcode + plen)
 * followed by the parameter block. */
2763 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2764 u32 plen, const void *param)
2766 int len = HCI_COMMAND_HDR_SIZE + plen;
2767 struct hci_command_hdr *hdr;
2768 struct sk_buff *skb;
2770 skb = bt_skb_alloc(len, GFP_ATOMIC);
2774 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2775 hdr->opcode = cpu_to_le16(opcode);
2779 memcpy(skb_put(skb, plen), param, plen);
2781 BT_DBG("skb len %d", skb->len);
2783 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

2788 /* Send HCI command */
/* Queue a single stand-alone HCI command and kick cmd_work. */
2789 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2792 struct sk_buff *skb;
2794 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2796 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2798 BT_ERR("%s no memory for command", hdev->name);
2802 /* Stand-alone HCI commands must be flaged as
2803 * single-command requests.
2805 bt_cb(skb)->req.start = true;
2807 skb_queue_tail(&hdev->cmd_q, skb);
2808 queue_work(hdev->workqueue, &hdev->cmd_work);

2813 /* Queue a command to an asynchronous HCI request */
/* Like hci_send_cmd() but appends to a request being built; 'event'
 * optionally names the HCI event that completes this command. */
2814 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2815 const void *param, u8 event)
2817 struct hci_dev *hdev = req->hdev;
2818 struct sk_buff *skb;
2820 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2822 /* If an error occured during request building, there is no point in
2823 * queueing the HCI command. We can simply return.
2828 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2830 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2831 hdev->name, opcode);
/* The first command queued marks the start of the request. */
2836 if (skb_queue_empty(&req->cmd_q))
2837 bt_cb(skb)->req.start = true;
2839 bt_cb(skb)->req.event = event;
2841 skb_queue_tail(&req->cmd_q, skb);

/* Convenience wrapper: queue a command with no special completion
 * event (event = 0). */
2844 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2847 hci_req_add_ev(req, opcode, plen, param, 0);

2850 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command,
 * but only if its opcode matches; NULL otherwise. */
2851 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2853 struct hci_command_hdr *hdr;
2855 if (!hdev->sent_cmd)
2858 hdr = (void *) hdev->sent_cmd->data;
2860 if (hdr->opcode != cpu_to_le16(opcode))
2863 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2865 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* NOTE(review): elided extract — loop constructs, breaks and returns are
 * missing; verify against the full source. */

/* Prepend an ACL data header (packed handle+flags, data length) to skb. */
2869 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2871 struct hci_acl_hdr *hdr;
2874 skb_push(skb, HCI_ACL_HDR_SIZE);
2875 skb_reset_transport_header(skb);
2876 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2877 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2878 hdr->dlen = cpu_to_le16(len);

/* Queue an ACL skb (and any fragments on its frag_list) onto the given
 * per-channel queue, adding an ACL header to each fragment.  On AMP
 * controllers the channel handle is used instead of the connection
 * handle. */
2881 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2882 struct sk_buff *skb, __u16 flags)
2884 struct hci_conn *conn = chan->conn;
2885 struct hci_dev *hdev = conn->hdev;
2886 struct sk_buff *list;
2888 skb->len = skb_headlen(skb);
2891 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2893 switch (hdev->dev_type) {
2895 hci_add_acl_hdr(skb, conn->handle, flags);
/* AMP case: address by channel handle. */
2898 hci_add_acl_hdr(skb, chan->handle, flags);
2901 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2905 list = skb_shinfo(skb)->frag_list;
2907 /* Non fragmented */
2908 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2910 skb_queue_tail(queue, skb);
/* Fragmented path below. */
2913 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2915 skb_shinfo(skb)->frag_list = NULL;
2917 /* Queue all fragments atomically */
2918 spin_lock(&queue->lock);
2920 __skb_queue_tail(queue, skb);
/* Continuation fragments carry the ACL_CONT flag, not ACL_START. */
2922 flags &= ~ACL_START;
2925 skb = list; list = list->next;
2927 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2928 hci_add_acl_hdr(skb, conn->handle, flags);
2930 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2932 __skb_queue_tail(queue, skb);
2935 spin_unlock(&queue->lock);

/* Public ACL send entry point: queue on the channel and kick tx_work. */
2939 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2941 struct hci_dev *hdev = chan->conn->hdev;
2943 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2945 hci_queue_acl(chan, &chan->data_q, skb, flags);
2947 queue_work(hdev->workqueue, &hdev->tx_work);

/* Public SCO send entry point: prepend the SCO header, queue on the
 * connection and kick tx_work. */
2951 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2953 struct hci_dev *hdev = conn->hdev;
2954 struct hci_sco_hdr hdr;
2956 BT_DBG("%s len %d", hdev->name, skb->len);
2958 hdr.handle = cpu_to_le16(conn->handle);
2959 hdr.dlen = skb->len;
2961 skb_push(skb, HCI_SCO_HDR_SIZE);
2962 skb_reset_transport_header(skb);
2963 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2965 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2967 skb_queue_tail(&conn->data_q, skb);
2968 queue_work(hdev->workqueue, &hdev->tx_work);
/* NOTE(review): elided extract — RCU lock/unlock calls, braces, breaks
 * and returns are missing throughout; verify against the full source. */

2971 /* ---- HCI TX task (outgoing data) ---- */

2973 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fairness), and compute its send quota from
 * the controller's free buffer count for that link type. */
2974 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2977 struct hci_conn_hash *h = &hdev->conn_hash;
2978 struct hci_conn *conn = NULL, *c;
2979 unsigned int num = 0, min = ~0;
2981 /* We don't have to lock device here. Connections are always
2982 * added and removed with TX task disabled. */
2986 list_for_each_entry_rcu(c, &h->list, list) {
2987 if (c->type != type || skb_queue_empty(&c->data_q))
2990 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
2995 if (c->sent < min) {
/* Early exit once all connections of this type were examined. */
3000 if (hci_conn_num(hdev, type) == num)
3009 switch (conn->type) {
3011 cnt = hdev->acl_cnt;
3015 cnt = hdev->sco_cnt;
/* LE falls back to the ACL buffer pool when no LE pool exists. */
3018 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3022 BT_ERR("Unknown link type");
3030 BT_DBG("conn %p quote %d", conn, *quote);

/* Link-supervision timeout handler: disconnect every connection of the
 * given type that still has unacked packets. */
3034 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3036 struct hci_conn_hash *h = &hdev->conn_hash;
3039 BT_ERR("%s link tx timeout", hdev->name);
3043 /* Kill stalled connections */
3044 list_for_each_entry_rcu(c, &h->list, list) {
3045 if (c->type == type && c->sent) {
3046 BT_ERR("%s killing stalled connection %pMR",
3047 hdev->name, &c->dst);
3048 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);

/* Channel scheduler: like hci_low_sent() but at hci_chan granularity,
 * preferring the highest skb priority seen and, within a priority,
 * the connection with the fewest in-flight packets. */
3055 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3058 struct hci_conn_hash *h = &hdev->conn_hash;
3059 struct hci_chan *chan = NULL;
3060 unsigned int num = 0, min = ~0, cur_prio = 0;
3061 struct hci_conn *conn;
3062 int cnt, q, conn_num = 0;
3064 BT_DBG("%s", hdev->name);
3068 list_for_each_entry_rcu(conn, &h->list, list) {
3069 struct hci_chan *tmp;
3071 if (conn->type != type)
3074 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3079 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3080 struct sk_buff *skb;
3082 if (skb_queue_empty(&tmp->data_q))
3085 skb = skb_peek(&tmp->data_q);
/* Skip channels whose head skb is below the best priority so far. */
3086 if (skb->priority < cur_prio)
3089 if (skb->priority > cur_prio) {
3092 cur_prio = skb->priority;
3097 if (conn->sent < min) {
3103 if (hci_conn_num(hdev, type) == conn_num)
3112 switch (chan->conn->type) {
3114 cnt = hdev->acl_cnt;
/* AMP case: block-based credits. */
3117 cnt = hdev->block_cnt;
3121 cnt = hdev->sco_cnt;
3124 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3128 BT_ERR("Unknown link type");
3133 BT_DBG("chan %p quote %d", chan, *quote);

/* Anti-starvation pass: after a scheduling round, bump the priority of
 * every still-queued head skb toward HCI_PRIO_MAX - 1 so lower-priority
 * channels eventually get served. */
3137 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3139 struct hci_conn_hash *h = &hdev->conn_hash;
3140 struct hci_conn *conn;
3143 BT_DBG("%s", hdev->name);
3147 list_for_each_entry_rcu(conn, &h->list, list) {
3148 struct hci_chan *chan;
3150 if (conn->type != type)
3153 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3158 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3159 struct sk_buff *skb;
3166 if (skb_queue_empty(&chan->data_q))
3169 skb = skb_peek(&chan->data_q);
3170 if (skb->priority >= HCI_PRIO_MAX - 1)
3173 skb->priority = HCI_PRIO_MAX - 1;
3175 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3179 if (hci_conn_num(hdev, type) == num)

/* Number of controller data blocks this ACL packet occupies. */
3187 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3189 /* Calculate count of blocks used by this packet */
3190 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3193 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3195 if (!test_bit(HCI_RAW, &hdev->flags)) {
3196 /* ACL tx timeout must be longer than maximum
3197 * link supervision timeout (40.9 seconds) */
3198 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3199 HCI_ACL_TX_TIMEOUT))
3200 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler.
 *
 * While the controller still has ACL packet credits (hdev->acl_cnt),
 * repeatedly ask hci_chan_sent() for the next channel to serve together
 * with its fair-share quota, then drain up to that many skbs from the
 * channel's queue.  Within one quota, stop as soon as the head skb's
 * priority drops below the priority seen at the start, so higher
 * priority frames are flushed first.  Finally, if anything was sent,
 * rerun the anti-starvation pass.
 *
 * NOTE(review): this listing elides lines (the `int quote;` declaration,
 * the `break` for the priority check, and the acl_cnt/chan->sent credit
 * bookkeeping around original lines 3231-3236).  Also `"e` on the
 * hci_chan_sent() call below is an HTML-entity mangling of `&quote` --
 * verify against the complete file.
 */
3204 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3206 unsigned int cnt = hdev->acl_cnt;
3207 struct hci_chan *chan;
3208 struct sk_buff *skb;
/* May declare an ACL TX timeout if credits have been stuck at zero. */
3211 __check_timeout(hdev, cnt);
3213 while (hdev->acl_cnt &&
3214 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3215 u32 priority = (skb_peek(&chan->data_q))->priority;
3216 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3217 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3218 skb->len, skb->priority);
3220 /* Stop if priority has changed */
3221 if (skb->priority < priority)
/* Peeked above; actually remove the skb only once we commit to it. */
3224 skb = skb_dequeue(&chan->data_q);
3226 hci_conn_enter_active_mode(chan->conn,
3227 bt_cb(skb)->force_active);
3229 hci_send_frame(hdev, skb);
3230 hdev->acl_last_tx = jiffies;
/* Something was transmitted this round -- rebalance priorities. */
3238 if (cnt != hdev->acl_cnt)
3239 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (data-block flow control, used by AMP).
 *
 * Same round-robin/priority structure as hci_sched_acl_pkt(), but the
 * budget is hdev->block_cnt data blocks rather than whole packets: each
 * skb consumes __get_blocks() blocks and transmission stops when the
 * next skb would not fit in the remaining budget.
 *
 * NOTE(review): this listing elides lines (the `int quote;`/`u8 type;`
 * declarations, the link-type selection for the HCI_AMP case, and the
 * `break` bodies).  `"e` on the hci_chan_sent() call below is an
 * HTML-entity mangling of `&quote` -- verify against the complete file.
 */
3242 static void hci_sched_acl_blk(struct hci_dev *hdev)
3244 unsigned int cnt = hdev->block_cnt;
3245 struct hci_chan *chan;
3246 struct sk_buff *skb;
/* May declare an ACL TX timeout if block credits are stuck at zero. */
3250 __check_timeout(hdev, cnt);
3252 BT_DBG("%s", hdev->name);
/* AMP controllers schedule AMP_LINK traffic here (assignment elided). */
3254 if (hdev->dev_type == HCI_AMP)
3259 while (hdev->block_cnt > 0 &&
3260 (chan = hci_chan_sent(hdev, type, "e))) {
3261 u32 priority = (skb_peek(&chan->data_q))->priority;
3262 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3265 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3266 skb->len, skb->priority);
3268 /* Stop if priority has changed */
3269 if (skb->priority < priority)
3272 skb = skb_dequeue(&chan->data_q);
/* Stop if this packet needs more blocks than remain in the budget. */
3274 blocks = __get_blocks(hdev, skb);
3275 if (blocks > hdev->block_cnt)
3278 hci_conn_enter_active_mode(chan->conn,
3279 bt_cb(skb)->force_active);
3281 hci_send_frame(hdev, skb);
3282 hdev->acl_last_tx = jiffies;
/* Charge the whole skb's block count against controller and channel. */
3284 hdev->block_cnt -= blocks;
3287 chan->sent += blocks;
3288 chan->conn->sent += blocks;
/* Something was transmitted this round -- rebalance priorities. */
3292 if (cnt != hdev->block_cnt)
3293 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the flow-control mode the controller uses.
 *
 * Bails out early when there is no traffic this controller type can
 * carry, then picks the packet-based or block-based scheduler from
 * hdev->flow_ctl_mode.
 *
 * NOTE(review): this listing elides lines (the `return;` bodies of the
 * two guards and the `break;` statements of the switch cases) -- verify
 * against the complete file.
 */
3296 static void hci_sched_acl(struct hci_dev *hdev)
3298 BT_DBG("%s", hdev->name);
3300 /* No ACL link over BR/EDR controller */
3301 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3304 /* No AMP link over AMP controller */
3305 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3308 switch (hdev->flow_ctl_mode) {
3309 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3310 hci_sched_acl_pkt(hdev);
3313 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3314 hci_sched_acl_blk(hdev);
3320 static void hci_sched_sco(struct hci_dev *hdev)
3322 struct hci_conn *conn;
3323 struct sk_buff *skb;
3326 BT_DBG("%s", hdev->name);
3328 if (!hci_conn_num(hdev, SCO_LINK))
3331 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3332 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3333 BT_DBG("skb %p len %d", skb, skb->len);
3334 hci_send_frame(hdev, skb);
3337 if (conn->sent == ~0)
3343 static void hci_sched_esco(struct hci_dev *hdev)
3345 struct hci_conn *conn;
3346 struct sk_buff *skb;
3349 BT_DBG("%s", hdev->name);
3351 if (!hci_conn_num(hdev, ESCO_LINK))
3354 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3356 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3357 BT_DBG("skb %p len %d", skb, skb->len);
3358 hci_send_frame(hdev, skb);
3361 if (conn->sent == ~0)
/* LE data scheduler.
 *
 * Controllers without a dedicated LE buffer pool (hdev->le_pkts == 0)
 * borrow ACL credits, so the working budget `cnt` is le_cnt or acl_cnt
 * accordingly.  Channel selection and the per-quota priority cutoff
 * mirror hci_sched_acl_pkt().  A stuck LE queue (no credits for > 45 s,
 * longer than the 40.9 s maximum link supervision timeout) raises an LE
 * link TX timeout.
 *
 * NOTE(review): this listing elides lines (the `break` for the priority
 * check, the cnt/chan->sent bookkeeping, the `tmp = cnt` snapshot and
 * the `hdev->le_cnt = cnt` branch paired with the acl_cnt write-back
 * below).  `"e` on the hci_chan_sent() call is an HTML-entity mangling
 * of `&quote` -- verify against the complete file.
 */
3367 static void hci_sched_le(struct hci_dev *hdev)
3369 struct hci_chan *chan;
3370 struct sk_buff *skb;
3371 int quote, cnt, tmp;
3373 BT_DBG("%s", hdev->name);
3375 if (!hci_conn_num(hdev, LE_LINK))
3378 if (!test_bit(HCI_RAW, &hdev->flags)) {
3379 /* LE tx timeout must be longer than maximum
3380 * link supervision timeout (40.9 seconds) */
3381 if (!hdev->le_cnt && hdev->le_pkts &&
3382 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3383 hci_link_tx_to(hdev, LE_LINK);
/* Borrow ACL credits when the controller has no LE buffer pool. */
3386 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3388 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3389 u32 priority = (skb_peek(&chan->data_q))->priority;
3390 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3391 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3392 skb->len, skb->priority);
3394 /* Stop if priority has changed */
3395 if (skb->priority < priority)
3398 skb = skb_dequeue(&chan->data_q);
3400 hci_send_frame(hdev, skb);
3401 hdev->le_last_tx = jiffies;
/* Write the remaining budget back to the pool it was borrowed from. */
3412 hdev->acl_cnt = cnt;
3415 hci_prio_recalculate(hdev, LE_LINK);
/* TX work handler: drive all per-link-type schedulers, then flush raw_q.
 *
 * Skips the schedulers entirely in user-channel mode, where a userspace
 * process owns the device and the stack must not inject traffic.  Raw
 * (unknown-type) packets are always drained last.
 *
 * NOTE(review): this listing elides lines -- presumably including an
 * hci_sched_le() call after hci_sched_esco() and the closing brace of
 * the user-channel guard -- verify against the complete file.
 */
3418 static void hci_tx_work(struct work_struct *work)
3420 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3421 struct sk_buff *skb;
3423 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3424 hdev->sco_cnt, hdev->le_cnt);
3426 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3427 /* Schedule queues and send stuff to HCI driver */
3428 hci_sched_acl(hdev);
3429 hci_sched_sco(hdev);
3430 hci_sched_esco(hdev);
3434 /* Send next queued raw (unknown type) packet */
3435 while ((skb = skb_dequeue(&hdev->raw_q)))
3436 hci_send_frame(hdev, skb);
3439 /* ----- HCI RX task (incoming data processing) ----- */
3441 /* ACL data packet */
/* RX path for an ACL data packet.
 *
 * Strips the ACL header, splits the 16-bit handle field into the
 * connection handle and the packet-boundary/broadcast flags, resolves
 * the connection under the device lock, and hands the payload to L2CAP.
 * Unknown handles are logged.
 *
 * NOTE(review): this listing elides lines (the hci_dev_lock() call
 * before the lookup, the `if (conn)` / `else` framing around the two
 * branches below, and the kfree_skb() for the unknown-handle case) --
 * verify against the complete file.
 */
3442 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3444 struct hci_acl_hdr *hdr = (void *) skb->data;
3445 struct hci_conn *conn;
3446 __u16 handle, flags;
3448 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The wire handle field packs flags in the top bits; split it. */
3450 handle = __le16_to_cpu(hdr->handle);
3451 flags = hci_flags(handle);
3452 handle = hci_handle(handle);
3454 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3457 hdev->stat.acl_rx++;
3460 conn = hci_conn_hash_lookup_handle(hdev, handle);
3461 hci_dev_unlock(hdev);
/* Inbound data counts as activity; leave sniff/park if needed. */
3464 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3466 /* Send to upper protocol */
3467 l2cap_recv_acldata(conn, skb, flags);
3470 BT_ERR("%s ACL packet for unknown connection handle %d",
3471 hdev->name, handle);
3477 /* SCO data packet */
/* RX path for a SCO data packet.
 *
 * Strips the SCO header, resolves the connection handle under the
 * device lock, and hands the payload to the SCO layer.  Unknown handles
 * are logged.
 *
 * NOTE(review): this listing elides lines (the `__u16 handle;`
 * declaration, the hci_dev_lock() call, the `if (conn)` / `else`
 * framing, and the kfree_skb() for the unknown-handle case) -- verify
 * against the complete file.
 */
3478 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3480 struct hci_sco_hdr *hdr = (void *) skb->data;
3481 struct hci_conn *conn;
3484 skb_pull(skb, HCI_SCO_HDR_SIZE);
3486 handle = __le16_to_cpu(hdr->handle);
3488 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3490 hdev->stat.sco_rx++;
3493 conn = hci_conn_hash_lookup_handle(hdev, handle);
3494 hci_dev_unlock(hdev);
3497 /* Send to upper protocol */
3498 sco_recv_scodata(conn, skb);
3501 BT_ERR("%s SCO packet for unknown connection handle %d",
3502 hdev->name, handle);
/* Return whether the current HCI request has no more queued commands.
 *
 * Commands belonging to one request are queued consecutively on cmd_q
 * with req.start set only on the first command of each request; if the
 * head of the queue starts a new request, the current one is complete.
 *
 * NOTE(review): this listing elides the empty-queue case (presumably
 * `if (!skb) return true;`) -- verify against the complete file.
 */
3508 static bool hci_req_is_complete(struct hci_dev *hdev)
3510 struct sk_buff *skb;
3512 skb = skb_peek(&hdev->cmd_q);
3516 return bt_cb(skb)->req.start;
/* Re-queue the last sent command for retransmission.
 *
 * Used when a spontaneous reset-complete event from the controller (see
 * hci_req_cmd_complete()) orphans the command that was actually in
 * flight.  HCI_OP_RESET itself is never resent.  The clone of sent_cmd
 * is pushed to the HEAD of cmd_q so it goes out before anything queued
 * after it, then the command work is kicked.
 *
 * NOTE(review): this listing elides lines (the `u16 opcode;`
 * declaration, the `return` bodies of the guards, and the NULL check on
 * the skb_clone() result) -- verify against the complete file.
 */
3519 static void hci_resend_last(struct hci_dev *hdev)
3521 struct hci_command_hdr *sent;
3522 struct sk_buff *skb;
3525 if (!hdev->sent_cmd)
3528 sent = (void *) hdev->sent_cmd->data;
3529 opcode = __le16_to_cpu(sent->opcode);
/* Never replay a reset -- the controller just did one. */
3530 if (opcode == HCI_OP_RESET)
3533 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3537 skb_queue_head(&hdev->cmd_q, skb);
3538 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of @opcode with @status in the context of a
 * multi-command request.
 *
 * Finds the request-level completion callback: either on the command
 * that was in flight (hdev->sent_cmd) or, on failure, by flushing the
 * remaining commands of the same request from cmd_q.  The callback, if
 * found, is invoked once with @status.
 *
 * NOTE(review): this listing elides lines (the early `return`s after
 * the resend/incomplete checks, the `break` after finding req.complete
 * in the flush loop, and the `if (req_complete)` guard before the final
 * call) -- verify against the complete file.
 */
3541 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3543 hci_req_complete_t req_complete = NULL;
3544 struct sk_buff *skb;
3545 unsigned long flags;
3547 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3549 /* If the completed command doesn't match the last one that was
3550 * sent we need to do special handling of it.
3552 if (!hci_sent_cmd_data(hdev, opcode)) {
3553 /* Some CSR based controllers generate a spontaneous
3554 * reset complete event during init and any pending
3555 * command will never be completed. In such a case we
3556 * need to resend whatever was the last sent
3559 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3560 hci_resend_last(hdev);
3565 /* If the command succeeded and there's still more commands in
3566 * this request the request is not yet complete.
3568 if (!status && !hci_req_is_complete(hdev))
3571 /* If this was the last command in a request the complete
3572 * callback would be found in hdev->sent_cmd instead of the
3573 * command queue (hdev->cmd_q).
3575 if (hdev->sent_cmd) {
3576 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3579 /* We must set the complete callback to NULL to
3580 * avoid calling the callback more than once if
3581 * this function gets called again.
3583 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3589 /* Remove all pending commands belonging to this request */
3590 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3591 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the NEXT request: put it back and stop. */
3592 if (bt_cb(skb)->req.start) {
3593 __skb_queue_head(&hdev->cmd_q, skb);
3597 req_complete = bt_cb(skb)->req.complete;
3600 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3604 req_complete(hdev, status);
/* RX work handler: drain rx_q and dispatch each packet by type.
 *
 * Every packet is copied to the monitor socket; promiscuous mode also
 * copies it to raw sockets.  Raw-mode and user-channel devices bypass
 * stack processing entirely, and during HCI_INIT data packets are
 * dropped (only events are processed).
 *
 * NOTE(review): this listing elides lines (the kfree_skb()/continue
 * bodies of the drop paths, the HCI_EVENT_PKT case label, the `break`s,
 * and the default kfree_skb() case) -- verify against the complete
 * file.
 */
3607 static void hci_rx_work(struct work_struct *work)
3609 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3610 struct sk_buff *skb;
3612 BT_DBG("%s", hdev->name);
3614 while ((skb = skb_dequeue(&hdev->rx_q))) {
3615 /* Send copy to monitor */
3616 hci_send_to_monitor(hdev, skb);
3618 if (atomic_read(&hdev->promisc)) {
3619 /* Send copy to the sockets */
3620 hci_send_to_sock(hdev, skb);
/* Raw mode / user channel: the stack must not consume packets. */
3623 if (test_bit(HCI_RAW, &hdev->flags) ||
3624 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3629 if (test_bit(HCI_INIT, &hdev->flags)) {
3630 /* Don't process data packets in this state. */
3631 switch (bt_cb(skb)->pkt_type) {
3632 case HCI_ACLDATA_PKT:
3633 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
3640 switch (bt_cb(skb)->pkt_type) {
3642 BT_DBG("%s Event packet", hdev->name);
3643 hci_event_packet(hdev, skb);
3646 case HCI_ACLDATA_PKT:
3647 BT_DBG("%s ACL data packet", hdev->name);
3648 hci_acldata_packet(hdev, skb);
3651 case HCI_SCODATA_PKT:
3652 BT_DBG("%s SCO data packet", hdev->name);
3653 hci_scodata_packet(hdev, skb);
/* Command work handler: send the next queued HCI command.
 *
 * If the controller has a free command credit (cmd_cnt), dequeue one
 * command, remember a clone of it in hdev->sent_cmd (needed for
 * completion matching and possible resend), transmit it, and arm the
 * command timeout -- unless a reset is in flight, in which case the
 * timer is cancelled instead.  If cloning fails, the command is pushed
 * back and the work is re-queued.
 *
 * NOTE(review): this listing elides lines (the `if (!skb) return;`
 * after the dequeue, the `else` framing between the clone-success and
 * clone-failure paths, and closing braces) -- verify against the
 * complete file.
 */
3663 static void hci_cmd_work(struct work_struct *work)
3665 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3666 struct sk_buff *skb;
3668 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3669 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3671 /* Send queued commands */
3672 if (atomic_read(&hdev->cmd_cnt)) {
3673 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd clone before storing the new one. */
3677 kfree_skb(hdev->sent_cmd);
3679 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3680 if (hdev->sent_cmd) {
3681 atomic_dec(&hdev->cmd_cnt);
3682 hci_send_frame(hdev, skb);
/* A reset cancels the pending command timeout instead of re-arming. */
3683 if (test_bit(HCI_RESET, &hdev->flags))
3684 del_timer(&hdev->cmd_timer);
3686 mod_timer(&hdev->cmd_timer,
3687 jiffies + HCI_CMD_TIMEOUT);
/* Clone failed: retry this command on the next work run. */
3689 skb_queue_head(&hdev->cmd_q, skb);
3690 queue_work(hdev->workqueue, &hdev->cmd_work);
3695 u8 bdaddr_to_le(u8 bdaddr_type)
3697 switch (bdaddr_type) {
3698 case BDADDR_LE_PUBLIC:
3699 return ADDR_LE_DEV_PUBLIC;
3702 /* Fallback to LE Random address type */
3703 return ADDR_LE_DEV_RANDOM;