2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
194 static int uuids_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
200 list_for_each_entry(uuid, &hdev->uuids, list) {
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
210 seq_printf(f, "%pUb\n", val);
212 hci_dev_unlock(hdev);
217 static int uuids_open(struct inode *inode, struct file *file)
219 return single_open(file, uuids_show, inode->i_private);
222 static const struct file_operations uuids_fops = {
226 .release = single_release,
229 static int inquiry_cache_show(struct seq_file *f, void *p)
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
248 hci_dev_unlock(hdev);
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
255 return single_open(file, inquiry_cache_show, inode->i_private);
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
262 .release = single_release,
265 static int link_keys_show(struct seq_file *f, void *ptr)
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
276 hci_dev_unlock(hdev);
281 static int link_keys_open(struct inode *inode, struct file *file)
283 return single_open(file, link_keys_show, inode->i_private);
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
290 .release = single_release,
293 static int dev_class_show(struct seq_file *f, void *ptr)
295 struct hci_dev *hdev = f->private;
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
305 static int dev_class_open(struct inode *inode, struct file *file)
307 return single_open(file, dev_class_show, inode->i_private);
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
314 .release = single_release,
317 static int voice_setting_get(void *data, u64 *val)
319 struct hci_dev *hdev = data;
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
331 static int auto_accept_delay_set(void *data, u64 val)
333 struct hci_dev *hdev = data;
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
342 static int auto_accept_delay_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
359 struct hci_dev *hdev = file->private_data;
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
372 struct hci_dev *hdev = file->private_data;
374 size_t buf_size = min(count, (sizeof(buf)-1));
377 if (test_bit(HCI_UP, &hdev->flags))
380 if (copy_from_user(buf, user_buf, buf_size))
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
395 static const struct file_operations force_sc_support_fops = {
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
405 struct hci_dev *hdev = file->private_data;
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 static const struct file_operations sc_only_mode_fops = {
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
420 static int idle_timeout_set(void *data, u64 val)
422 struct hci_dev *hdev = data;
424 if (val != 0 && (val < 500 || val > 3600000))
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
434 static int idle_timeout_get(void *data, u64 *val)
436 struct hci_dev *hdev = data;
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
448 static int rpa_timeout_set(void *data, u64 val)
450 struct hci_dev *hdev = data;
452 /* Require the RPA timeout to be at least 30 seconds and at most
455 if (val < 30 || val > (60 * 60 * 24))
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
465 static int rpa_timeout_get(void *data, u64 *val)
467 struct hci_dev *hdev = data;
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
479 static int sniff_min_interval_set(void *data, u64 val)
481 struct hci_dev *hdev = data;
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
493 static int sniff_min_interval_get(void *data, u64 *val)
495 struct hci_dev *hdev = data;
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
507 static int sniff_max_interval_set(void *data, u64 val)
509 struct hci_dev *hdev = data;
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
521 static int sniff_max_interval_get(void *data, u64 *val)
523 struct hci_dev *hdev = data;
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
535 static int conn_info_min_age_set(void *data, u64 val)
537 struct hci_dev *hdev = data;
539 if (val == 0 || val > hdev->conn_info_max_age)
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
549 static int conn_info_min_age_get(void *data, u64 *val)
551 struct hci_dev *hdev = data;
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
563 static int conn_info_max_age_set(void *data, u64 val)
565 struct hci_dev *hdev = data;
567 if (val == 0 || val < hdev->conn_info_min_age)
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
577 static int conn_info_max_age_get(void *data, u64 *val)
579 struct hci_dev *hdev = data;
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
591 static int identity_show(struct seq_file *f, void *p)
593 struct hci_dev *hdev = f->private;
599 hci_copy_identity_address(hdev, &addr, &addr_type);
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
604 hci_dev_unlock(hdev);
609 static int identity_open(struct inode *inode, struct file *file)
611 return single_open(file, identity_show, inode->i_private);
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
618 .release = single_release,
621 static int random_address_show(struct seq_file *f, void *p)
623 struct hci_dev *hdev = f->private;
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
632 static int random_address_open(struct inode *inode, struct file *file)
634 return single_open(file, random_address_show, inode->i_private);
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
641 .release = single_release,
644 static int static_address_show(struct seq_file *f, void *p)
646 struct hci_dev *hdev = f->private;
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
655 static int static_address_open(struct inode *inode, struct file *file)
657 return single_open(file, static_address_show, inode->i_private);
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
664 .release = single_release,
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
671 struct hci_dev *hdev = file->private_data;
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
684 struct hci_dev *hdev = file->private_data;
686 size_t buf_size = min(count, (sizeof(buf)-1));
689 if (test_bit(HCI_UP, &hdev->flags))
692 if (copy_from_user(buf, user_buf, buf_size))
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
707 static const struct file_operations force_static_address_fops = {
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
714 static int white_list_show(struct seq_file *f, void *ptr)
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
727 static int white_list_open(struct inode *inode, struct file *file)
729 return single_open(file, white_list_show, inode->i_private);
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
736 .release = single_release,
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
751 hci_dev_unlock(hdev);
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
758 return single_open(file, identity_resolving_keys_show,
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
766 .release = single_release,
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
782 hci_dev_unlock(hdev);
787 static int long_term_keys_open(struct inode *inode, struct file *file)
789 return single_open(file, long_term_keys_show, inode->i_private);
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
796 .release = single_release,
799 static int conn_min_interval_set(void *data, u64 val)
801 struct hci_dev *hdev = data;
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
813 static int conn_min_interval_get(void *data, u64 *val)
815 struct hci_dev *hdev = data;
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
827 static int conn_max_interval_set(void *data, u64 val)
829 struct hci_dev *hdev = data;
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
841 static int conn_max_interval_get(void *data, u64 *val)
843 struct hci_dev *hdev = data;
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
855 static int conn_latency_set(void *data, u64 val)
857 struct hci_dev *hdev = data;
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
869 static int conn_latency_get(void *data, u64 *val)
871 struct hci_dev *hdev = data;
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
883 static int supervision_timeout_set(void *data, u64 val)
885 struct hci_dev *hdev = data;
887 if (val < 0x000a || val > 0x0c80)
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
897 static int supervision_timeout_get(void *data, u64 *val)
899 struct hci_dev *hdev = data;
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
911 static int adv_channel_map_set(void *data, u64 val)
913 struct hci_dev *hdev = data;
915 if (val < 0x01 || val > 0x07)
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
925 static int adv_channel_map_get(void *data, u64 *val)
927 struct hci_dev *hdev = data;
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
939 static int device_list_show(struct seq_file *f, void *ptr)
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
949 hci_dev_unlock(hdev);
954 static int device_list_open(struct inode *inode, struct file *file)
956 return single_open(file, device_list_show, inode->i_private);
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
963 .release = single_release,
966 /* ---- HCI requests ---- */
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1002 hci_dev_unlock(hdev);
1005 return ERR_PTR(-ENODATA);
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1016 if (hdr->evt != event)
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1034 if (opcode == __le16_to_cpu(ev->opcode))
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1042 return ERR_PTR(-ENODATA);
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1052 BT_DBG("%s", hdev->name);
1054 hci_req_init(&req, hdev);
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1058 hdev->req_status = HCI_REQ_PEND;
1060 err = hci_req_run(&req, hci_req_sync_complete);
1062 return ERR_PTR(err);
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1067 schedule_timeout(timeout);
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1074 switch (hdev->req_status) {
1076 err = -bt_to_errno(hdev->req_result);
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1088 hdev->req_status = hdev->req_result = 0;
1090 BT_DBG("%s end: err %d", hdev->name, err);
1093 return ERR_PTR(err);
1095 return hci_get_cmd_complete(hdev, opcode, event);
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1104 EXPORT_SYMBOL(__hci_cmd_sync);
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1110 unsigned long opt, __u32 timeout)
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1116 BT_DBG("%s start", hdev->name);
1118 hci_req_init(&req, hdev);
1120 hdev->req_status = HCI_REQ_PEND;
1124 err = hci_req_run(&req, hci_req_sync_complete);
1126 hdev->req_status = 0;
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1133 if (err == -ENODATA)
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1142 schedule_timeout(timeout);
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1146 if (signal_pending(current))
1149 switch (hdev->req_status) {
1151 err = -bt_to_errno(hdev->req_result);
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1163 hdev->req_status = hdev->req_result = 0;
1165 BT_DBG("%s end: err %d", hdev->name, err);
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1173 unsigned long opt, __u32 timeout)
1177 if (!test_bit(HCI_UP, &hdev->flags))
1180 /* Serialize all requests */
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1197 static void bredr_init(struct hci_request *req)
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1211 static void amp_init(struct hci_request *req)
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1239 struct hci_dev *hdev = req->hdev;
1241 BT_DBG("%s %ld", hdev->name, opt);
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1247 switch (hdev->dev_type) {
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1262 static void bredr_setup(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1304 static void le_setup(struct hci_request *req)
1306 struct hci_dev *hdev = req->hdev;
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1333 if (lmp_ext_inq_capable(hdev))
1336 if (lmp_inq_rssi_capable(hdev))
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1363 mode = hci_get_inquiry_mode(req->hdev);
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1368 static void hci_setup_event_mask(struct hci_request *req)
1370 struct hci_dev *hdev = req->hdev;
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* HCI init stage 2: configure controller features common to BR/EDR and
 * LE transports (event mask, SSP/EIR, inquiry mode, link security).
 * NOTE(review): this extract is missing several original lines (braces,
 * else-branches, the `mode`/`enable` locals) — comments describe only
 * the visible statements; confirm against the full source.
 */
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1444 struct hci_dev *hdev = req->hdev;
/* BR/EDR-capable setup vs. marking BR/EDR disabled on LE-only parts */
1446 if (lmp_bredr_capable(hdev))
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1451 if (lmp_le_capable(hdev))
1454 hci_setup_event_mask(req);
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1469 hdev->max_page = 0x01;
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
/* Enable SSP mode on the controller (mode local declared in a
 * line missing from this extract — TODO confirm). */
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
1476 struct hci_cp_write_eir cp;
/* SSP disabled: clear cached EIR data and write an empty EIR */
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Sync controller authentication setting with host flag */
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1506 static void hci_setup_link_policy(struct hci_request *req)
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1525 static void hci_set_le_support(struct hci_request *req)
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1534 memset(&cp, 0, sizeof(cp));
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1538 cp.simul = lmp_le_br_capable(hdev);
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* HCI init stage 3: stored-link-key cleanup, link policy, LE event
 * mask/host support, and extended feature page reads.
 * NOTE(review): several original lines are missing from this extract
 * (local declarations, sizes of the LE events array, loop page local);
 * comments only describe visible statements.
 */
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1580 struct hci_dev *hdev = req->hdev;
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1587 * Check the supported commands and only if the the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1592 * Some controllers indicate that they support handling deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Write Default Link Policy Settings is command bit 5/0x10 */
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1609 if (lmp_le_capable(hdev)) {
1612 memset(events, 0, sizeof(events));
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1626 hci_set_le_support(req);
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1641 struct hci_dev *hdev = req->hdev;
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
/* Run the four synchronous HCI init stages and, during the initial
 * HCI_SETUP phase only, create the per-controller debugfs entries.
 * Returns 0 on success or the first stage's error code.
 * NOTE(review): this extract is missing lines (error-return checks,
 * some fops arguments, closing braces); comments describe only what is
 * visible here.
 */
1661 static int __hci_init(struct hci_dev *hdev)
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1677 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1681 if (hdev->dev_type != HCI_BREDR)
/* Stages 2-4 run only for primary (BR/EDR/LE) controllers */
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Common debugfs entries for every controller type */
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
/* BR/EDR-specific debugfs entries */
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
/* Secure Simple Pairing debugfs entries */
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
/* Sniff mode tuning entries */
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
/* LE-specific debugfs entries */
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1788 &hdev->discov_interleaved_timeout);
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1798 BT_DBG("%s %x", req->hdev->name, scan);
1800 /* Inquiry and Page scans */
1801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1808 BT_DBG("%s %x", req->hdev->name, auth);
1810 /* Authentication */
1811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1818 BT_DBG("%s %x", req->hdev->name, encrypt);
1821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1826 __le16 policy = cpu_to_le16(opt);
1828 BT_DBG("%s %x", req->hdev->name, policy);
1830 /* Default link policy */
1831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1834 /* Get HCI device by index.
1835 * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1838 struct hci_dev *hdev = NULL, *d;
1840 BT_DBG("%d", index);
1845 read_lock(&hci_dev_list_lock);
1846 list_for_each_entry(d, &hci_dev_list, list) {
1847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1852 read_unlock(&hci_dev_list_lock);
1856 /* ---- Inquiry support ---- */
1858 bool hci_discovery_active(struct hci_dev *hdev)
1860 struct discovery_state *discov = &hdev->discovery;
1862 switch (discov->state) {
1863 case DISCOVERY_FINDING:
1864 case DISCOVERY_RESOLVING:
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1876 if (hdev->discovery.state == state)
1880 case DISCOVERY_STOPPED:
1881 hci_update_background_scan(hdev);
1883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
1886 case DISCOVERY_STARTING:
1888 case DISCOVERY_FINDING:
1889 mgmt_discovering(hdev, 1);
1891 case DISCOVERY_RESOLVING:
1893 case DISCOVERY_STOPPING:
1897 hdev->discovery.state = state;
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1902 struct discovery_state *cache = &hdev->discovery;
1903 struct inquiry_entry *p, *n;
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1917 struct discovery_state *cache = &hdev->discovery;
1918 struct inquiry_entry *e;
1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1933 struct discovery_state *cache = &hdev->discovery;
1934 struct inquiry_entry *e;
1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
1938 list_for_each_entry(e, &cache->unknown, list) {
1939 if (!bacmp(&e->data.bdaddr, bdaddr))
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966 struct inquiry_entry *ie)
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1972 list_del(&ie->list);
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
1976 abs(p->data.rssi) >= abs(ie->data.rssi))
1981 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache and
 * compute the MGMT device-found flags for it.
 * NOTE(review): this extract is missing lines (the name_known
 * parameter, flags initialisation, else branches, return) — comments
 * describe only the visible statements.
 */
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct inquiry_entry *ie;
1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Stale OOB data for this peer is invalidated on every sighting */
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is needed: re-sort resolve list */
2003 if (ie->name_state == NAME_NEEDED &&
2004 data->rssi != ie->data.rssi) {
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2019 list_add(&ie->all, &cache->all);
2022 ie->name_state = NAME_KNOWN;
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
/* Name just became known: drop the entry from the unknown list */
2029 if (name_known && ie->name_state != NAME_KNOWN &&
2030 ie->name_state != NAME_PENDING) {
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
2037 cache->timestamp = jiffies;
2039 if (ie->name_state == NAME_NOT_KNOWN)
2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied.
 * NOTE(review): lines are missing from this extract (the `copied`
 * counter handling, loop bound check, info++ advance, return).
 */
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2053 list_for_each_entry(e, &cache->all, all) {
2054 struct inquiry_data *data = &e->data;
/* Translate the cached data into the ioctl wire format */
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
2070 BT_DBG("cache %p, copied %d", cache, copied);
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077 struct hci_dev *hdev = req->hdev;
2078 struct hci_cp_inquiry cp;
2080 BT_DBG("%s", hdev->name);
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2092 static int wait_inquiry(void *word)
2095 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device state, optionally
 * flush a stale cache and run a new inquiry, then copy the cached
 * results back to user space.
 * NOTE(review): this extract is missing lines (error codes, labels,
 * hci_dev_lock calls, kfree/hci_dev_put cleanup); comments describe
 * only the visible statements.
 */
2098 int hci_inquiry(void __user *arg)
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2110 hdev = hci_dev_get(ir.dev_id);
/* Reject devices in states that do not allow this ioctl */
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2124 if (hdev->dev_type != HCI_BREDR) {
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush the cache when it aged out, is empty, or on request */
2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137 hci_inquiry_cache_flush(hdev);
2140 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28 s per the HCI spec; 2000 ms here
 * gives the request a generous margin — TODO confirm intent. */
2142 timeo = ir.length * msecs_to_jiffies(2000);
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2158 /* for unlimited number of responses we will use buffer with
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2163 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2164 * copy it to the user space.
2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174 hci_dev_unlock(hdev);
2176 BT_DBG("num_rsp %d", ir.num_rsp);
/* Write back the header first, then the result array */
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: open the transport, run driver setup and the HCI
 * init sequence, then mark the device up and notify listeners. On init
 * failure, tear everything back down.
 * NOTE(review): this extract is missing lines (hci_req_lock, error
 * codes, goto labels, hdev->close call); comments describe only the
 * visible statements.
 */
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2197 BT_DBG("%s %p", hdev->name, hdev);
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2215 /* Check for valid public address or a configured static
2216 * random adddress, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
/* Already powered on: nothing to do */
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2241 if (hdev->open(hdev)) {
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
/* Driver-specific setup runs only during the initial setup phase */
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250 ret = hdev->setup(hdev);
2252 /* If public address change is configured, ensure that the
2253 * address gets programmed. If the driver does not support
2254 * changing the public address, fail the power on procedure.
2256 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257 if (hdev->set_bdaddr)
2258 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2260 ret = -EADDRNOTAVAIL;
/* Full HCI init is skipped for unconfigured and user-channel use */
2264 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2266 ret = __hci_init(hdev);
2269 clear_bit(HCI_INIT, &hdev->flags);
/* Success: mark the device up and notify */
2273 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2274 set_bit(HCI_UP, &hdev->flags);
2275 hci_notify(hdev, HCI_DEV_UP);
2276 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2277 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2278 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2279 hdev->dev_type == HCI_BREDR) {
2281 mgmt_powered(hdev, 1);
2282 hci_dev_unlock(hdev);
2285 /* Init failed, cleanup */
2286 flush_work(&hdev->tx_work);
2287 flush_work(&hdev->cmd_work);
2288 flush_work(&hdev->rx_work);
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->rx_q);
2296 if (hdev->sent_cmd) {
2297 kfree_skb(hdev->sent_cmd);
2298 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW; every other runtime flag is cleared */
2302 hdev->flags &= BIT(HCI_RAW);
2306 hci_req_unlock(hdev);
2310 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device, enforce the
 * unconfigured/user-channel policy, flush pending power work, and call
 * hci_dev_do_open().
 * NOTE(review): lines missing from this extract (error codes, done
 * label, hci_dev_put); comments describe only the visible statements.
 */
2312 int hci_dev_open(__u16 dev)
2314 struct hci_dev *hdev;
2317 hdev = hci_dev_get(dev);
2321 /* Devices that are marked as unconfigured can only be powered
2322 * up as user channel. Trying to bring them up as normal devices
2323 * will result into a failure. Only user channel operation is
2326 * When this function is called for a user channel, the flag
2327 * HCI_USER_CHANNEL will be set first before attempting to
2330 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2331 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2336 /* We need to ensure that no other power on/off work is pending
2337 * before proceeding to call hci_dev_do_open. This is
2338 * particularly important if the setup procedure has not yet
2341 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342 cancel_delayed_work(&hdev->power_off);
2344 /* After this call it is guaranteed that the setup procedure
2345 * has finished. This means that error conditions like RFKILL
2346 * or no valid public or static random address apply.
2348 flush_workqueue(hdev->req_workqueue);
2350 err = hci_dev_do_open(hdev);
2357 /* This function requires the caller holds hdev->lock */
2358 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2360 struct hci_conn_params *p;
2362 list_for_each_entry(p, &hdev->le_conn_params, list)
2363 list_del_init(&p->action);
2365 BT_DBG("All LE pending actions cleared");
/* Core power-off path: cancel pending work, flush queues and caches,
 * optionally send HCI Reset, and drop non-persistent state.
 * NOTE(review): this extract is missing lines (hci_req_lock, the
 * hdev->flush/close driver calls, return) — comments describe only the
 * visible statements.
 */
2368 static int hci_dev_do_close(struct hci_dev *hdev)
2370 BT_DBG("%s %p", hdev->name, hdev);
2372 cancel_delayed_work(&hdev->power_off);
2374 hci_req_cancel(hdev, ENODEV);
/* Device already down: just make sure the command timer is gone */
2377 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2378 cancel_delayed_work_sync(&hdev->cmd_timer);
2379 hci_req_unlock(hdev);
2383 /* Flush RX and TX works */
2384 flush_work(&hdev->tx_work);
2385 flush_work(&hdev->rx_work);
2387 if (hdev->discov_timeout > 0) {
2388 cancel_delayed_work(&hdev->discov_off);
2389 hdev->discov_timeout = 0;
2390 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2391 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2394 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2395 cancel_delayed_work(&hdev->service_cache);
2397 cancel_delayed_work_sync(&hdev->le_scan_disable);
2399 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2400 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Flush discovery and connection state under hdev->lock */
2403 hci_inquiry_cache_flush(hdev);
2404 hci_conn_hash_flush(hdev);
2405 hci_pend_le_actions_clear(hdev);
2406 hci_dev_unlock(hdev);
2408 hci_notify(hdev, HCI_DEV_DOWN);
2414 skb_queue_purge(&hdev->cmd_q);
2415 atomic_set(&hdev->cmd_cnt, 1);
/* Send HCI Reset on close unless auto-off/unconfigured, and only
 * for controllers that request it via the quirk */
2416 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2417 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2418 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2419 set_bit(HCI_INIT, &hdev->flags);
2420 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2421 clear_bit(HCI_INIT, &hdev->flags);
2424 /* flush cmd work */
2425 flush_work(&hdev->cmd_work);
2428 skb_queue_purge(&hdev->rx_q);
2429 skb_queue_purge(&hdev->cmd_q);
2430 skb_queue_purge(&hdev->raw_q);
2432 /* Drop last sent command */
2433 if (hdev->sent_cmd) {
2434 cancel_delayed_work_sync(&hdev->cmd_timer);
2435 kfree_skb(hdev->sent_cmd);
2436 hdev->sent_cmd = NULL;
2439 kfree_skb(hdev->recv_evt);
2440 hdev->recv_evt = NULL;
2442 /* After this point our queues are empty
2443 * and no tasks are scheduled. */
/* Keep only HCI_RAW and persistent dev_flags across the close */
2447 hdev->flags &= BIT(HCI_RAW);
2448 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2450 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2451 if (hdev->dev_type == HCI_BREDR) {
2453 mgmt_powered(hdev, 0);
2454 hci_dev_unlock(hdev);
2458 /* Controller radio is available but is currently powered down */
2459 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2461 memset(hdev->eir, 0, sizeof(hdev->eir));
2462 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2463 bacpy(&hdev->random_addr, BDADDR_ANY);
2465 hci_req_unlock(hdev);
2471 int hci_dev_close(__u16 dev)
2473 struct hci_dev *hdev;
2476 hdev = hci_dev_get(dev);
2480 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2485 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2486 cancel_delayed_work(&hdev->power_off);
2488 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: drop queues, flush caches/connections and
 * issue a synchronous HCI Reset to the controller.
 * NOTE(review): this extract is missing lines (hci_req_lock, error
 * codes, done label, hdev->flush, hci_dev_put); comments describe only
 * the visible statements.
 */
2495 int hci_dev_reset(__u16 dev)
2497 struct hci_dev *hdev;
2500 hdev = hci_dev_get(dev);
2506 if (!test_bit(HCI_UP, &hdev->flags)) {
2511 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2516 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues and flush discovery/connection state */
2522 skb_queue_purge(&hdev->rx_q);
2523 skb_queue_purge(&hdev->cmd_q);
2526 hci_inquiry_cache_flush(hdev);
2527 hci_conn_hash_flush(hdev);
2528 hci_dev_unlock(hdev);
2533 atomic_set(&hdev->cmd_cnt, 1);
2534 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2536 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2539 hci_req_unlock(hdev);
2544 int hci_dev_reset_stat(__u16 dev)
2546 struct hci_dev *hdev;
2549 hdev = hci_dev_get(dev);
2553 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2558 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2563 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: copy the request from user space
 * and apply the requested setting, either by synchronous HCI request
 * or by writing the hdev field directly.
 * NOTE(review): the switch case labels (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, ...) and break/error lines are missing
 * from this extract; comments describe only the visible statements.
 */
2570 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2572 struct hci_dev *hdev;
2573 struct hci_dev_req dr;
2576 if (copy_from_user(&dr, arg, sizeof(dr)))
2579 hdev = hci_dev_get(dr.dev_id);
/* Policy checks: user channel, unconfigured, AMP, BR/EDR disabled */
2583 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2588 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2593 if (hdev->dev_type != HCI_BREDR) {
2598 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2605 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2610 if (!lmp_encrypt_capable(hdev)) {
2615 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2616 /* Auth must be enabled first */
2617 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2623 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2628 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2633 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2637 case HCISETLINKMODE:
2638 hdev->link_mode = ((__u16) dr.dev_opt) &
2639 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2643 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half and packet count in the low */
2647 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2648 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2652 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2653 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: return the id and flags of up to
 * dev_num registered controllers to user space.
 * NOTE(review): lines are missing from this extract (dev_num local,
 * dr initialisation from dl->dev_req, loop bound break, kfree);
 * comments describe only the visible statements.
 */
2666 int hci_get_dev_list(void __user *arg)
2668 struct hci_dev *hdev;
2669 struct hci_dev_list_req *dl;
2670 struct hci_dev_req *dr;
2671 int n = 0, size, err;
2674 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the request so the kmalloc below stays reasonable */
2677 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2680 size = sizeof(*dl) + dev_num * sizeof(*dr);
2682 dl = kzalloc(size, GFP_KERNEL);
2688 read_lock(&hci_dev_list_lock);
2689 list_for_each_entry(hdev, &hci_dev_list, list) {
2690 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2691 cancel_delayed_work(&hdev->power_off);
/* Devices not managed over mgmt are treated as legacy-pairable */
2693 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2694 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2696 (dr + n)->dev_id = hdev->id;
2697 (dr + n)->dev_opt = hdev->flags;
2702 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2705 size = sizeof(*dl) + n * sizeof(*dr);
2707 err = copy_to_user(arg, dl, size);
2710 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot of
 * the named controller and copy it to user space.
 * NOTE(review): lines are missing from this extract (err local,
 * hci_dev_put, return) — comments describe only the visible
 * statements.
 */
2713 int hci_get_dev_info(void __user *arg)
2715 struct hci_dev *hdev;
2716 struct hci_dev_info di;
2719 if (copy_from_user(&di, arg, sizeof(di)))
2722 hdev = hci_dev_get(di.dev_id);
2726 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2727 cancel_delayed_work_sync(&hdev->power_off);
/* Devices not managed over mgmt are treated as legacy-pairable */
2729 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2730 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2732 strcpy(di.name, hdev->name);
2733 di.bdaddr = hdev->bdaddr;
2734 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2735 di.flags = hdev->flags;
2736 di.pkt_type = hdev->pkt_type;
/* For LE-only controllers the LE buffer numbers are reported in
 * the ACL fields of the legacy ioctl structure */
2737 if (lmp_bredr_capable(hdev)) {
2738 di.acl_mtu = hdev->acl_mtu;
2739 di.acl_pkts = hdev->acl_pkts;
2740 di.sco_mtu = hdev->sco_mtu;
2741 di.sco_pkts = hdev->sco_pkts;
2743 di.acl_mtu = hdev->le_mtu;
2744 di.acl_pkts = hdev->le_pkts;
2748 di.link_policy = hdev->link_policy;
2749 di.link_mode = hdev->link_mode;
2751 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2752 memcpy(&di.features, &hdev->features, sizeof(di.features));
2754 if (copy_to_user(arg, &di, sizeof(di)))
2762 /* ---- Interface to HCI drivers ---- */
2764 static int hci_rfkill_set_block(void *data, bool blocked)
2766 struct hci_dev *hdev = data;
2768 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2770 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2774 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2775 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2776 hci_dev_do_close(hdev);
2778 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table: only the block/unblock hook is provided */
2784 static const struct rfkill_ops hci_rfkill_ops = {
2785 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check the error
 * conditions that setup ignored, arm the auto-off timer, and announce
 * the new index over mgmt once setup completes.
 * NOTE(review): lines are missing from this extract (err local,
 * return, closing braces); comments describe only the visible
 * statements.
 */
2788 static void hci_power_on(struct work_struct *work)
2790 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2793 BT_DBG("%s", hdev->name);
2795 err = hci_dev_do_open(hdev);
2797 mgmt_set_powered_failed(hdev, err);
2801 /* During the HCI setup phase, a few error conditions are
2802 * ignored and they need to be checked now. If they are still
2803 * valid, it is important to turn the device back off.
2805 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2806 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2807 (hdev->dev_type == HCI_BREDR &&
2808 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2809 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2810 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2811 hci_dev_do_close(hdev);
2812 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2813 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2814 HCI_AUTO_OFF_TIMEOUT);
2817 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2818 /* For unconfigured devices, set the HCI_RAW flag
2819 * so that userspace can easily identify them.
2821 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2822 set_bit(HCI_RAW, &hdev->flags)
2824 /* For fully configured devices, this will send
2825 * the Index Added event. For unconfigured devices,
2826 * it will send Unconfigued Index Added event.
2828 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2829 * and no event will be send.
2831 mgmt_index_added(hdev);
2835 static void hci_power_off(struct work_struct *work)
2837 struct hci_dev *hdev = container_of(work, struct hci_dev,
2840 BT_DBG("%s", hdev->name);
2842 hci_dev_do_close(hdev);
2845 static void hci_discov_off(struct work_struct *work)
2847 struct hci_dev *hdev;
2849 hdev = container_of(work, struct hci_dev, discov_off.work);
2851 BT_DBG("%s", hdev->name);
2853 mgmt_discoverable_timeout(hdev);
2856 void hci_uuids_clear(struct hci_dev *hdev)
2858 struct bt_uuid *uuid, *tmp;
2860 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2861 list_del(&uuid->list);
2866 void hci_link_keys_clear(struct hci_dev *hdev)
2868 struct list_head *p, *n;
2870 list_for_each_safe(p, n, &hdev->link_keys) {
2871 struct link_key *key;
2873 key = list_entry(p, struct link_key, list);
2880 void hci_smp_ltks_clear(struct hci_dev *hdev)
2882 struct smp_ltk *k, *tmp;
2884 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2890 void hci_smp_irks_clear(struct hci_dev *hdev)
2892 struct smp_irk *k, *tmp;
2894 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2900 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2904 list_for_each_entry(k, &hdev->link_keys, list)
2905 if (bacmp(bdaddr, &k->bdaddr) == 0)
2911 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2912 u8 key_type, u8 old_key_type)
2915 if (key_type < 0x03)
2918 /* Debug keys are insecure so don't store them persistently */
2919 if (key_type == HCI_LK_DEBUG_COMBINATION)
2922 /* Changed combination key and there's no previous one */
2923 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2926 /* Security mode 3 case */
2930 /* Neither local nor remote side had no-bonding as requirement */
2931 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2934 /* Local side had dedicated bonding as requirement */
2935 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2938 /* Remote side had dedicated bonding as requirement */
2939 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2942 /* If none of the above criteria match, then don't store the key
2947 static bool ltk_type_master(u8 type)
2949 return (type == SMP_LTK);
/* LTK/IRK lookup helpers.  NOTE(review): return statements and closing
 * braces are missing from this extract; comments describe the visible
 * matching logic only.
 */

/* Find an LTK by EDIV/Rand pair and role: entries whose ediv/rand
 * differ are skipped, and the entry's master/slave role (derived from
 * its type) must match the requested one. */
2952 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2957 list_for_each_entry(k, &hdev->long_term_keys, list) {
2958 if (k->ediv != ediv || k->rand != rand)
2961 if (ltk_type_master(k->type) != master)

/* Find an LTK by peer address, address type and role. */
2970 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2971 u8 addr_type, bool master)
2975 list_for_each_entry(k, &hdev->long_term_keys, list)
2976 if (addr_type == k->bdaddr_type &&
2977 bacmp(bdaddr, &k->bdaddr) == 0 &&
2978 ltk_type_master(k->type) == master)

/* Resolve a Resolvable Private Address to its IRK.  First pass: cheap
 * comparison against each IRK's cached RPA.  Second pass: run the
 * AES-based resolution (smp_irk_matches) and, on a hit, cache the RPA
 * in the IRK so the next lookup takes the fast path. */
2984 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2986 struct smp_irk *irk;
2988 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2989 if (!bacmp(&irk->rpa, rpa))
2993 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2994 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2995 bacpy(&irk->rpa, rpa);

/* Find an IRK by identity address.  A random identity address must be
 * static random, i.e. its two most significant bits set (0xc0). */
3003 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3006 struct smp_irk *irk;
3008 /* Identity Address must be public or static random */
3009 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3012 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3013 if (addr_type == irk->addr_type &&
3014 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Key insertion helpers.  NOTE(review): several statements (old-key
 * reuse branch, allocation-failure returns, final returns) are missing
 * from this extract.
 */

/* Store (or update) a BR/EDR link key.  If an old key exists its type
 * is remembered so persistence can be decided against it; otherwise the
 * 0xff sentinel (or the connection's current key type) is used.  On
 * return, *persistent (if non-NULL, presumably) reports whether the key
 * should be kept across reboots — TODO confirm NULL handling in full
 * source. */
3021 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3022 bdaddr_t *bdaddr, u8 *val, u8 type,
3023 u8 pin_len, bool *persistent)
3025 struct link_key *key, *old_key;
3028 old_key = hci_find_link_key(hdev, bdaddr);
3030 old_key_type = old_key->type;
3033 old_key_type = conn ? conn->key_type : 0xff;
3034 key = kzalloc(sizeof(*key), GFP_KERNEL);
3037 list_add(&key->list, &hdev->link_keys);
3040 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3042 /* Some buggy controller combinations generate a changed
3043 * combination key for legacy pairing even when there's no
 * previous key (comment truncated): downgrade it to a plain
 * combination key in that case. */
3045 if (type == HCI_LK_CHANGED_COMBINATION &&
3046 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3047 type = HCI_LK_COMBINATION;
 /* conn may be NULL here per the test above — guard presumably
  * exists in the elided lines; TODO confirm. */
3049 conn->key_type = type;
3052 bacpy(&key->bdaddr, bdaddr);
3053 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3054 key->pin_len = pin_len;
 /* A changed-combination key keeps reporting the old type. */
3056 if (type == HCI_LK_CHANGED_COMBINATION)
3057 key->type = old_key_type;
3062 *persistent = hci_persistent_key(hdev, conn, type,

/* Store (or update) an SMP Long Term Key, keyed by address, address
 * type and master/slave role. */
3068 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 u8 addr_type, u8 type, u8 authenticated,
3070 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3072 struct smp_ltk *key, *old_key;
3073 bool master = ltk_type_master(type);
3075 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3079 key = kzalloc(sizeof(*key), GFP_KERNEL);
3082 list_add(&key->list, &hdev->long_term_keys);
3085 bacpy(&key->bdaddr, bdaddr);
3086 key->bdaddr_type = addr_type;
3087 memcpy(key->val, tk, sizeof(key->val));
3088 key->authenticated = authenticated;
3091 key->enc_size = enc_size;

/* Store (or update) an Identity Resolving Key for the given identity
 * address, and cache the RPA it was last seen with. */
3097 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3098 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3100 struct smp_irk *irk;
3102 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3104 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3108 bacpy(&irk->bdaddr, bdaddr);
3109 irk->addr_type = addr_type;
3111 list_add(&irk->list, &hdev->identity_resolving_keys);
3114 memcpy(irk->val, val, 16);
3115 bacpy(&irk->rpa, rpa);
/* Key removal helpers.  NOTE(review): kfree/return lines are elided in
 * this extract. */

/* Remove the stored link key for bdaddr; visible code unlinks the
 * entry (ENOENT path presumably in the elided lines). */
3120 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3122 struct link_key *key;
3124 key = hci_find_link_key(hdev, bdaddr);
3128 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3130 list_del(&key->list);

/* Remove every LTK matching bdaddr+bdaddr_type.  Returns 0 when at
 * least one entry was removed, -ENOENT otherwise. */
3136 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3138 struct smp_ltk *k, *tmp;
3141 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3142 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3145 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3152 return removed ? 0 : -ENOENT;

/* Remove every IRK matching bdaddr+addr_type (void: no caller feedback). */
3155 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3157 struct smp_irk *k, *tmp;
3159 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3160 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3163 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3170 /* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time.  Logs the stuck opcode when the sent command is still around,
 * then resets cmd_cnt to 1 and kicks cmd_work so the command queue can
 * make progress again. */
3171 static void hci_cmd_timeout(struct work_struct *work)
3173 struct hci_dev *hdev = container_of(work, struct hci_dev,
3176 if (hdev->sent_cmd) {
3177 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3178 u16 opcode = __le16_to_cpu(sent->opcode);
3180 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
 /* else-branch: no record of what was sent */
3182 BT_ERR("%s command tx timeout", hdev->name);
 /* Allow one new command and restart the command work. */
3185 atomic_set(&hdev->cmd_cnt, 1);
3186 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Remote Out-Of-Band (OOB) pairing data store.  NOTE(review): returns
 * and allocation-failure paths are elided in this extract. */

/* Look up cached OOB data for bdaddr. */
3189 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3192 struct oob_data *data;
3194 list_for_each_entry(data, &hdev->remote_oob_data, list)
3195 if (bacmp(bdaddr, &data->bdaddr) == 0)

/* Remove cached OOB data for bdaddr (ENOENT path presumably elided). */
3201 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3203 struct oob_data *data;
3205 data = hci_find_remote_oob_data(hdev, bdaddr);
3209 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3211 list_del(&data->list);

/* Drop all cached remote OOB entries. */
3217 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3219 struct oob_data *data, *n;
3221 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3222 list_del(&data->list);

/* Cache legacy (P-192 only) OOB data: stores hash192/randomizer192 and
 * zeroes the P-256 fields so stale extended data cannot be used. */
3227 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3228 u8 *hash, u8 *randomizer)
3230 struct oob_data *data;
3232 data = hci_find_remote_oob_data(hdev, bdaddr);
3234 data = kmalloc(sizeof(*data), GFP_KERNEL);
3238 bacpy(&data->bdaddr, bdaddr);
3239 list_add(&data->list, &hdev->remote_oob_data);
3242 memcpy(data->hash192, hash, sizeof(data->hash192));
3243 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3245 memset(data->hash256, 0, sizeof(data->hash256));
3246 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3248 BT_DBG("%s for %pMR", hdev->name, bdaddr);

/* Cache extended (Secure Connections) OOB data: both the P-192 and
 * P-256 hash/randomizer pairs. */
3253 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 *hash192, u8 *randomizer192,
3255 u8 *hash256, u8 *randomizer256)
3257 struct oob_data *data;
3259 data = hci_find_remote_oob_data(hdev, bdaddr);
3261 data = kmalloc(sizeof(*data), GFP_KERNEL);
3265 bacpy(&data->bdaddr, bdaddr);
3266 list_add(&data->list, &hdev->remote_oob_data);
3269 memcpy(data->hash192, hash192, sizeof(data->hash192));
3270 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3272 memcpy(data->hash256, hash256, sizeof(data->hash256));
3273 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3275 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Device blacklist and LE white list management.  Both lists hold
 * struct bdaddr_list entries keyed by address + address type.
 * NOTE(review): error returns (-EBADF/-EEXIST/-ENOMEM/-ENOENT style)
 * and mgmt notification lines are elided in this extract. */

/* Find a blacklist entry matching bdaddr+type. */
3280 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3281 bdaddr_t *bdaddr, u8 type)
3283 struct bdaddr_list *b;
3285 list_for_each_entry(b, &hdev->blacklist, list) {
3286 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

/* Free all blacklist entries. */
3293 static void hci_blacklist_clear(struct hci_dev *hdev)
3295 struct list_head *p, *n;
3297 list_for_each_safe(p, n, &hdev->blacklist) {
3298 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

/* Add bdaddr+type to the blacklist.  BDADDR_ANY is rejected, as are
 * duplicates (via the lookup above). */
3305 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3307 struct bdaddr_list *entry;
3309 if (!bacmp(bdaddr, BDADDR_ANY))
3312 if (hci_blacklist_lookup(hdev, bdaddr, type))
3315 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3319 bacpy(&entry->bdaddr, bdaddr);
3320 entry->bdaddr_type = type;
3322 list_add(&entry->list, &hdev->blacklist);

/* Remove bdaddr+type from the blacklist; BDADDR_ANY clears the whole
 * list instead of removing a single entry. */
3327 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3329 struct bdaddr_list *entry;
3331 if (!bacmp(bdaddr, BDADDR_ANY)) {
3332 hci_blacklist_clear(hdev);
3336 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3340 list_del(&entry->list);

/* Find an LE white list entry matching bdaddr+type. */
3346 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3347 bdaddr_t *bdaddr, u8 type)
3349 struct bdaddr_list *b;
3351 list_for_each_entry(b, &hdev->le_white_list, list) {
3352 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

/* Free all LE white list entries. */
3359 void hci_white_list_clear(struct hci_dev *hdev)
3361 struct list_head *p, *n;
3363 list_for_each_safe(p, n, &hdev->le_white_list) {
3364 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

/* Add bdaddr+type to the LE white list.  Unlike the blacklist add,
 * no duplicate check is visible here — TODO confirm whether callers
 * guarantee uniqueness. */
3371 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3373 struct bdaddr_list *entry;
3375 if (!bacmp(bdaddr, BDADDR_ANY))
3378 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3382 bacpy(&entry->bdaddr, bdaddr);
3383 entry->bdaddr_type = type;
3385 list_add(&entry->list, &hdev->le_white_list);

/* Remove bdaddr+type from the LE white list (BDADDR_ANY rejected). */
3390 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3392 struct bdaddr_list *entry;
3394 if (!bacmp(bdaddr, BDADDR_ANY))
3397 entry = hci_white_list_lookup(hdev, bdaddr, type);
3401 list_del(&entry->list);
/* NOTE(review): "¶ms" / "¶m" in the lines below are mojibake for
 * "&params" / "&param" (an "&para" entity got decoded) — fix the
 * encoding in the real source; code left byte-identical here. */

3407 /* This function requires the caller holds hdev->lock */
/* Find stored LE connection parameters for addr+addr_type.  Only
 * identity addresses are stored, so anything else short-circuits. */
3408 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3409 bdaddr_t *addr, u8 addr_type)
3411 struct hci_conn_params *params;
3413 /* The conn params list only contains identity addresses */
3414 if (!hci_is_identity_address(addr, addr_type))
3417 list_for_each_entry(params, &hdev->le_conn_params, list) {
3418 if (bacmp(¶ms->addr, addr) == 0 &&
3419 params->addr_type == addr_type) {

/* True when an LE link to addr of the given type exists and is in
 * BT_CONNECTED state (elided lines presumably handle the no-conn
 * case). */
3427 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3429 struct hci_conn *conn;
3431 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3435 if (conn->dst_type != type)
3438 if (conn->state != BT_CONNECTED)

3444 /* This function requires the caller holds hdev->lock */
/* Like hci_conn_params_lookup() but searches the pending-LE-connection
 * list, which is chained through the params' "action" member. */
3445 struct hci_conn_params *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3446 bdaddr_t *addr, u8 addr_type)
3448 struct hci_conn_params *param;
3450 /* The list only contains identity addresses */
3451 if (!hci_is_identity_address(addr, addr_type))
3454 list_for_each_entry(param, &hdev->pend_le_conns, action) {
3455 if (bacmp(¶m->addr, addr) == 0 &&
3456 param->addr_type == addr_type)
/* NOTE(review): "¶ms" below is mojibake for "&params" — encoding
 * damage in the extract, code left byte-identical. */

3463 /* This function requires the caller holds hdev->lock */
/* Get-or-create connection parameters for addr+addr_type.  A new entry
 * is initialised from the adapter-wide LE defaults with auto-connect
 * disabled; "action" starts as an empty list head so it can later be
 * chained onto pend_le_conns or pend_le_reports. */
3464 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3465 bdaddr_t *addr, u8 addr_type)
3467 struct hci_conn_params *params;
3469 if (!hci_is_identity_address(addr, addr_type))
3472 params = hci_conn_params_lookup(hdev, addr, addr_type);
3476 params = kzalloc(sizeof(*params), GFP_KERNEL);
3478 BT_ERR("Out of memory");
3482 bacpy(¶ms->addr, addr);
3483 params->addr_type = addr_type;
3485 list_add(¶ms->list, &hdev->le_conn_params);
3486 INIT_LIST_HEAD(¶ms->action);
3488 params->conn_min_interval = hdev->le_conn_min_interval;
3489 params->conn_max_interval = hdev->le_conn_max_interval;
3490 params->conn_latency = hdev->le_conn_latency;
3491 params->supervision_timeout = hdev->le_supv_timeout;
3492 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3494 BT_DBG("addr %pMR (type %u)", addr, addr_type);

3499 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for a device.  The entry is first
 * unhooked from whatever action list it was on, then re-queued onto
 * pend_le_reports (REPORT) or pend_le_conns (ALWAYS, and only when not
 * already connected); background scanning is refreshed in each case.
 * Break statements between cases are elided in this extract. */
3500 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3503 struct hci_conn_params *params;
3505 params = hci_conn_params_add(hdev, addr, addr_type);
3509 if (params->auto_connect == auto_connect)
3512 list_del_init(¶ms->action);
3514 switch (auto_connect) {
3515 case HCI_AUTO_CONN_DISABLED:
3516 case HCI_AUTO_CONN_LINK_LOSS:
3517 hci_update_background_scan(hdev);
3519 case HCI_AUTO_CONN_REPORT:
3520 list_add(¶ms->action, &hdev->pend_le_reports);
3521 hci_update_background_scan(hdev);
3523 case HCI_AUTO_CONN_ALWAYS:
3524 if (!is_connected(hdev, addr, addr_type)) {
3525 list_add(¶ms->action, &hdev->pend_le_conns);
3526 hci_update_background_scan(hdev);
3531 params->auto_connect = auto_connect;
3533 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,

3539 /* This function requires the caller holds hdev->lock */
/* Delete one device's connection parameters: unhook from both the
 * action list and the main list, then refresh background scanning. */
3540 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3542 struct hci_conn_params *params;
3544 params = hci_conn_params_lookup(hdev, addr, addr_type);
3548 list_del(¶ms->action);
3549 list_del(¶ms->list);
3552 hci_update_background_scan(hdev);
3554 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* NOTE(review): "¶ms" below is mojibake for "&params"; kfree lines
 * are elided in this extract. */

3557 /* This function requires the caller holds hdev->lock */
/* Remove every params entry whose policy is AUTO_CONN_DISABLED.
 * Disabled entries are never on an action list, so only the main list
 * link is dropped and no scan update is needed. */
3558 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3560 struct hci_conn_params *params, *tmp;
3562 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3563 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3565 list_del(¶ms->list);
3569 BT_DBG("All LE disabled connection parameters were removed");

3572 /* This function requires the caller holds hdev->lock */
/* Remove every params entry with a non-disabled policy: drop both the
 * action link and the main link, then refresh background scanning. */
3573 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3575 struct hci_conn_params *params, *tmp;
3577 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3578 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3580 list_del(¶ms->action);
3581 list_del(¶ms->list);
3585 hci_update_background_scan(hdev);
3587 BT_DBG("All enabled LE connection parameters were removed");

3590 /* This function requires the caller holds hdev->lock */
/* Remove every params entry unconditionally (adapter teardown path). */
3591 void hci_conn_params_clear_all(struct hci_dev *hdev)
3593 struct hci_conn_params *params, *tmp;
3595 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3596 list_del(¶ms->action);
3597 list_del(¶ms->list);
3601 hci_update_background_scan(hdev);
3603 BT_DBG("All LE connection parameters were removed");
/* LE scan-disable machinery for interleaved discovery.  NOTE(review):
 * several lines (status checks, hci_dev_lock calls, default case) are
 * elided in this extract. */

/* Request-complete callback for the follow-up inquiry: on failure,
 * log and mark discovery stopped (under hdev lock, per the visible
 * unlock). */
3606 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3609 BT_ERR("Failed to start inquiry: status %d", status);
3612 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3613 hci_dev_unlock(hdev);

/* Runs once LE scanning has been disabled.  For LE-only discovery the
 * session simply ends; for interleaved discovery a classic inquiry is
 * started next (GIAC, DISCOV_INTERLEAVED_INQUIRY_LEN), flushing the
 * inquiry cache first. */
3618 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3620 /* General inquiry access code (GIAC) */
3621 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3622 struct hci_request req;
3623 struct hci_cp_inquiry cp;
3627 BT_ERR("Failed to disable LE scanning: status %d", status);
3631 switch (hdev->discovery.type) {
3632 case DISCOV_TYPE_LE:
3634 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3635 hci_dev_unlock(hdev);
3638 case DISCOV_TYPE_INTERLEAVED:
3639 hci_req_init(&req, hdev);
3641 memset(&cp, 0, sizeof(cp));
3642 memcpy(&cp.lap, lap, sizeof(cp.lap));
3643 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3644 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3648 hci_inquiry_cache_flush(hdev);
3650 err = hci_req_run(&req, inquiry_complete);
3652 BT_ERR("Inquiry request failed: err %d", err);
3653 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3656 hci_dev_unlock(hdev);

/* Delayed work that turns LE scanning off after the discovery timeout,
 * chaining to le_scan_disable_work_complete above. */
3661 static void le_scan_disable_work(struct work_struct *work)
3663 struct hci_dev *hdev = container_of(work, struct hci_dev,
3664 le_scan_disable.work);
3665 struct hci_request req;
3668 BT_DBG("%s", hdev->name);
3670 hci_req_init(&req, hdev);
3672 hci_req_add_le_scan_disable(&req);
3674 err = hci_req_run(&req, le_scan_disable_work_complete);
3676 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an HCI command to set the controller's random address, unless
 * advertising or an outgoing LE connection attempt is in flight — in
 * which case the update is deferred (the controller's behaviour for a
 * mid-operation change is controller-specific, see comment below). */
3679 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3681 struct hci_dev *hdev = req->hdev;
3683 /* If we're advertising or initiating an LE connection we can't
3684 * go ahead and change the random address at this time. This is
3685 * because the eventual initiator address used for the
3686 * subsequently created connection will be undefined (some
3687 * controllers use the new address and others the one we had
3688 * when the operation started).
3690 * In this kind of scenario skip the update and let the random
3691 * address be updated at the next cycle.
 */
3693 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3694 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3695 BT_DBG("Deferring random address update");
3699 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);

/* Choose and (if needed) program the own-address for LE operations,
 * reporting the chosen type through *own_addr_type.  Priority order:
 *   1. privacy enabled  -> resolvable private address (RPA), freshly
 *      generated when expired or not currently in use, with the
 *      rpa_expired timer re-armed;
 *   2. require_privacy  -> unresolvable private address (random bytes
 *      with the two top bits cleared);
 *   3. forced/implicit static address -> static random address;
 *   4. otherwise        -> public address, no command queued.
 * NOTE(review): early returns between these stages are elided in this
 * extract. */
3702 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3705 struct hci_dev *hdev = req->hdev;
3708 /* If privacy is enabled use a resolvable private address. If
3709 * current RPA has expired or there is something else than
3710 * the current RPA in use, then generate a new one.
 */
3712 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3715 *own_addr_type = ADDR_LE_DEV_RANDOM;
3717 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3718 !bacmp(&hdev->random_addr, &hdev->rpa))
3721 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3723 BT_ERR("%s failed to generate new RPA", hdev->name);
3727 set_random_addr(req, &hdev->rpa);
3729 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3730 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3735 /* In case of required privacy without resolvable private address,
3736 * use an unresolvable private address. This is useful for active
3737 * scanning and non-connectable advertising.
 */
3739 if (require_privacy) {
3742 get_random_bytes(&urpa, 6);
3743 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3745 *own_addr_type = ADDR_LE_DEV_RANDOM;
3746 set_random_addr(req, &urpa);
3750 /* If forcing static address is in use or there is no public
3751 * address use the static address as random address (but skip
3752 * the HCI command if the current random address is already the
 * static one).
 */
3755 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3756 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3757 *own_addr_type = ADDR_LE_DEV_RANDOM;
3758 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3759 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3760 &hdev->static_addr);
3764 /* Neither privacy nor static address is being used so use a
 * public address.
 */
3767 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3772 /* Copy the Identity Address of the controller.
3774 * If the controller has a public BD_ADDR, then by default use that one.
3775 * If this is a LE only controller without a public address, default to
3776 * the static random address.
3778 * For debugging purposes it is possible to force controllers with a
3779 * public address to use the static random address instead.
 */
/* Writes the identity address into *bdaddr and its type into
 * *bdaddr_type; the else-branch (public address path) follows the
 * elided closing brace of the if. */
3781 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3784 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3785 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3786 bacpy(bdaddr, &hdev->static_addr);
3787 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3789 bacpy(bdaddr, &hdev->bdaddr);
3790 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3794 /* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with defaults: packet
 * types, LE scan/connection parameters, all book-keeping lists, the
 * work items and queues.  Returns the new device (NULL-on-ENOMEM path
 * elided in this extract).  Counterpart: hci_free_dev() below. */
3795 struct hci_dev *hci_alloc_dev(void)
3797 struct hci_dev *hdev;
3799 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
 /* Baseband defaults */
3803 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3804 hdev->esco_type = (ESCO_HV1);
3805 hdev->link_mode = (HCI_LM_ACCEPT);
3806 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3807 hdev->io_capability = 0x03; /* No Input No Output */
3808 hdev->manufacturer = 0xffff; /* Default to internal use */
3809 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3810 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 /* Sniff-mode bounds (slots) */
3812 hdev->sniff_max_interval = 800;
3813 hdev->sniff_min_interval = 80;
 /* LE defaults (controller units; e.g. scan values in 0.625 ms units
  * per the HCI spec — TODO confirm against headers) */
3815 hdev->le_adv_channel_map = 0x07;
3816 hdev->le_scan_interval = 0x0060;
3817 hdev->le_scan_window = 0x0030;
3818 hdev->le_conn_min_interval = 0x0028;
3819 hdev->le_conn_max_interval = 0x0038;
3820 hdev->le_conn_latency = 0x0000;
3821 hdev->le_supv_timeout = 0x002a;
3823 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3824 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3825 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3826 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3828 mutex_init(&hdev->lock);
3829 mutex_init(&hdev->req_lock);
 /* Book-keeping lists used throughout this file */
3831 INIT_LIST_HEAD(&hdev->mgmt_pending);
3832 INIT_LIST_HEAD(&hdev->blacklist);
3833 INIT_LIST_HEAD(&hdev->uuids);
3834 INIT_LIST_HEAD(&hdev->link_keys);
3835 INIT_LIST_HEAD(&hdev->long_term_keys);
3836 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3837 INIT_LIST_HEAD(&hdev->remote_oob_data);
3838 INIT_LIST_HEAD(&hdev->le_white_list);
3839 INIT_LIST_HEAD(&hdev->le_conn_params);
3840 INIT_LIST_HEAD(&hdev->pend_le_conns);
3841 INIT_LIST_HEAD(&hdev->pend_le_reports);
3842 INIT_LIST_HEAD(&hdev->conn_hash.list);
 /* Work items: RX/TX/command pumps plus power management */
3844 INIT_WORK(&hdev->rx_work, hci_rx_work);
3845 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3846 INIT_WORK(&hdev->tx_work, hci_tx_work);
3847 INIT_WORK(&hdev->power_on, hci_power_on);
3849 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3850 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3851 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3853 skb_queue_head_init(&hdev->rx_q);
3854 skb_queue_head_init(&hdev->cmd_q);
3855 skb_queue_head_init(&hdev->raw_q);
3857 init_waitqueue_head(&hdev->req_wait_q);
3859 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3861 hci_init_sysfs(hdev);
3862 discovery_init(hdev);
3866 EXPORT_SYMBOL(hci_alloc_dev);

3868 /* Free HCI device */
/* Drops the device reference; actual freeing happens in the device
 * release callback once the last reference is gone. */
3869 void hci_free_dev(struct hci_dev *hdev)
3871 /* will free via device release */
3872 put_device(&hdev->dev);
3874 EXPORT_SYMBOL(hci_free_dev);
3876 /* Register HCI device */
/* Register an allocated hci_dev with the core: allocate an index,
 * create the work queues and debugfs dir, set up the AES crypto
 * context (for SMP RPA work), add the device, register rfkill, put it
 * on the global device list, and queue the initial power-on.  Error
 * handling unwinds in reverse via the goto labels visible at the
 * bottom.  NOTE(review): some lines (label names, a few returns/
 * braces) are elided in this extract. */
3877 int hci_register_dev(struct hci_dev *hdev)
3881 if (!hdev->open || !hdev->close)
3884 /* Do not allow HCI_AMP devices to register at index 0,
3885 * so the index can be used as the AMP controller ID.
 */
3887 switch (hdev->dev_type) {
 /* BR/EDR may take index 0; AMP starts at 1 (see comment above) */
3889 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3892 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3901 sprintf(hdev->name, "hci%d", id);
3904 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 /* Per-device ordered workqueues: one for RX/TX/cmd, one for
  * request-driven work (power on, etc.) */
3906 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3907 WQ_MEM_RECLAIM, 1, hdev->name);
3908 if (!hdev->workqueue) {
3913 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3914 WQ_MEM_RECLAIM, 1, hdev->name);
3915 if (!hdev->req_workqueue) {
3916 destroy_workqueue(hdev->workqueue);
3921 if (!IS_ERR_OR_NULL(bt_debugfs))
3922 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3924 dev_set_name(&hdev->dev, "%s", hdev->name);
 /* ECB(AES) cipher used for RPA generation/resolution */
3926 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3928 if (IS_ERR(hdev->tfm_aes)) {
3929 BT_ERR("Unable to create crypto context");
3930 error = PTR_ERR(hdev->tfm_aes);
3931 hdev->tfm_aes = NULL;
3935 error = device_add(&hdev->dev);
3939 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3940 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
 /* rfkill is optional: registration failure just drops it */
3943 if (rfkill_register(hdev->rfkill) < 0) {
3944 rfkill_destroy(hdev->rfkill);
3945 hdev->rfkill = NULL;
3949 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3950 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3952 set_bit(HCI_SETUP, &hdev->dev_flags);
3953 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3955 if (hdev->dev_type == HCI_BREDR) {
3956 /* Assume BR/EDR support until proven otherwise (such as
3957 * through reading supported features during init.
 */
3959 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3962 write_lock(&hci_dev_list_lock);
3963 list_add(&hdev->list, &hci_dev_list);
3964 write_unlock(&hci_dev_list_lock);
3966 /* Devices that are marked for raw-only usage are unconfigured
3967 * and should not be included in normal operation.
 */
3969 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3970 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3972 hci_notify(hdev, HCI_DEV_REG);
3975 queue_work(hdev->req_workqueue, &hdev->power_on);
 /* Error unwind: free crypto, workqueues, then release the index */
3980 crypto_free_blkcipher(hdev->tfm_aes);
3982 destroy_workqueue(hdev->workqueue);
3983 destroy_workqueue(hdev->req_workqueue);
3985 ida_simple_remove(&hci_index_ida, hdev->id);
3989 EXPORT_SYMBOL(hci_register_dev);
3991 /* Unregister HCI device */
/* Tear down a registered hci_dev: mark it unregistering, pull it off
 * the global list, close it, cancel pending work, notify mgmt (unless
 * still in INIT/SETUP), unregister rfkill and the device, destroy the
 * workqueues, flush every per-device store (keys, OOB data, lists,
 * conn params) under hdev->lock, and finally release the index. */
3992 void hci_unregister_dev(struct hci_dev *hdev)
3996 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3998 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4002 write_lock(&hci_dev_list_lock);
4003 list_del(&hdev->list);
4004 write_unlock(&hci_dev_list_lock);
4006 hci_dev_do_close(hdev);
 /* Drop any partially reassembled packets */
4008 for (i = 0; i < NUM_REASSEMBLY; i++)
4009 kfree_skb(hdev->reassembly[i]);
4011 cancel_work_sync(&hdev->power_on);
4013 if (!test_bit(HCI_INIT, &hdev->flags) &&
4014 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4016 mgmt_index_removed(hdev);
4017 hci_dev_unlock(hdev);
4020 /* mgmt_index_removed should take care of emptying the
 * mgmt_pending list (comment truncated in extract) */
4022 BUG_ON(!list_empty(&hdev->mgmt_pending));
4024 hci_notify(hdev, HCI_DEV_UNREG);
4027 rfkill_unregister(hdev->rfkill);
4028 rfkill_destroy(hdev->rfkill);
4032 crypto_free_blkcipher(hdev->tfm_aes);
4034 device_del(&hdev->dev);
4036 debugfs_remove_recursive(hdev->debugfs);
4038 destroy_workqueue(hdev->workqueue);
4039 destroy_workqueue(hdev->req_workqueue);
 /* Flush every per-device data store (under hdev->lock) */
4042 hci_blacklist_clear(hdev);
4043 hci_uuids_clear(hdev);
4044 hci_link_keys_clear(hdev);
4045 hci_smp_ltks_clear(hdev);
4046 hci_smp_irks_clear(hdev);
4047 hci_remote_oob_data_clear(hdev);
4048 hci_white_list_clear(hdev);
4049 hci_conn_params_clear_all(hdev);
4050 hci_dev_unlock(hdev);
4054 ida_simple_remove(&hci_index_ida, id);
4056 EXPORT_SYMBOL(hci_unregister_dev);
4058 /* Suspend HCI device */
/* Notify sockets of device suspend; returns 0 (return elided). */
4059 int hci_suspend_dev(struct hci_dev *hdev)
4061 hci_notify(hdev, HCI_DEV_SUSPEND);
4064 EXPORT_SYMBOL(hci_suspend_dev);

4066 /* Resume HCI device */
/* Notify sockets of device resume; returns 0 (return elided). */
4067 int hci_resume_dev(struct hci_dev *hdev)
4069 hci_notify(hdev, HCI_DEV_RESUME);
4072 EXPORT_SYMBOL(hci_resume_dev);

4074 /* Receive frame from HCI drivers */
/* Entry point drivers call with a complete received packet.  Rejected
 * unless the device is UP or in INIT; otherwise the skb is marked
 * incoming, timestamped, queued on rx_q and rx_work is kicked.
 * NOTE(review): the error-path kfree_skb/return lines are elided. */
4075 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4077 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4078 && !test_bit(HCI_INIT, &hdev->flags))) {
4084 bt_cb(skb)->incoming = 1;
 /* Timestamp on arrival */
4087 __net_timestamp(skb);
4089 skb_queue_tail(&hdev->rx_q, skb);
4090 queue_work(hdev->workqueue, &hdev->rx_work);
4094 EXPORT_SYMBOL(hci_recv_frame);
/* Packet reassembly for drivers that deliver data in fragments.
 * NOTE(review): switch labels (HCI_EVENT_PKT case), returns and some
 * braces are elided in this extract. */

/* Accumulate up to `count` bytes of a packet of the given type into
 * hdev->reassembly[index].  A fresh skb is allocated sized for the
 * packet type's maximum; scb->expect tracks how many bytes are still
 * needed (first the header, then the payload length read from the
 * header).  When expect reaches 0 the completed skb is handed to
 * hci_recv_frame().  Returns the number of unconsumed bytes
 * (presumably — returns are elided; TODO confirm). */
4096 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4097 int count, __u8 index)
4102 struct sk_buff *skb;
4103 struct bt_skb_cb *scb;
4105 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4106 index >= NUM_REASSEMBLY)
4109 skb = hdev->reassembly[index];
 /* No packet in progress: pick max size + header size per type */
4113 case HCI_ACLDATA_PKT:
4114 len = HCI_MAX_FRAME_SIZE;
4115 hlen = HCI_ACL_HDR_SIZE;
4118 len = HCI_MAX_EVENT_SIZE;
4119 hlen = HCI_EVENT_HDR_SIZE;
4121 case HCI_SCODATA_PKT:
4122 len = HCI_MAX_SCO_SIZE;
4123 hlen = HCI_SCO_HDR_SIZE;
4127 skb = bt_skb_alloc(len, GFP_ATOMIC);
4131 scb = (void *) skb->cb;
4133 scb->pkt_type = type;
4135 hdev->reassembly[index] = skb;
 /* Copy as much as is available and still expected */
4139 scb = (void *) skb->cb;
4140 len = min_t(uint, scb->expect, count);
4142 memcpy(skb_put(skb, len), data, len);
 /* Header complete: learn the payload length, and drop the
  * packet if it cannot fit in the allocated skb */
4151 if (skb->len == HCI_EVENT_HDR_SIZE) {
4152 struct hci_event_hdr *h = hci_event_hdr(skb);
4153 scb->expect = h->plen;
4155 if (skb_tailroom(skb) < scb->expect) {
4157 hdev->reassembly[index] = NULL;
4163 case HCI_ACLDATA_PKT:
4164 if (skb->len == HCI_ACL_HDR_SIZE) {
4165 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4166 scb->expect = __le16_to_cpu(h->dlen);
4168 if (skb_tailroom(skb) < scb->expect) {
4170 hdev->reassembly[index] = NULL;
4176 case HCI_SCODATA_PKT:
4177 if (skb->len == HCI_SCO_HDR_SIZE) {
4178 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4179 scb->expect = h->dlen;
4181 if (skb_tailroom(skb) < scb->expect) {
4183 hdev->reassembly[index] = NULL;
4190 if (scb->expect == 0) {
4191 /* Complete frame */
4193 bt_cb(skb)->pkt_type = type;
4194 hci_recv_frame(hdev, skb);
4196 hdev->reassembly[index] = NULL;

/* Driver-facing wrapper: feed fragments of a typed packet, looping
 * until all `count` bytes are consumed.  Slot `type - 1` is used so
 * each packet type reassembles independently. */
4204 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4208 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4212 rem = hci_reassembly(hdev, type, data, count, type - 1);
 /* Advance past what hci_reassembly() consumed */
4216 data += (count - rem);
4222 EXPORT_SYMBOL(hci_recv_fragment);

4224 #define STREAM_REASSEMBLY 0

/* Stream variant for drivers that deliver a raw byte stream: the first
 * byte of each frame is the packet-type indicator, after which the
 * typed reassembly above takes over in the dedicated stream slot. */
4226 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4232 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4235 struct { char type; } *pkt;
4237 /* Start of the frame */
4244 type = bt_cb(skb)->pkt_type;
4246 rem = hci_reassembly(hdev, type, data, count,
4251 data += (count - rem);
4257 EXPORT_SYMBOL(hci_recv_stream_fragment);
4259 /* ---- Interface to upper protocols ---- */

/* Add an upper-protocol callback set (e.g. L2CAP, SCO) to the global
 * hci_cb_list; always succeeds (returns 0, elided). */
4261 int hci_register_cb(struct hci_cb *cb)
4263 BT_DBG("%p name %s", cb, cb->name);
4265 write_lock(&hci_cb_list_lock);
4266 list_add(&cb->list, &hci_cb_list);
4267 write_unlock(&hci_cb_list_lock);
4271 EXPORT_SYMBOL(hci_register_cb);

/* Remove an upper-protocol callback set from hci_cb_list. */
4273 int hci_unregister_cb(struct hci_cb *cb)
4275 BT_DBG("%p name %s", cb, cb->name);
4277 write_lock(&hci_cb_list_lock);
4278 list_del(&cb->list);
4279 write_unlock(&hci_cb_list_lock);
4283 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand a fully built packet to the driver, after timestamping it and
 * mirroring a copy to the monitor socket (and, in promiscuous mode, to
 * raw sockets). */
4285 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4287 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4290 __net_timestamp(skb);
4292 /* Send copy to monitor */
4293 hci_send_to_monitor(hdev, skb);
4295 if (atomic_read(&hdev->promisc)) {
4296 /* Send copy to the sockets */
4297 hci_send_to_sock(hdev, skb);
4300 /* Get rid of skb owner, prior to sending to the driver. */
4303 if (hdev->send(hdev, skb) < 0)
4304 BT_ERR("%s sending frame failed", hdev->name);

/* Initialise an HCI request: empty command queue bound to hdev
 * (hdev/err assignments elided in this extract). */
4307 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4309 skb_queue_head_init(&req->cmd_q);

/* Submit a built request: marks the last queued command with the
 * completion callback, splices the request's commands onto the
 * device's cmd_q under its lock, and kicks cmd_work.  Fails (paths
 * elided) when a build error occurred or the request is empty. */
4314 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4316 struct hci_dev *hdev = req->hdev;
4317 struct sk_buff *skb;
4318 unsigned long flags;
4320 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4322 /* If an error occured during request building, remove all HCI
4323 * commands queued on the HCI request queue.
 */
4326 skb_queue_purge(&req->cmd_q);
4330 /* Do not allow empty requests */
4331 if (skb_queue_empty(&req->cmd_q))
 /* Completion callback rides on the final command's control block */
4334 skb = skb_peek_tail(&req->cmd_q);
4335 bt_cb(skb)->req.complete = complete;
4337 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4338 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4339 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4341 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an HCI command skb: header (opcode little-endian + plen,
 * plen assignment elided) followed by the parameter block.  Returns
 * NULL on allocation failure (return elided). */
4346 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4347 u32 plen, const void *param)
4349 int len = HCI_COMMAND_HDR_SIZE + plen;
4350 struct hci_command_hdr *hdr;
4351 struct sk_buff *skb;
4353 skb = bt_skb_alloc(len, GFP_ATOMIC);
4357 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4358 hdr->opcode = cpu_to_le16(opcode);
4362 memcpy(skb_put(skb, plen), param, plen);
4364 BT_DBG("skb len %d", skb->len);
4366 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

4371 /* Send HCI command */
/* Build and queue a single stand-alone HCI command on cmd_q, marking
 * it as the start of its own one-command request, then kick cmd_work. */
4372 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4375 struct sk_buff *skb;
4377 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4379 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4381 BT_ERR("%s no memory for command", hdev->name);
4385 /* Stand-alone HCI commands must be flaged as
4386 * single-command requests.
 */
4388 bt_cb(skb)->req.start = true;
4390 skb_queue_tail(&hdev->cmd_q, skb);
4391 queue_work(hdev->workqueue, &hdev->cmd_work);

4396 /* Queue a command to an asynchronous HCI request */
/* Append a command to a request being built, optionally tagging the
 * event that completes it.  The first command queued on the request is
 * marked as the request start.  Build errors are sticky: once req->err
 * is set (checked in elided lines) later adds are no-ops. */
4397 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4398 const void *param, u8 event)
4400 struct hci_dev *hdev = req->hdev;
4401 struct sk_buff *skb;
4403 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4405 /* If an error occured during request building, there is no point in
4406 * queueing the HCI command. We can simply return.
 */
4411 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4413 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4414 hdev->name, opcode);
4419 if (skb_queue_empty(&req->cmd_q))
4420 bt_cb(skb)->req.start = true;
4422 bt_cb(skb)->req.event = event;
4424 skb_queue_tail(&req->cmd_q, skb);

/* Convenience wrapper: queue a command completed by the default
 * Command Complete/Status event (event == 0). */
4427 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4430 hci_req_add_ev(req, opcode, plen, param, 0);
4433 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command,
 * but only if its opcode matches the one asked for; NULL returns are
 * in the elided lines. */
4434 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4436 struct hci_command_hdr *hdr;
4438 if (!hdev->sent_cmd)
4441 hdr = (void *) hdev->sent_cmd->data;
4443 if (hdr->opcode != cpu_to_le16(opcode))
4446 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4448 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;

/* Prepend an ACL header (handle+flags packed, data length) to an
 * outgoing skb; `len` captures skb->len before the push (assignment
 * elided in this extract). */
4452 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4454 struct hci_acl_hdr *hdr;
4457 skb_push(skb, HCI_ACL_HDR_SIZE);
4458 skb_reset_transport_header(skb);
4459 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4460 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4461 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL payload (possibly fragmented via skb frag_list) onto a
 * channel queue.  The head fragment gets an ACL header for either the
 * connection handle (BR/EDR) or the channel handle (AMP, per dev_type
 * switch); continuation fragments get headers with ACL_START cleared.
 * All fragments are queued atomically under the queue lock.
 * NOTE(review): case labels for the dev_type switch and the while-loop
 * head over the frag list are elided in this extract. */
4464 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4465 struct sk_buff *skb, __u16 flags)
4467 struct hci_conn *conn = chan->conn;
4468 struct hci_dev *hdev = conn->hdev;
4469 struct sk_buff *list;
4471 skb->len = skb_headlen(skb);
4474 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4476 switch (hdev->dev_type) {
4478 hci_add_acl_hdr(skb, conn->handle, flags);
4481 hci_add_acl_hdr(skb, chan->handle, flags);
4484 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4488 list = skb_shinfo(skb)->frag_list;
4490 /* Non fragmented */
4491 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4493 skb_queue_tail(queue, skb);
 /* Fragmented path */
4496 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4498 skb_shinfo(skb)->frag_list = NULL;
4500 /* Queue all fragments atomically */
4501 spin_lock(&queue->lock);
4503 __skb_queue_tail(queue, skb);
 /* Continuation fragments must not carry the ACL_START flag */
4505 flags &= ~ACL_START;
4508 skb = list; list = list->next;
4510 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4511 hci_add_acl_hdr(skb, conn->handle, flags);
4513 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4515 __skb_queue_tail(queue, skb);
4518 spin_unlock(&queue->lock);

/* Public ACL send: queue onto the channel's data queue and kick the
 * TX work. */
4522 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4524 struct hci_dev *hdev = chan->conn->hdev;
4526 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4528 hci_queue_acl(chan, &chan->data_q, skb, flags);
4530 queue_work(hdev->workqueue, &hdev->tx_work);

/* Public SCO send: prepend the SCO header (handle + 8-bit length),
 * queue on the connection's data queue and kick the TX work. */
4534 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4536 struct hci_dev *hdev = conn->hdev;
4537 struct hci_sco_hdr hdr;
4539 BT_DBG("%s len %d", hdev->name, skb->len);
4541 hdr.handle = cpu_to_le16(conn->handle);
4542 hdr.dlen = skb->len;
4544 skb_push(skb, HCI_SCO_HDR_SIZE);
4545 skb_reset_transport_header(skb);
4546 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4548 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4550 skb_queue_tail(&conn->data_q, skb);
4551 queue_work(hdev->workqueue, &hdev->tx_work);
4554 /* ---- HCI TX task (outgoing data) ---- */
4556 /* HCI Connection scheduler */
4557 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4560 struct hci_conn_hash *h = &hdev->conn_hash;
4561 struct hci_conn *conn = NULL, *c;
4562 unsigned int num = 0, min = ~0;
4564 /* We don't have to lock device here. Connections are always
4565 * added and removed with TX task disabled. */
4569 list_for_each_entry_rcu(c, &h->list, list) {
4570 if (c->type != type || skb_queue_empty(&c->data_q))
4573 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4578 if (c->sent < min) {
4583 if (hci_conn_num(hdev, type) == num)
4592 switch (conn->type) {
4594 cnt = hdev->acl_cnt;
4598 cnt = hdev->sco_cnt;
4601 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4605 BT_ERR("Unknown link type");
4613 BT_DBG("conn %p quote %d", conn, *quote);
4617 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4619 struct hci_conn_hash *h = &hdev->conn_hash;
4622 BT_ERR("%s link tx timeout", hdev->name);
4626 /* Kill stalled connections */
4627 list_for_each_entry_rcu(c, &h->list, list) {
4628 if (c->type == type && c->sent) {
4629 BT_ERR("%s killing stalled connection %pMR",
4630 hdev->name, &c->dst);
4631 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4638 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4641 struct hci_conn_hash *h = &hdev->conn_hash;
4642 struct hci_chan *chan = NULL;
4643 unsigned int num = 0, min = ~0, cur_prio = 0;
4644 struct hci_conn *conn;
4645 int cnt, q, conn_num = 0;
4647 BT_DBG("%s", hdev->name);
4651 list_for_each_entry_rcu(conn, &h->list, list) {
4652 struct hci_chan *tmp;
4654 if (conn->type != type)
4657 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4662 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4663 struct sk_buff *skb;
4665 if (skb_queue_empty(&tmp->data_q))
4668 skb = skb_peek(&tmp->data_q);
4669 if (skb->priority < cur_prio)
4672 if (skb->priority > cur_prio) {
4675 cur_prio = skb->priority;
4680 if (conn->sent < min) {
4686 if (hci_conn_num(hdev, type) == conn_num)
4695 switch (chan->conn->type) {
4697 cnt = hdev->acl_cnt;
4700 cnt = hdev->block_cnt;
4704 cnt = hdev->sco_cnt;
4707 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4711 BT_ERR("Unknown link type");
4716 BT_DBG("chan %p quote %d", chan, *quote);
4720 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4722 struct hci_conn_hash *h = &hdev->conn_hash;
4723 struct hci_conn *conn;
4726 BT_DBG("%s", hdev->name);
4730 list_for_each_entry_rcu(conn, &h->list, list) {
4731 struct hci_chan *chan;
4733 if (conn->type != type)
4736 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4741 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4742 struct sk_buff *skb;
4749 if (skb_queue_empty(&chan->data_q))
4752 skb = skb_peek(&chan->data_q);
4753 if (skb->priority >= HCI_PRIO_MAX - 1)
4756 skb->priority = HCI_PRIO_MAX - 1;
4758 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4762 if (hci_conn_num(hdev, type) == num)
4770 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4772 /* Calculate count of blocks used by this packet */
4773 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4776 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4778 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4779 /* ACL tx timeout must be longer than maximum
4780 * link supervision timeout (40.9 seconds) */
4781 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4782 HCI_ACL_TX_TIMEOUT))
4783 hci_link_tx_to(hdev, ACL_LINK);
4787 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4789 unsigned int cnt = hdev->acl_cnt;
4790 struct hci_chan *chan;
4791 struct sk_buff *skb;
4794 __check_timeout(hdev, cnt);
4796 while (hdev->acl_cnt &&
4797 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4798 u32 priority = (skb_peek(&chan->data_q))->priority;
4799 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4800 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4801 skb->len, skb->priority);
4803 /* Stop if priority has changed */
4804 if (skb->priority < priority)
4807 skb = skb_dequeue(&chan->data_q);
4809 hci_conn_enter_active_mode(chan->conn,
4810 bt_cb(skb)->force_active);
4812 hci_send_frame(hdev, skb);
4813 hdev->acl_last_tx = jiffies;
4821 if (cnt != hdev->acl_cnt)
4822 hci_prio_recalculate(hdev, ACL_LINK);
4825 static void hci_sched_acl_blk(struct hci_dev *hdev)
4827 unsigned int cnt = hdev->block_cnt;
4828 struct hci_chan *chan;
4829 struct sk_buff *skb;
4833 __check_timeout(hdev, cnt);
4835 BT_DBG("%s", hdev->name);
4837 if (hdev->dev_type == HCI_AMP)
4842 while (hdev->block_cnt > 0 &&
4843 (chan = hci_chan_sent(hdev, type, "e))) {
4844 u32 priority = (skb_peek(&chan->data_q))->priority;
4845 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4848 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4849 skb->len, skb->priority);
4851 /* Stop if priority has changed */
4852 if (skb->priority < priority)
4855 skb = skb_dequeue(&chan->data_q);
4857 blocks = __get_blocks(hdev, skb);
4858 if (blocks > hdev->block_cnt)
4861 hci_conn_enter_active_mode(chan->conn,
4862 bt_cb(skb)->force_active);
4864 hci_send_frame(hdev, skb);
4865 hdev->acl_last_tx = jiffies;
4867 hdev->block_cnt -= blocks;
4870 chan->sent += blocks;
4871 chan->conn->sent += blocks;
4875 if (cnt != hdev->block_cnt)
4876 hci_prio_recalculate(hdev, type);
4879 static void hci_sched_acl(struct hci_dev *hdev)
4881 BT_DBG("%s", hdev->name);
4883 /* No ACL link over BR/EDR controller */
4884 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4887 /* No AMP link over AMP controller */
4888 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4891 switch (hdev->flow_ctl_mode) {
4892 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4893 hci_sched_acl_pkt(hdev);
4896 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4897 hci_sched_acl_blk(hdev);
4903 static void hci_sched_sco(struct hci_dev *hdev)
4905 struct hci_conn *conn;
4906 struct sk_buff *skb;
4909 BT_DBG("%s", hdev->name);
4911 if (!hci_conn_num(hdev, SCO_LINK))
4914 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4915 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4916 BT_DBG("skb %p len %d", skb, skb->len);
4917 hci_send_frame(hdev, skb);
4920 if (conn->sent == ~0)
4926 static void hci_sched_esco(struct hci_dev *hdev)
4928 struct hci_conn *conn;
4929 struct sk_buff *skb;
4932 BT_DBG("%s", hdev->name);
4934 if (!hci_conn_num(hdev, ESCO_LINK))
4937 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4939 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4940 BT_DBG("skb %p len %d", skb, skb->len);
4941 hci_send_frame(hdev, skb);
4944 if (conn->sent == ~0)
4950 static void hci_sched_le(struct hci_dev *hdev)
4952 struct hci_chan *chan;
4953 struct sk_buff *skb;
4954 int quote, cnt, tmp;
4956 BT_DBG("%s", hdev->name);
4958 if (!hci_conn_num(hdev, LE_LINK))
4961 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4962 /* LE tx timeout must be longer than maximum
4963 * link supervision timeout (40.9 seconds) */
4964 if (!hdev->le_cnt && hdev->le_pkts &&
4965 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4966 hci_link_tx_to(hdev, LE_LINK);
4969 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4971 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4972 u32 priority = (skb_peek(&chan->data_q))->priority;
4973 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4974 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4975 skb->len, skb->priority);
4977 /* Stop if priority has changed */
4978 if (skb->priority < priority)
4981 skb = skb_dequeue(&chan->data_q);
4983 hci_send_frame(hdev, skb);
4984 hdev->le_last_tx = jiffies;
4995 hdev->acl_cnt = cnt;
4998 hci_prio_recalculate(hdev, LE_LINK);
5001 static void hci_tx_work(struct work_struct *work)
5003 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5004 struct sk_buff *skb;
5006 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5007 hdev->sco_cnt, hdev->le_cnt);
5009 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5010 /* Schedule queues and send stuff to HCI driver */
5011 hci_sched_acl(hdev);
5012 hci_sched_sco(hdev);
5013 hci_sched_esco(hdev);
5017 /* Send next queued raw (unknown type) packet */
5018 while ((skb = skb_dequeue(&hdev->raw_q)))
5019 hci_send_frame(hdev, skb);
5022 /* ----- HCI RX task (incoming data processing) ----- */
5024 /* ACL data packet */
5025 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5027 struct hci_acl_hdr *hdr = (void *) skb->data;
5028 struct hci_conn *conn;
5029 __u16 handle, flags;
5031 skb_pull(skb, HCI_ACL_HDR_SIZE);
5033 handle = __le16_to_cpu(hdr->handle);
5034 flags = hci_flags(handle);
5035 handle = hci_handle(handle);
5037 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5040 hdev->stat.acl_rx++;
5043 conn = hci_conn_hash_lookup_handle(hdev, handle);
5044 hci_dev_unlock(hdev);
5047 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5049 /* Send to upper protocol */
5050 l2cap_recv_acldata(conn, skb, flags);
5053 BT_ERR("%s ACL packet for unknown connection handle %d",
5054 hdev->name, handle);
5060 /* SCO data packet */
5061 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5063 struct hci_sco_hdr *hdr = (void *) skb->data;
5064 struct hci_conn *conn;
5067 skb_pull(skb, HCI_SCO_HDR_SIZE);
5069 handle = __le16_to_cpu(hdr->handle);
5071 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5073 hdev->stat.sco_rx++;
5076 conn = hci_conn_hash_lookup_handle(hdev, handle);
5077 hci_dev_unlock(hdev);
5080 /* Send to upper protocol */
5081 sco_recv_scodata(conn, skb);
5084 BT_ERR("%s SCO packet for unknown connection handle %d",
5085 hdev->name, handle);
5091 static bool hci_req_is_complete(struct hci_dev *hdev)
5093 struct sk_buff *skb;
5095 skb = skb_peek(&hdev->cmd_q);
5099 return bt_cb(skb)->req.start;
5102 static void hci_resend_last(struct hci_dev *hdev)
5104 struct hci_command_hdr *sent;
5105 struct sk_buff *skb;
5108 if (!hdev->sent_cmd)
5111 sent = (void *) hdev->sent_cmd->data;
5112 opcode = __le16_to_cpu(sent->opcode);
5113 if (opcode == HCI_OP_RESET)
5116 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5120 skb_queue_head(&hdev->cmd_q, skb);
5121 queue_work(hdev->workqueue, &hdev->cmd_work);
5124 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5126 hci_req_complete_t req_complete = NULL;
5127 struct sk_buff *skb;
5128 unsigned long flags;
5130 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5132 /* If the completed command doesn't match the last one that was
5133 * sent we need to do special handling of it.
5135 if (!hci_sent_cmd_data(hdev, opcode)) {
5136 /* Some CSR based controllers generate a spontaneous
5137 * reset complete event during init and any pending
5138 * command will never be completed. In such a case we
5139 * need to resend whatever was the last sent
5142 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5143 hci_resend_last(hdev);
5148 /* If the command succeeded and there's still more commands in
5149 * this request the request is not yet complete.
5151 if (!status && !hci_req_is_complete(hdev))
5154 /* If this was the last command in a request the complete
5155 * callback would be found in hdev->sent_cmd instead of the
5156 * command queue (hdev->cmd_q).
5158 if (hdev->sent_cmd) {
5159 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5162 /* We must set the complete callback to NULL to
5163 * avoid calling the callback more than once if
5164 * this function gets called again.
5166 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5172 /* Remove all pending commands belonging to this request */
5173 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5174 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5175 if (bt_cb(skb)->req.start) {
5176 __skb_queue_head(&hdev->cmd_q, skb);
5180 req_complete = bt_cb(skb)->req.complete;
5183 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5187 req_complete(hdev, status);
5190 static void hci_rx_work(struct work_struct *work)
5192 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5193 struct sk_buff *skb;
5195 BT_DBG("%s", hdev->name);
5197 while ((skb = skb_dequeue(&hdev->rx_q))) {
5198 /* Send copy to monitor */
5199 hci_send_to_monitor(hdev, skb);
5201 if (atomic_read(&hdev->promisc)) {
5202 /* Send copy to the sockets */
5203 hci_send_to_sock(hdev, skb);
5206 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5211 if (test_bit(HCI_INIT, &hdev->flags)) {
5212 /* Don't process data packets in this states. */
5213 switch (bt_cb(skb)->pkt_type) {
5214 case HCI_ACLDATA_PKT:
5215 case HCI_SCODATA_PKT:
5222 switch (bt_cb(skb)->pkt_type) {
5224 BT_DBG("%s Event packet", hdev->name);
5225 hci_event_packet(hdev, skb);
5228 case HCI_ACLDATA_PKT:
5229 BT_DBG("%s ACL data packet", hdev->name);
5230 hci_acldata_packet(hdev, skb);
5233 case HCI_SCODATA_PKT:
5234 BT_DBG("%s SCO data packet", hdev->name);
5235 hci_scodata_packet(hdev, skb);
5245 static void hci_cmd_work(struct work_struct *work)
5247 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5248 struct sk_buff *skb;
5250 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5251 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5253 /* Send queued commands */
5254 if (atomic_read(&hdev->cmd_cnt)) {
5255 skb = skb_dequeue(&hdev->cmd_q);
5259 kfree_skb(hdev->sent_cmd);
5261 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5262 if (hdev->sent_cmd) {
5263 atomic_dec(&hdev->cmd_cnt);
5264 hci_send_frame(hdev, skb);
5265 if (test_bit(HCI_RESET, &hdev->flags))
5266 cancel_delayed_work(&hdev->cmd_timer);
5268 schedule_delayed_work(&hdev->cmd_timer,
5271 skb_queue_head(&hdev->cmd_q, skb);
5272 queue_work(hdev->workqueue, &hdev->cmd_work);
5277 void hci_req_add_le_scan_disable(struct hci_request *req)
5279 struct hci_cp_le_set_scan_enable cp;
5281 memset(&cp, 0, sizeof(cp));
5282 cp.enable = LE_SCAN_DISABLE;
5283 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5286 void hci_req_add_le_passive_scan(struct hci_request *req)
5288 struct hci_cp_le_set_scan_param param_cp;
5289 struct hci_cp_le_set_scan_enable enable_cp;
5290 struct hci_dev *hdev = req->hdev;
5293 /* Set require_privacy to false since no SCAN_REQ are send
5294 * during passive scanning. Not using an unresolvable address
5295 * here is important so that peer devices using direct
5296 * advertising with our address will be correctly reported
5297 * by the controller.
5299 if (hci_update_random_address(req, false, &own_addr_type))
5302 memset(¶m_cp, 0, sizeof(param_cp));
5303 param_cp.type = LE_SCAN_PASSIVE;
5304 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5305 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5306 param_cp.own_address_type = own_addr_type;
5307 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5310 memset(&enable_cp, 0, sizeof(enable_cp));
5311 enable_cp.enable = LE_SCAN_ENABLE;
5312 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5313 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5317 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5320 BT_DBG("HCI request failed to update background scanning: "
5321 "status 0x%2.2x", status);
5324 /* This function controls the background scanning based on hdev->pend_le_conns
5325 * list. If there are pending LE connection we start the background scanning,
5326 * otherwise we stop it.
5328 * This function requires the caller holds hdev->lock.
5330 void hci_update_background_scan(struct hci_dev *hdev)
5332 struct hci_request req;
5333 struct hci_conn *conn;
5336 if (!test_bit(HCI_UP, &hdev->flags) ||
5337 test_bit(HCI_INIT, &hdev->flags) ||
5338 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5339 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5340 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5343 hci_req_init(&req, hdev);
5345 if (list_empty(&hdev->pend_le_conns) &&
5346 list_empty(&hdev->pend_le_reports)) {
5347 /* If there is no pending LE connections or devices
5348 * to be scanned for, we should stop the background
5352 /* If controller is not scanning we are done. */
5353 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5356 hci_req_add_le_scan_disable(&req);
5358 BT_DBG("%s stopping background scanning", hdev->name);
5360 /* If there is at least one pending LE connection, we should
5361 * keep the background scan running.
5364 /* If controller is connecting, we should not start scanning
5365 * since some controllers are not able to scan and connect at
5368 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5372 /* If controller is currently scanning, we stop it to ensure we
5373 * don't miss any advertising (due to duplicates filter).
5375 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5376 hci_req_add_le_scan_disable(&req);
5378 hci_req_add_le_passive_scan(&req);
5380 BT_DBG("%s starting background scanning", hdev->name);
5383 err = hci_req_run(&req, update_background_scan_complete);
5385 BT_ERR("Failed to run HCI request: err %d", err);