/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
/* Work handlers defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
56 /* ---- HCI notifications ---- */
/* Forward a device event notification to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
63 /* ---- HCI debugfs entries ---- */
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
68 struct hci_dev *hdev = file->private_data;
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
83 size_t buf_size = min(count, (sizeof(buf)-1));
87 if (!test_bit(HCI_UP, &hdev->flags))
90 if (copy_from_user(buf, user_buf, buf_size))
94 if (strtobool(buf, &enable))
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 hci_req_unlock(hdev);
112 err = -bt_to_errno(skb->data[0]);
118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
123 static const struct file_operations dut_mode_fops = {
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
130 static int features_show(struct seq_file *f, void *ptr)
132 struct hci_dev *hdev = f->private;
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
151 hci_dev_unlock(hdev);
156 static int features_open(struct inode *inode, struct file *file)
158 return single_open(file, features_show, inode->i_private);
161 static const struct file_operations features_fops = {
162 .open = features_open,
165 .release = single_release,
168 static int blacklist_show(struct seq_file *f, void *p)
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
174 list_for_each_entry(b, &hdev->blacklist, list)
175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
176 hci_dev_unlock(hdev);
181 static int blacklist_open(struct inode *inode, struct file *file)
183 return single_open(file, blacklist_show, inode->i_private);
186 static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
190 .release = single_release,
193 static int uuids_show(struct seq_file *f, void *p)
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
199 list_for_each_entry(uuid, &hdev->uuids, list) {
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
209 seq_printf(f, "%pUb\n", val);
211 hci_dev_unlock(hdev);
216 static int uuids_open(struct inode *inode, struct file *file)
218 return single_open(file, uuids_show, inode->i_private);
221 static const struct file_operations uuids_fops = {
225 .release = single_release,
228 static int inquiry_cache_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
247 hci_dev_unlock(hdev);
252 static int inquiry_cache_open(struct inode *inode, struct file *file)
254 return single_open(file, inquiry_cache_show, inode->i_private);
257 static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
261 .release = single_release,
264 static int link_keys_show(struct seq_file *f, void *ptr)
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 hci_dev_unlock(hdev);
280 static int link_keys_open(struct inode *inode, struct file *file)
282 return single_open(file, link_keys_show, inode->i_private);
285 static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
289 .release = single_release,
292 static int dev_class_show(struct seq_file *f, void *ptr)
294 struct hci_dev *hdev = f->private;
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
304 static int dev_class_open(struct inode *inode, struct file *file)
306 return single_open(file, dev_class_show, inode->i_private);
309 static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
313 .release = single_release,
316 static int voice_setting_get(void *data, u64 *val)
318 struct hci_dev *hdev = data;
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
327 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
330 static int auto_accept_delay_set(void *data, u64 val)
332 struct hci_dev *hdev = data;
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
341 static int auto_accept_delay_get(void *data, u64 *val)
343 struct hci_dev *hdev = data;
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
352 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
355 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
356 size_t count, loff_t *ppos)
358 struct hci_dev *hdev = file->private_data;
361 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
364 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
367 static ssize_t force_sc_support_write(struct file *file,
368 const char __user *user_buf,
369 size_t count, loff_t *ppos)
371 struct hci_dev *hdev = file->private_data;
373 size_t buf_size = min(count, (sizeof(buf)-1));
376 if (test_bit(HCI_UP, &hdev->flags))
379 if (copy_from_user(buf, user_buf, buf_size))
382 buf[buf_size] = '\0';
383 if (strtobool(buf, &enable))
386 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
389 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
394 static const struct file_operations force_sc_support_fops = {
396 .read = force_sc_support_read,
397 .write = force_sc_support_write,
398 .llseek = default_llseek,
401 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
404 struct hci_dev *hdev = file->private_data;
407 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413 static const struct file_operations sc_only_mode_fops = {
415 .read = sc_only_mode_read,
416 .llseek = default_llseek,
419 static int idle_timeout_set(void *data, u64 val)
421 struct hci_dev *hdev = data;
423 if (val != 0 && (val < 500 || val > 3600000))
427 hdev->idle_timeout = val;
428 hci_dev_unlock(hdev);
433 static int idle_timeout_get(void *data, u64 *val)
435 struct hci_dev *hdev = data;
438 *val = hdev->idle_timeout;
439 hci_dev_unlock(hdev);
444 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
445 idle_timeout_set, "%llu\n");
447 static int rpa_timeout_set(void *data, u64 val)
449 struct hci_dev *hdev = data;
451 /* Require the RPA timeout to be at least 30 seconds and at most
454 if (val < 30 || val > (60 * 60 * 24))
458 hdev->rpa_timeout = val;
459 hci_dev_unlock(hdev);
464 static int rpa_timeout_get(void *data, u64 *val)
466 struct hci_dev *hdev = data;
469 *val = hdev->rpa_timeout;
470 hci_dev_unlock(hdev);
475 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
476 rpa_timeout_set, "%llu\n");
478 static int sniff_min_interval_set(void *data, u64 val)
480 struct hci_dev *hdev = data;
482 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
486 hdev->sniff_min_interval = val;
487 hci_dev_unlock(hdev);
492 static int sniff_min_interval_get(void *data, u64 *val)
494 struct hci_dev *hdev = data;
497 *val = hdev->sniff_min_interval;
498 hci_dev_unlock(hdev);
503 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
504 sniff_min_interval_set, "%llu\n");
506 static int sniff_max_interval_set(void *data, u64 val)
508 struct hci_dev *hdev = data;
510 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
514 hdev->sniff_max_interval = val;
515 hci_dev_unlock(hdev);
520 static int sniff_max_interval_get(void *data, u64 *val)
522 struct hci_dev *hdev = data;
525 *val = hdev->sniff_max_interval;
526 hci_dev_unlock(hdev);
531 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
532 sniff_max_interval_set, "%llu\n");
534 static int conn_info_min_age_set(void *data, u64 val)
536 struct hci_dev *hdev = data;
538 if (val == 0 || val > hdev->conn_info_max_age)
542 hdev->conn_info_min_age = val;
543 hci_dev_unlock(hdev);
548 static int conn_info_min_age_get(void *data, u64 *val)
550 struct hci_dev *hdev = data;
553 *val = hdev->conn_info_min_age;
554 hci_dev_unlock(hdev);
559 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
560 conn_info_min_age_set, "%llu\n");
562 static int conn_info_max_age_set(void *data, u64 val)
564 struct hci_dev *hdev = data;
566 if (val == 0 || val < hdev->conn_info_min_age)
570 hdev->conn_info_max_age = val;
571 hci_dev_unlock(hdev);
576 static int conn_info_max_age_get(void *data, u64 *val)
578 struct hci_dev *hdev = data;
581 *val = hdev->conn_info_max_age;
582 hci_dev_unlock(hdev);
587 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
588 conn_info_max_age_set, "%llu\n");
590 static int identity_show(struct seq_file *f, void *p)
592 struct hci_dev *hdev = f->private;
598 hci_copy_identity_address(hdev, &addr, &addr_type);
600 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
601 16, hdev->irk, &hdev->rpa);
603 hci_dev_unlock(hdev);
608 static int identity_open(struct inode *inode, struct file *file)
610 return single_open(file, identity_show, inode->i_private);
613 static const struct file_operations identity_fops = {
614 .open = identity_open,
617 .release = single_release,
620 static int random_address_show(struct seq_file *f, void *p)
622 struct hci_dev *hdev = f->private;
625 seq_printf(f, "%pMR\n", &hdev->random_addr);
626 hci_dev_unlock(hdev);
631 static int random_address_open(struct inode *inode, struct file *file)
633 return single_open(file, random_address_show, inode->i_private);
636 static const struct file_operations random_address_fops = {
637 .open = random_address_open,
640 .release = single_release,
643 static int static_address_show(struct seq_file *f, void *p)
645 struct hci_dev *hdev = f->private;
648 seq_printf(f, "%pMR\n", &hdev->static_addr);
649 hci_dev_unlock(hdev);
654 static int static_address_open(struct inode *inode, struct file *file)
656 return single_open(file, static_address_show, inode->i_private);
659 static const struct file_operations static_address_fops = {
660 .open = static_address_open,
663 .release = single_release,
666 static ssize_t force_static_address_read(struct file *file,
667 char __user *user_buf,
668 size_t count, loff_t *ppos)
670 struct hci_dev *hdev = file->private_data;
673 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
676 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
679 static ssize_t force_static_address_write(struct file *file,
680 const char __user *user_buf,
681 size_t count, loff_t *ppos)
683 struct hci_dev *hdev = file->private_data;
685 size_t buf_size = min(count, (sizeof(buf)-1));
688 if (test_bit(HCI_UP, &hdev->flags))
691 if (copy_from_user(buf, user_buf, buf_size))
694 buf[buf_size] = '\0';
695 if (strtobool(buf, &enable))
698 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
701 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
706 static const struct file_operations force_static_address_fops = {
708 .read = force_static_address_read,
709 .write = force_static_address_write,
710 .llseek = default_llseek,
713 static int white_list_show(struct seq_file *f, void *ptr)
715 struct hci_dev *hdev = f->private;
716 struct bdaddr_list *b;
719 list_for_each_entry(b, &hdev->le_white_list, list)
720 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
721 hci_dev_unlock(hdev);
726 static int white_list_open(struct inode *inode, struct file *file)
728 return single_open(file, white_list_show, inode->i_private);
731 static const struct file_operations white_list_fops = {
732 .open = white_list_open,
735 .release = single_release,
738 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740 struct hci_dev *hdev = f->private;
741 struct list_head *p, *n;
744 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
745 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
746 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
747 &irk->bdaddr, irk->addr_type,
748 16, irk->val, &irk->rpa);
750 hci_dev_unlock(hdev);
755 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757 return single_open(file, identity_resolving_keys_show,
761 static const struct file_operations identity_resolving_keys_fops = {
762 .open = identity_resolving_keys_open,
765 .release = single_release,
768 static int long_term_keys_show(struct seq_file *f, void *ptr)
770 struct hci_dev *hdev = f->private;
771 struct list_head *p, *n;
774 list_for_each_safe(p, n, &hdev->long_term_keys) {
775 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
776 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
777 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
778 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
779 __le64_to_cpu(ltk->rand), 16, ltk->val);
781 hci_dev_unlock(hdev);
786 static int long_term_keys_open(struct inode *inode, struct file *file)
788 return single_open(file, long_term_keys_show, inode->i_private);
791 static const struct file_operations long_term_keys_fops = {
792 .open = long_term_keys_open,
795 .release = single_release,
798 static int conn_min_interval_set(void *data, u64 val)
800 struct hci_dev *hdev = data;
802 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
806 hdev->le_conn_min_interval = val;
807 hci_dev_unlock(hdev);
812 static int conn_min_interval_get(void *data, u64 *val)
814 struct hci_dev *hdev = data;
817 *val = hdev->le_conn_min_interval;
818 hci_dev_unlock(hdev);
823 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
824 conn_min_interval_set, "%llu\n");
826 static int conn_max_interval_set(void *data, u64 val)
828 struct hci_dev *hdev = data;
830 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
834 hdev->le_conn_max_interval = val;
835 hci_dev_unlock(hdev);
840 static int conn_max_interval_get(void *data, u64 *val)
842 struct hci_dev *hdev = data;
845 *val = hdev->le_conn_max_interval;
846 hci_dev_unlock(hdev);
851 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
852 conn_max_interval_set, "%llu\n");
854 static int adv_channel_map_set(void *data, u64 val)
856 struct hci_dev *hdev = data;
858 if (val < 0x01 || val > 0x07)
862 hdev->le_adv_channel_map = val;
863 hci_dev_unlock(hdev);
868 static int adv_channel_map_get(void *data, u64 *val)
870 struct hci_dev *hdev = data;
873 *val = hdev->le_adv_channel_map;
874 hci_dev_unlock(hdev);
879 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
880 adv_channel_map_set, "%llu\n");
882 static int le_auto_conn_show(struct seq_file *sf, void *ptr)
884 struct hci_dev *hdev = sf->private;
885 struct hci_conn_params *p;
889 list_for_each_entry(p, &hdev->le_conn_params, list) {
890 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
894 hci_dev_unlock(hdev);
899 static int le_auto_conn_open(struct inode *inode, struct file *file)
901 return single_open(file, le_auto_conn_show, inode->i_private);
904 static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
905 size_t count, loff_t *offset)
907 struct seq_file *sf = file->private_data;
908 struct hci_dev *hdev = sf->private;
916 /* Don't allow partial write */
923 buf = memdup_user(data, count);
927 if (memcmp(buf, "add", 3) == 0) {
928 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
929 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
930 &addr.b[1], &addr.b[0], &addr_type,
939 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
940 hdev->le_conn_min_interval,
941 hdev->le_conn_max_interval);
942 hci_dev_unlock(hdev);
946 } else if (memcmp(buf, "del", 3) == 0) {
947 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
948 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
949 &addr.b[1], &addr.b[0], &addr_type);
957 hci_conn_params_del(hdev, &addr, addr_type);
958 hci_dev_unlock(hdev);
959 } else if (memcmp(buf, "clr", 3) == 0) {
961 hci_conn_params_clear(hdev);
962 hci_pend_le_conns_clear(hdev);
963 hci_update_background_scan(hdev);
964 hci_dev_unlock(hdev);
978 static const struct file_operations le_auto_conn_fops = {
979 .open = le_auto_conn_open,
981 .write = le_auto_conn_write,
983 .release = single_release,
986 /* ---- HCI requests ---- */
988 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
990 BT_DBG("%s result 0x%2.2x", hdev->name, result);
992 if (hdev->req_status == HCI_REQ_PEND) {
993 hdev->req_result = result;
994 hdev->req_status = HCI_REQ_DONE;
995 wake_up_interruptible(&hdev->req_wait_q);
999 static void hci_req_cancel(struct hci_dev *hdev, int err)
1001 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1003 if (hdev->req_status == HCI_REQ_PEND) {
1004 hdev->req_result = err;
1005 hdev->req_status = HCI_REQ_CANCELED;
1006 wake_up_interruptible(&hdev->req_wait_q);
1010 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1013 struct hci_ev_cmd_complete *ev;
1014 struct hci_event_hdr *hdr;
1015 struct sk_buff *skb;
1019 skb = hdev->recv_evt;
1020 hdev->recv_evt = NULL;
1022 hci_dev_unlock(hdev);
1025 return ERR_PTR(-ENODATA);
1027 if (skb->len < sizeof(*hdr)) {
1028 BT_ERR("Too short HCI event");
1032 hdr = (void *) skb->data;
1033 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1036 if (hdr->evt != event)
1041 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1042 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1046 if (skb->len < sizeof(*ev)) {
1047 BT_ERR("Too short cmd_complete event");
1051 ev = (void *) skb->data;
1052 skb_pull(skb, sizeof(*ev));
1054 if (opcode == __le16_to_cpu(ev->opcode))
1057 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1058 __le16_to_cpu(ev->opcode));
1062 return ERR_PTR(-ENODATA);
1065 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1066 const void *param, u8 event, u32 timeout)
1068 DECLARE_WAITQUEUE(wait, current);
1069 struct hci_request req;
1072 BT_DBG("%s", hdev->name);
1074 hci_req_init(&req, hdev);
1076 hci_req_add_ev(&req, opcode, plen, param, event);
1078 hdev->req_status = HCI_REQ_PEND;
1080 err = hci_req_run(&req, hci_req_sync_complete);
1082 return ERR_PTR(err);
1084 add_wait_queue(&hdev->req_wait_q, &wait);
1085 set_current_state(TASK_INTERRUPTIBLE);
1087 schedule_timeout(timeout);
1089 remove_wait_queue(&hdev->req_wait_q, &wait);
1091 if (signal_pending(current))
1092 return ERR_PTR(-EINTR);
1094 switch (hdev->req_status) {
1096 err = -bt_to_errno(hdev->req_result);
1099 case HCI_REQ_CANCELED:
1100 err = -hdev->req_result;
1108 hdev->req_status = hdev->req_result = 0;
1110 BT_DBG("%s end: err %d", hdev->name, err);
1113 return ERR_PTR(err);
1115 return hci_get_cmd_complete(hdev, opcode, event);
1117 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1119 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1120 const void *param, u32 timeout)
1122 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1124 EXPORT_SYMBOL(__hci_cmd_sync);
1126 /* Execute request and wait for completion. */
1127 static int __hci_req_sync(struct hci_dev *hdev,
1128 void (*func)(struct hci_request *req,
1130 unsigned long opt, __u32 timeout)
1132 struct hci_request req;
1133 DECLARE_WAITQUEUE(wait, current);
1136 BT_DBG("%s start", hdev->name);
1138 hci_req_init(&req, hdev);
1140 hdev->req_status = HCI_REQ_PEND;
1144 err = hci_req_run(&req, hci_req_sync_complete);
1146 hdev->req_status = 0;
1148 /* ENODATA means the HCI request command queue is empty.
1149 * This can happen when a request with conditionals doesn't
1150 * trigger any commands to be sent. This is normal behavior
1151 * and should not trigger an error return.
1153 if (err == -ENODATA)
1159 add_wait_queue(&hdev->req_wait_q, &wait);
1160 set_current_state(TASK_INTERRUPTIBLE);
1162 schedule_timeout(timeout);
1164 remove_wait_queue(&hdev->req_wait_q, &wait);
1166 if (signal_pending(current))
1169 switch (hdev->req_status) {
1171 err = -bt_to_errno(hdev->req_result);
1174 case HCI_REQ_CANCELED:
1175 err = -hdev->req_result;
1183 hdev->req_status = hdev->req_result = 0;
1185 BT_DBG("%s end: err %d", hdev->name, err);
1190 static int hci_req_sync(struct hci_dev *hdev,
1191 void (*req)(struct hci_request *req,
1193 unsigned long opt, __u32 timeout)
1197 if (!test_bit(HCI_UP, &hdev->flags))
1200 /* Serialize all requests */
1202 ret = __hci_req_sync(hdev, req, opt, timeout);
1203 hci_req_unlock(hdev);
1208 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1210 BT_DBG("%s %ld", req->hdev->name, opt);
1213 set_bit(HCI_RESET, &req->hdev->flags);
1214 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1217 static void bredr_init(struct hci_request *req)
1219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local Version */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1227 /* Read BD Address */
1228 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1231 static void amp_init(struct hci_request *req)
1233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1235 /* Read Local Version */
1236 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1238 /* Read Local Supported Commands */
1239 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1241 /* Read Local Supported Features */
1242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1244 /* Read Local AMP Info */
1245 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1247 /* Read Data Blk size */
1248 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1250 /* Read Flow Control Mode */
1251 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1253 /* Read Location Data */
1254 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1257 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1259 struct hci_dev *hdev = req->hdev;
1261 BT_DBG("%s %ld", hdev->name, opt);
1264 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1265 hci_reset_req(req, 0);
1267 switch (hdev->dev_type) {
1277 BT_ERR("Unknown device type %d", hdev->dev_type);
1282 static void bredr_setup(struct hci_request *req)
1284 struct hci_dev *hdev = req->hdev;
1289 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1290 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1292 /* Read Class of Device */
1293 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1295 /* Read Local Name */
1296 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1298 /* Read Voice Setting */
1299 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1301 /* Read Number of Supported IAC */
1302 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1304 /* Read Current IAC LAP */
1305 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1307 /* Clear Event Filters */
1308 flt_type = HCI_FLT_CLEAR_ALL;
1309 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1311 /* Connection accept timeout ~20 secs */
1312 param = cpu_to_le16(0x7d00);
1313 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1315 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1316 * but it does not support page scan related HCI commands.
1318 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1319 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1320 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1324 static void le_setup(struct hci_request *req)
1326 struct hci_dev *hdev = req->hdev;
1328 /* Read LE Buffer Size */
1329 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1331 /* Read LE Local Supported Features */
1332 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1334 /* Read LE Supported States */
1335 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1337 /* Read LE Advertising Channel TX Power */
1338 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1340 /* Read LE White List Size */
1341 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1343 /* Clear LE White List */
1344 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1346 /* LE-only controllers have LE implicitly enabled */
1347 if (!lmp_bredr_capable(hdev))
1348 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1351 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1353 if (lmp_ext_inq_capable(hdev))
1356 if (lmp_inq_rssi_capable(hdev))
1359 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1360 hdev->lmp_subver == 0x0757)
1363 if (hdev->manufacturer == 15) {
1364 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1366 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1368 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1372 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1373 hdev->lmp_subver == 0x1805)
1379 static void hci_setup_inquiry_mode(struct hci_request *req)
1383 mode = hci_get_inquiry_mode(req->hdev);
1385 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1388 static void hci_setup_event_mask(struct hci_request *req)
1390 struct hci_dev *hdev = req->hdev;
1392 /* The second byte is 0xff instead of 0x9f (two reserved bits
1393 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1394 * command otherwise.
1396 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1398 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1399 * any event mask for pre 1.2 devices.
1401 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1404 if (lmp_bredr_capable(hdev)) {
1405 events[4] |= 0x01; /* Flow Specification Complete */
1406 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1408 events[5] |= 0x08; /* Synchronous Connection Complete */
1409 events[5] |= 0x10; /* Synchronous Connection Changed */
1411 /* Use a different default for LE-only devices */
1412 memset(events, 0, sizeof(events));
1413 events[0] |= 0x10; /* Disconnection Complete */
1414 events[0] |= 0x80; /* Encryption Change */
1415 events[1] |= 0x08; /* Read Remote Version Information Complete */
1416 events[1] |= 0x20; /* Command Complete */
1417 events[1] |= 0x40; /* Command Status */
1418 events[1] |= 0x80; /* Hardware Error */
1419 events[2] |= 0x04; /* Number of Completed Packets */
1420 events[3] |= 0x02; /* Data Buffer Overflow */
1421 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1424 if (lmp_inq_rssi_capable(hdev))
1425 events[4] |= 0x02; /* Inquiry Result with RSSI */
1427 if (lmp_sniffsubr_capable(hdev))
1428 events[5] |= 0x20; /* Sniff Subrating */
1430 if (lmp_pause_enc_capable(hdev))
1431 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1433 if (lmp_ext_inq_capable(hdev))
1434 events[5] |= 0x40; /* Extended Inquiry Result */
1436 if (lmp_no_flush_capable(hdev))
1437 events[7] |= 0x01; /* Enhanced Flush Complete */
1439 if (lmp_lsto_capable(hdev))
1440 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1442 if (lmp_ssp_capable(hdev)) {
1443 events[6] |= 0x01; /* IO Capability Request */
1444 events[6] |= 0x02; /* IO Capability Response */
1445 events[6] |= 0x04; /* User Confirmation Request */
1446 events[6] |= 0x08; /* User Passkey Request */
1447 events[6] |= 0x10; /* Remote OOB Data Request */
1448 events[6] |= 0x20; /* Simple Pairing Complete */
1449 events[7] |= 0x04; /* User Passkey Notification */
1450 events[7] |= 0x08; /* Keypress Notification */
1451 events[7] |= 0x10; /* Remote Host Supported
1452 * Features Notification
1456 if (lmp_le_capable(hdev))
1457 events[7] |= 0x20; /* LE Meta-Event */
1459 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1461 if (lmp_le_capable(hdev)) {
1462 memset(events, 0, sizeof(events));
1464 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1465 sizeof(events), events);
/* Stage-2 controller init: queue BR/EDR and SSP related commands on
 * the synchronous init request.  NOTE(review): several lines of this
 * function (the bredr_setup()/le_setup() calls, the SSP mode variable,
 * the else-branch and closing braces) appear to have been dropped from
 * this extract -- compare against upstream hci_core.c before editing.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
	/* Non-BR/EDR controllers cannot have BR/EDR enabled */
	clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			/* NOTE(review): the "u8 mode = 0x01;" line looks dropped */
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
			/* Host does not want SSP: clear EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		/* NOTE(review): the "u8 enable = 1;" line looks dropped */
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1533 static void hci_setup_link_policy(struct hci_request *req)
1535 struct hci_dev *hdev = req->hdev;
1536 struct hci_cp_write_def_link_policy cp;
1537 u16 link_policy = 0;
1539 if (lmp_rswitch_capable(hdev))
1540 link_policy |= HCI_LP_RSWITCH;
1541 if (lmp_hold_capable(hdev))
1542 link_policy |= HCI_LP_HOLD;
1543 if (lmp_sniff_capable(hdev))
1544 link_policy |= HCI_LP_SNIFF;
1545 if (lmp_park_capable(hdev))
1546 link_policy |= HCI_LP_PARK;
1548 cp.policy = cpu_to_le16(link_policy);
1549 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Sync the controller's LE Host Supported setting with the host's
 * HCI_LE_ENABLED flag (dual-mode controllers only).
 * NOTE(review): the early return and the "cp.le = 0x01;" assignment
 * appear to be missing from this extract.
 */
static void hci_set_le_support(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.simul = lmp_le_br_capable(hdev);

	/* Only send the command when the value would actually change */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build event mask page 2 from CSB master/slave and ping support and
 * queue the Set Event Mask Page 2 command.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		/* NOTE(review): the events[2] |= 0x80 line looks dropped here */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: stored-link-key cleanup, default link
 * policy, LE host support and extended feature pages.
 * NOTE(review): loop variable declaration ("u8 p;") and several
 * closing braces appear dropped from this extract.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,

	/* Write Default Link Policy is command bit 4 of octet 5 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: event mask page 2, sync train parameters
 * and Secure Connections enablement.
 * NOTE(review): the "u8 support = 0x01;" declaration and closing
 * braces appear dropped from this extract.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
/* Run the full four-stage synchronous initialisation sequence for a
 * newly opened controller and, during the HCI_SETUP phase only, create
 * the per-device debugfs entries.
 * NOTE(review): error-check lines after each __hci_req_sync() call,
 * a few fops arguments and closing braces appear dropped from this
 * extract -- compare against upstream before relying on control flow.
 */
static int __hci_init(struct hci_dev *hdev)
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 */
	if (hdev->dev_type != HCI_BREDR)

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))

	/* Generic controller information */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);

	/* Secure Simple Pairing entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);

	/* Sniff mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);

	/* LE-only entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   &hdev->discov_interleaved_timeout);
1800 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1804 BT_DBG("%s %x", req->hdev->name, scan);
1806 /* Inquiry and Page scans */
1807 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1810 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1814 BT_DBG("%s %x", req->hdev->name, auth);
1816 /* Authentication */
1817 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1820 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1824 BT_DBG("%s %x", req->hdev->name, encrypt);
1827 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1830 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1832 __le16 policy = cpu_to_le16(opt);
1834 BT_DBG("%s %x", req->hdev->name, policy);
1836 /* Default link policy */
1837 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1840 /* Get HCI device by index.
1841 * Device is held on return. */
1842 struct hci_dev *hci_dev_get(int index)
1844 struct hci_dev *hdev = NULL, *d;
1846 BT_DBG("%d", index);
1851 read_lock(&hci_dev_list_lock);
1852 list_for_each_entry(d, &hci_dev_list, list) {
1853 if (d->id == index) {
1854 hdev = hci_dev_hold(d);
1858 read_unlock(&hci_dev_list_lock);
1862 /* ---- Inquiry support ---- */
1864 bool hci_discovery_active(struct hci_dev *hdev)
1866 struct discovery_state *discov = &hdev->discovery;
1868 switch (discov->state) {
1869 case DISCOVERY_FINDING:
1870 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and notify the management
 * interface when the "discovering" status changes.
 * NOTE(review): the "switch (state) {" line, the break statements and
 * closing braces appear dropped from this extract.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* Nothing to do when the state does not change */
	if (hdev->discovery.state == state)

	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Don't report "stopped" if discovery never actually started */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);

	case DISCOVERY_STARTING:

	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);

	case DISCOVERY_RESOLVING:

	case DISCOVERY_STOPPING:

	hdev->discovery.state = state;
1906 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1908 struct discovery_state *cache = &hdev->discovery;
1909 struct inquiry_entry *p, *n;
1911 list_for_each_entry_safe(p, n, &cache->all, all) {
1916 INIT_LIST_HEAD(&cache->unknown);
1917 INIT_LIST_HEAD(&cache->resolve);
1920 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1923 struct discovery_state *cache = &hdev->discovery;
1924 struct inquiry_entry *e;
1926 BT_DBG("cache %p, %pMR", cache, bdaddr);
1928 list_for_each_entry(e, &cache->all, all) {
1929 if (!bacmp(&e->data.bdaddr, bdaddr))
1936 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1939 struct discovery_state *cache = &hdev->discovery;
1940 struct inquiry_entry *e;
1942 BT_DBG("cache %p, %pMR", cache, bdaddr);
1944 list_for_each_entry(e, &cache->unknown, list) {
1945 if (!bacmp(&e->data.bdaddr, bdaddr))
1952 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1956 struct discovery_state *cache = &hdev->discovery;
1957 struct inquiry_entry *e;
1959 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1961 list_for_each_entry(e, &cache->resolve, list) {
1962 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1964 if (!bacmp(&e->data.bdaddr, bdaddr))
1971 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1972 struct inquiry_entry *ie)
1974 struct discovery_state *cache = &hdev->discovery;
1975 struct list_head *pos = &cache->resolve;
1976 struct inquiry_entry *p;
1978 list_del(&ie->list);
1980 list_for_each_entry(p, &cache->resolve, list) {
1981 if (p->name_state != NAME_PENDING &&
1982 abs(p->data.rssi) >= abs(ie->data.rssi))
1987 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache.
 * Returns whether a name request is still needed for this device.
 * NOTE(review): several lines (goto labels such as update/done, the
 * allocation-failure check, the "else" branches) appear dropped from
 * this extract -- compare with upstream before editing.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
		if (ie->data.ssp_mode)

		/* Update RSSI of an existing entry that still needs its
		 * name resolved, and keep the resolve list ordered.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);

	list_add(&ie->all, &cache->all);

		ie->name_state = NAME_KNOWN;
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);

	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to num cached inquiry results into buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.
 * NOTE(review): the "copied" counter declaration/increment and the
 * loop-limit check appear dropped from this extract.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

	BT_DBG("cache %p, copied %d", cache, copied);
2075 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2077 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2078 struct hci_dev *hdev = req->hdev;
2079 struct hci_cp_inquiry cp;
2081 BT_DBG("%s", hdev->name);
2083 if (test_bit(HCI_INQUIRY, &hdev->flags))
2087 memcpy(&cp.lap, &ir->lap, 3);
2088 cp.length = ir->length;
2089 cp.num_rsp = ir->num_rsp;
2090 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action used while waiting for HCI_INQUIRY to clear;
 * a non-zero return aborts the wait when a signal is pending.
 * NOTE(review): the schedule() call usually present in such action
 * functions appears dropped from this extract -- confirm upstream.
 */
static int wait_inquiry(void *word)
	return signal_pending(current);
/* HCIINQUIRY ioctl handler: run an inquiry (or reuse a fresh cache),
 * then copy the request struct and the collected inquiry_info array
 * back to user space.
 * NOTE(review): multiple lines (goto done/err labels, hci_dev_lock()
 * calls, kfree(buf), hci_dev_put()) appear dropped from this extract.
 */
int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	hdev = hci_dev_get(ir.dev_id);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {

	if (hdev->dev_type != HCI_BREDR) {

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {

	/* Flush the cache when it is stale, empty or the caller asked */
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,

	/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
	 * cleared). If it is interrupted by a signal, return -EINTR.
	 */
	if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
			TASK_INTERRUPTIBLE))

	/* for unlimited number of responses we will use buffer with
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);

	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on and initialise a controller: driver open, optional
 * driver-specific setup during HCI_SETUP, then the __hci_init()
 * request sequence.  On failure the transport is torn down again.
 * NOTE(review): lines including hci_req_lock(), several goto done
 * statements, the "if (!ret)" around init and the hdev->close()
 * call appear dropped from this extract.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;

	if (test_bit(HCI_UP, &hdev->flags)) {

	if (hdev->open(hdev)) {

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		ret = __hci_init(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	/* Success path: mark device up and notify */
	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	set_bit(HCI_UP, &hdev->flags);
	hci_notify(hdev, HCI_DEV_UP);
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    hdev->dev_type == HCI_BREDR) {
		mgmt_powered(hdev, 1);
		hci_dev_unlock(hdev);

	/* Init failed, cleanup */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->cmd_work);
	flush_work(&hdev->rx_work);

	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->rx_q);

	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;

	/* Drop everything except persistent raw flag */
	hdev->flags &= BIT(HCI_RAW);

	hci_req_unlock(hdev);
2299 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl handler: resolve the device index and bring the
 * controller up via hci_dev_do_open() after flushing pending
 * power-on/off work.
 * NOTE(review): error-return lines and hci_dev_put() appear dropped
 * from this extract.
 */
int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);
/* Power off a controller: stop timers and work items, flush queues,
 * optionally issue an HCI Reset, close the driver and clear all
 * non-persistent state.
 * NOTE(review): lines including hci_req_lock(), hci_dev_lock(),
 * hdev->flush()/hdev->close() calls and "return 0" appear dropped
 * from this extract.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just stop the command timer */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */

	/* Clear flags, keep persistent bits only */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl handler: resolve the device index, cancel a
 * pending auto power-off and close the device.
 * NOTE(review): error-return lines and hci_dev_put() appear dropped
 * from this extract.
 */
int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: drop queues, flush connection and
 * inquiry state and issue a synchronous HCI Reset.
 * NOTE(review): hci_req_lock(), hci_dev_lock(), the hdev->flush()
 * call, counter resets and goto done lines appear dropped from this
 * extract.
 */
int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (!test_bit(HCI_UP, &hdev->flags)) {

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl handler: zero the per-device statistics.
 * NOTE(review): error-return lines and hci_dev_put() appear dropped
 * from this extract.
 */
int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Handler for the HCISET* family of ioctls: dispatch on cmd and
 * apply the requested setting, either by issuing a synchronous HCI
 * request or by updating hdev fields directly.
 * NOTE(review): the "switch (cmd) {" line, most case labels
 * (HCISETAUTH/HCISETENCRYPT/HCISETSCAN/...), break statements and
 * closing braces appear dropped from this extract.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;

	if (copy_from_user(&dr, arg, sizeof(dr)))

	hdev = hci_dev_get(dr.dev_id);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {

	if (hdev->dev_type != HCI_BREDR) {

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {

		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,

		if (!lmp_encrypt_capable(hdev)) {

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,

		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,

		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);

		hdev->pkt_type = (__u16) dr.dev_opt;

		/* dev_opt packs MTU in the high word, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: build an hci_dev_list_req with one
 * hci_dev_req per registered device and copy it to user space.
 * NOTE(review): the dev_num declaration, the loop-limit check, the
 * n++ increment, dl->dev_num assignment and kfree(dl) appear
 * dropped from this extract.
 */
int hci_get_dev_list(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;

	if (get_user(dev_num, (__u16 __user *) arg))

	/* Sanity-check the requested count against an upper bound */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

	read_unlock(&hci_dev_list_lock);

	/* Shrink to the number of devices actually found */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);

	return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for the
 * requested device and copy it back to user space.
 * NOTE(review): error-return lines, the else-branch brace around the
 * LE MTU assignment and hci_dev_put() appear dropped from this
 * extract.
 */
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;

	if (copy_from_user(&di, arg, sizeof(di)))

	hdev = hci_dev_get(di.dev_id);

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
		/* LE-only controllers report the LE buffer info instead */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;

	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
2740 /* ---- Interface to HCI drivers ---- */
2742 static int hci_rfkill_set_block(void *data, bool blocked)
2744 struct hci_dev *hdev = data;
2746 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2748 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2752 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2753 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2754 hci_dev_do_close(hdev);
2756 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations: only block/unblock is supported */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check the error
 * conditions that were deferred during setup, and schedule auto
 * power-off or announce the new index to mgmt as appropriate.
 * NOTE(review): the "return" after mgmt_set_powered_failed() and
 * closing braces appear dropped from this extract.
 */
static void hci_power_on(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
		mgmt_set_powered_failed(hdev, err);

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
2800 static void hci_power_off(struct work_struct *work)
2802 struct hci_dev *hdev = container_of(work, struct hci_dev,
2805 BT_DBG("%s", hdev->name);
2807 hci_dev_do_close(hdev);
2810 static void hci_discov_off(struct work_struct *work)
2812 struct hci_dev *hdev;
2814 hdev = container_of(work, struct hci_dev, discov_off.work);
2816 BT_DBG("%s", hdev->name);
2818 mgmt_discoverable_timeout(hdev);
2821 void hci_uuids_clear(struct hci_dev *hdev)
2823 struct bt_uuid *uuid, *tmp;
2825 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2826 list_del(&uuid->list);
2831 void hci_link_keys_clear(struct hci_dev *hdev)
2833 struct list_head *p, *n;
2835 list_for_each_safe(p, n, &hdev->link_keys) {
2836 struct link_key *key;
2838 key = list_entry(p, struct link_key, list);
2845 void hci_smp_ltks_clear(struct hci_dev *hdev)
2847 struct smp_ltk *k, *tmp;
2849 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2855 void hci_smp_irks_clear(struct hci_dev *hdev)
2857 struct smp_irk *k, *tmp;
2859 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2865 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2869 list_for_each_entry(k, &hdev->link_keys, list)
2870 if (bacmp(bdaddr, &k->bdaddr) == 0)
2876 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2877 u8 key_type, u8 old_key_type)
2880 if (key_type < 0x03)
2883 /* Debug keys are insecure so don't store them persistently */
2884 if (key_type == HCI_LK_DEBUG_COMBINATION)
2887 /* Changed combination key and there's no previous one */
2888 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2891 /* Security mode 3 case */
2895 /* Neither local nor remote side had no-bonding as requirement */
2896 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2899 /* Local side had dedicated bonding as requirement */
2900 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2903 /* Remote side had dedicated bonding as requirement */
2904 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2907 /* If none of the above criteria match, then don't store the key
2912 static bool ltk_type_master(u8 type)
2914 return (type == SMP_LTK);
2917 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2922 list_for_each_entry(k, &hdev->long_term_keys, list) {
2923 if (k->ediv != ediv || k->rand != rand)
2926 if (ltk_type_master(k->type) != master)
2935 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2936 u8 addr_type, bool master)
2940 list_for_each_entry(k, &hdev->long_term_keys, list)
2941 if (addr_type == k->bdaddr_type &&
2942 bacmp(bdaddr, &k->bdaddr) == 0 &&
2943 ltk_type_master(k->type) == master)
2949 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2951 struct smp_irk *irk;
2953 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2954 if (!bacmp(&irk->rpa, rpa))
2958 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2959 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2960 bacpy(&irk->rpa, rpa);
2968 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2971 struct smp_irk *irk;
2973 /* Identity Address must be public or static random */
2974 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2977 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978 if (addr_type == irk->addr_type &&
2979 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for bdaddr and report via
 * *persistent whether it should be kept across power cycles.
 * NOTE(review): the old-key reuse branch, NULL-alloc check, the
 * "key->type = type" else-branch and the final return appear
 * dropped from this extract.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
	struct link_key *key, *old_key;

	old_key = hci_find_link_key(hdev, bdaddr);
		old_key_type = old_key->type;

		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		list_add(&key->list, &hdev->link_keys);

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
			conn->key_type = type;

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* Keep the previous type when the controller only reported a
	 * changed combination key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an LE Long Term Key for @bdaddr/@addr_type.
 *
 * An existing key matching address and role (master/slave, derived from
 * @type) is reused; otherwise a new entry is allocated and added to
 * hdev->long_term_keys.  @tk is the 16-byte key value, @enc_size the
 * negotiated encryption key size, @ediv/@rand the diversifier pair.
 * NOTE(review): ediv/rand assignment and return lines are elided in
 * this sampled view.
 */
3033 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3034 u8 addr_type, u8 type, u8 authenticated,
3035 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3037 struct smp_ltk *key, *old_key;
3038 bool master = ltk_type_master(type);
3040 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3044 key = kzalloc(sizeof(*key), GFP_KERNEL);
3047 list_add(&key->list, &hdev->long_term_keys);
3050 bacpy(&key->bdaddr, bdaddr);
3051 key->bdaddr_type = addr_type;
3052 memcpy(key->val, tk, sizeof(key->val));
3053 key->authenticated = authenticated;
3056 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key for @bdaddr/@addr_type.
 * Reuses an existing entry when present, otherwise allocates one and
 * links it into hdev->identity_resolving_keys.  @val is the 16-byte
 * IRK; @rpa is the peer's current resolvable private address.
 * (Allocation-failure and return paths elided in this sampled view.)
 */
3062 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3063 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3065 struct smp_irk *irk;
3067 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3069 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3073 bacpy(&irk->bdaddr, bdaddr);
3074 irk->addr_type = addr_type;
3076 list_add(&irk->list, &hdev->identity_resolving_keys);
3079 memcpy(irk->val, val, 16);
3080 bacpy(&irk->rpa, rpa);
3085 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3087 struct link_key *key;
3089 key = hci_find_link_key(hdev, bdaddr);
3093 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3095 list_del(&key->list);
/* Remove all stored LTKs matching @bdaddr/@bdaddr_type.  Uses the
 * _safe iterator because entries are deleted while walking.  Returns 0
 * if at least one key was removed, -ENOENT otherwise.
 * (list_del/kfree lines elided in this sampled view.)
 */
3101 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3103 struct smp_ltk *k, *tmp;
3106 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3107 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3110 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3117 return removed ? 0 : -ENOENT;
3120 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3122 struct smp_irk *k, *tmp;
3124 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3125 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3128 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3135 /* HCI command timer function */
/* Invoked when the controller fails to answer a queued HCI command in
 * time.  Logs the stuck opcode (if the sent command skb is still
 * around), then resets cmd_cnt to 1 and re-kicks the command work so
 * the queue is not blocked forever.
 */
3136 static void hci_cmd_timeout(struct work_struct *work)
3138 struct hci_dev *hdev = container_of(work, struct hci_dev,
3141 if (hdev->sent_cmd) {
3142 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3143 u16 opcode = __le16_to_cpu(sent->opcode);
3145 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3147 BT_ERR("%s command tx timeout", hdev->name);
/* Allow the next command to be sent and restart the cmd worker */
3150 atomic_set(&hdev->cmd_cnt, 1);
3151 queue_work(hdev->workqueue, &hdev->cmd_work);
3154 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3157 struct oob_data *data;
3159 list_for_each_entry(data, &hdev->remote_oob_data, list)
3160 if (bacmp(bdaddr, &data->bdaddr) == 0)
3166 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3168 struct oob_data *data;
3170 data = hci_find_remote_oob_data(hdev, bdaddr);
3174 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3176 list_del(&data->list);
3182 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3184 struct oob_data *data, *n;
3186 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3187 list_del(&data->list);
/* Store remote Out-Of-Band pairing data (P-192 hash + randomizer) for
 * @bdaddr, reusing an existing entry when present.  The P-256 fields
 * are zeroed since this is the legacy (non-extended) variant; see
 * hci_add_remote_oob_ext_data for both key sizes.
 * (Allocation-failure and return paths elided in this sampled view.)
 */
3192 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3193 u8 *hash, u8 *randomizer)
3195 struct oob_data *data;
3197 data = hci_find_remote_oob_data(hdev, bdaddr);
3199 data = kmalloc(sizeof(*data), GFP_KERNEL);
3203 bacpy(&data->bdaddr, bdaddr);
3204 list_add(&data->list, &hdev->remote_oob_data);
3207 memcpy(data->hash192, hash, sizeof(data->hash192));
3208 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
/* No P-256 values in the legacy variant: clear them explicitly */
3210 memset(data->hash256, 0, sizeof(data->hash256));
3211 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3213 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3218 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3219 u8 *hash192, u8 *randomizer192,
3220 u8 *hash256, u8 *randomizer256)
3222 struct oob_data *data;
3224 data = hci_find_remote_oob_data(hdev, bdaddr);
3226 data = kmalloc(sizeof(*data), GFP_KERNEL);
3230 bacpy(&data->bdaddr, bdaddr);
3231 list_add(&data->list, &hdev->remote_oob_data);
3234 memcpy(data->hash192, hash192, sizeof(data->hash192));
3235 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3237 memcpy(data->hash256, hash256, sizeof(data->hash256));
3238 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3240 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3245 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3246 bdaddr_t *bdaddr, u8 type)
3248 struct bdaddr_list *b;
3250 list_for_each_entry(b, &hdev->blacklist, list) {
3251 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3258 static void hci_blacklist_clear(struct hci_dev *hdev)
3260 struct list_head *p, *n;
3262 list_for_each_safe(p, n, &hdev->blacklist) {
3263 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to the device blacklist.  Rejects BDADDR_ANY and
 * duplicates, then notifies mgmt via mgmt_device_blocked().
 * (Error-return lines for the reject/OOM cases elided in this view.)
 */
3270 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3272 struct bdaddr_list *entry;
3274 if (!bacmp(bdaddr, BDADDR_ANY))
3277 if (hci_blacklist_lookup(hdev, bdaddr, type))
3280 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3284 bacpy(&entry->bdaddr, bdaddr);
3285 entry->bdaddr_type = type;
3287 list_add(&entry->list, &hdev->blacklist);
3289 return mgmt_device_blocked(hdev, bdaddr, type);
3292 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3294 struct bdaddr_list *entry;
3296 if (!bacmp(bdaddr, BDADDR_ANY)) {
3297 hci_blacklist_clear(hdev);
3301 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3305 list_del(&entry->list);
3308 return mgmt_device_unblocked(hdev, bdaddr, type);
3311 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3312 bdaddr_t *bdaddr, u8 type)
3314 struct bdaddr_list *b;
3316 list_for_each_entry(b, &hdev->le_white_list, list) {
3317 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3324 void hci_white_list_clear(struct hci_dev *hdev)
3326 struct list_head *p, *n;
3328 list_for_each_safe(p, n, &hdev->le_white_list) {
3329 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3336 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3338 struct bdaddr_list *entry;
3340 if (!bacmp(bdaddr, BDADDR_ANY))
3343 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3347 bacpy(&entry->bdaddr, bdaddr);
3348 entry->bdaddr_type = type;
3350 list_add(&entry->list, &hdev->le_white_list);
3355 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3357 struct bdaddr_list *entry;
3359 if (!bacmp(bdaddr, BDADDR_ANY))
3362 entry = hci_white_list_lookup(hdev, bdaddr, type);
3366 list_del(&entry->list);
3372 /* This function requires the caller holds hdev->lock */
3373 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3374 bdaddr_t *addr, u8 addr_type)
3376 struct hci_conn_params *params;
3378 list_for_each_entry(params, &hdev->le_conn_params, list) {
3379 if (bacmp(¶ms->addr, addr) == 0 &&
3380 params->addr_type == addr_type) {
3388 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3390 struct hci_conn *conn;
3392 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3396 if (conn->dst_type != type)
3399 if (conn->state != BT_CONNECTED)
/* Whether @addr/@addr_type is an LE Identity Address: either a public
 * address, or a static random address (top two bits set).
 * (Return lines elided in this sampled view.)
 */
3405 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3407 if (addr_type == ADDR_LE_DEV_PUBLIC)
3410 /* Check for Random Static address type */
3411 if ((addr->b[5] & 0xc0) == 0xc0)
3417 /* This function requires the caller holds hdev->lock */
3418 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3419 bdaddr_t *addr, u8 addr_type)
3421 struct bdaddr_list *entry;
3423 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3424 if (bacmp(&entry->bdaddr, addr) == 0 &&
3425 entry->bdaddr_type == addr_type)
3432 /* This function requires the caller holds hdev->lock */
3433 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3435 struct bdaddr_list *entry;
3437 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3441 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3443 BT_ERR("Out of memory");
3447 bacpy(&entry->bdaddr, addr);
3448 entry->bdaddr_type = addr_type;
3450 list_add(&entry->list, &hdev->pend_le_conns);
3452 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3455 hci_update_background_scan(hdev);
3458 /* This function requires the caller holds hdev->lock */
3459 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3461 struct bdaddr_list *entry;
3463 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3467 list_del(&entry->list);
3470 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3473 hci_update_background_scan(hdev);
3476 /* This function requires the caller holds hdev->lock */
3477 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3479 struct bdaddr_list *entry, *tmp;
3481 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3482 list_del(&entry->list);
3486 BT_DBG("All LE pending connections cleared");
3489 /* This function requires the caller holds hdev->lock */
/* Add (or update) LE connection parameters for @addr/@addr_type.
 *
 * The address must be an identity address (public or static random).
 * Depending on @auto_connect the device is removed from or added to
 * the pending-LE-connections list:
 *   DISABLED/LINK_LOSS - drop any pending auto-connection
 *   ALWAYS             - queue a pending connection unless already
 *                        connected
 * (Existing-params early exit, OOM return and the HCI_AUTO_CONN_REPORT
 * case, if any, are elided in this sampled view.)
 */
3490 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491 u8 auto_connect, u16 conn_min_interval,
3492 u16 conn_max_interval)
3494 struct hci_conn_params *params;
3496 if (!is_identity_address(addr, addr_type))
3499 params = hci_conn_params_lookup(hdev, addr, addr_type);
3503 params = kzalloc(sizeof(*params), GFP_KERNEL);
3505 BT_ERR("Out of memory");
3509 bacpy(&params->addr, addr);
3510 params->addr_type = addr_type;
3512 list_add(&params->list, &hdev->le_conn_params);
3515 params->conn_min_interval = conn_min_interval;
3516 params->conn_max_interval = conn_max_interval;
3517 params->auto_connect = auto_connect;
3519 switch (auto_connect) {
3520 case HCI_AUTO_CONN_DISABLED:
3521 case HCI_AUTO_CONN_LINK_LOSS:
3522 hci_pend_le_conn_del(hdev, addr, addr_type);
3524 case HCI_AUTO_CONN_ALWAYS:
3525 if (!is_connected(hdev, addr, addr_type))
3526 hci_pend_le_conn_add(hdev, addr, addr_type);
3530 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3531 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3532 conn_min_interval, conn_max_interval);
3537 /* This function requires the caller holds hdev->lock */
3538 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3540 struct hci_conn_params *params;
3542 params = hci_conn_params_lookup(hdev, addr, addr_type);
3546 hci_pend_le_conn_del(hdev, addr, addr_type);
3548 list_del(¶ms->list);
3551 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3554 /* This function requires the caller holds hdev->lock */
3555 void hci_conn_params_clear(struct hci_dev *hdev)
3557 struct hci_conn_params *params, *tmp;
3559 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3560 list_del(¶ms->list);
3564 BT_DBG("All LE connection parameters were removed");
3567 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3570 BT_ERR("Failed to start inquiry: status %d", status);
3573 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3574 hci_dev_unlock(hdev);
3579 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3581 /* General inquiry access code (GIAC) */
3582 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3583 struct hci_request req;
3584 struct hci_cp_inquiry cp;
3588 BT_ERR("Failed to disable LE scanning: status %d", status);
3592 switch (hdev->discovery.type) {
3593 case DISCOV_TYPE_LE:
3595 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3596 hci_dev_unlock(hdev);
3599 case DISCOV_TYPE_INTERLEAVED:
3600 hci_req_init(&req, hdev);
3602 memset(&cp, 0, sizeof(cp));
3603 memcpy(&cp.lap, lap, sizeof(cp.lap));
3604 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3605 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3609 hci_inquiry_cache_flush(hdev);
3611 err = hci_req_run(&req, inquiry_complete);
3613 BT_ERR("Inquiry request failed: err %d", err);
3614 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3617 hci_dev_unlock(hdev);
3622 static void le_scan_disable_work(struct work_struct *work)
3624 struct hci_dev *hdev = container_of(work, struct hci_dev,
3625 le_scan_disable.work);
3626 struct hci_request req;
3629 BT_DBG("%s", hdev->name);
3631 hci_req_init(&req, hdev);
3633 hci_req_add_le_scan_disable(&req);
3635 err = hci_req_run(&req, le_scan_disable_work_complete);
3637 BT_ERR("Disable LE scanning request failed: err %d", err);
3640 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3642 struct hci_dev *hdev = req->hdev;
3644 /* If we're advertising or initiating an LE connection we can't
3645 * go ahead and change the random address at this time. This is
3646 * because the eventual initiator address used for the
3647 * subsequently created connection will be undefined (some
3648 * controllers use the new address and others the one we had
3649 * when the operation started).
3651 * In this kind of scenario skip the update and let the random
3652 * address be updated at the next cycle.
3654 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3655 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3656 BT_DBG("Deferring random address update");
3660 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Decide which own-address type to use for an LE operation and, when a
 * random address is needed, queue the HCI command that programs it.
 *
 * Selection order:
 *   1. Privacy enabled  -> resolvable private address (regenerated when
 *      expired or out of sync), with the RPA-expiry timer re-armed.
 *   2. @require_privacy -> freshly generated non-resolvable private
 *      address (top two bits cleared).
 *   3. Forced-static or no public address -> static random address.
 *   4. Otherwise        -> the controller's public address.
 * *own_addr_type is set accordingly.
 * (Early returns between the branches are elided in this sampled view.)
 */
3663 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3666 struct hci_dev *hdev = req->hdev;
3669 /* If privacy is enabled use a resolvable private address. If
3670 * current RPA has expired or there is something else than
3671 * the current RPA in use, then generate a new one.
3673 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3676 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* RPA still valid and already programmed: nothing to do */
3678 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3679 !bacmp(&hdev->random_addr, &hdev->rpa))
3682 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3684 BT_ERR("%s failed to generate new RPA", hdev->name);
3688 set_random_addr(req, &hdev->rpa);
/* Schedule regeneration when the RPA lifetime elapses */
3690 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3691 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3696 /* In case of required privacy without resolvable private address,
3697 * use an unresolvable private address. This is useful for active
3698 * scanning and non-connectable advertising.
3700 if (require_privacy) {
3703 get_random_bytes(&urpa, 6);
3704 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3706 *own_addr_type = ADDR_LE_DEV_RANDOM;
3707 set_random_addr(req, &urpa);
3711 /* If forcing static address is in use or there is no public
3712 * address use the static address as random address (but skip
3713 * the HCI command if the current random address is already the
3716 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3717 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3718 *own_addr_type = ADDR_LE_DEV_RANDOM;
3719 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3720 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3721 &hdev->static_addr);
3725 /* Neither privacy nor static address is being used so use a
3728 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3733 /* Copy the Identity Address of the controller.
3735 * If the controller has a public BD_ADDR, then by default use that one.
3736 * If this is a LE only controller without a public address, default to
3737 * the static random address.
3739 * For debugging purposes it is possible to force controllers with a
3740 * public address to use the static random address instead.
/* Writes the chosen address into *bdaddr and its LE address type into
 * *bdaddr_type (ADDR_LE_DEV_RANDOM for static, else ADDR_LE_DEV_PUBLIC).
 */
3742 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3745 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3746 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3747 bacpy(bdaddr, &hdev->static_addr);
3748 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3750 bacpy(bdaddr, &hdev->bdaddr);
3751 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3755 /* Alloc HCI device */
/* Allocate and default-initialize a struct hci_dev: packet types, IO
 * capability, sniff and LE scan/connection interval defaults, all the
 * per-device lists, work items, delayed work, skb queues, locks, sysfs
 * and discovery state.  Returns the new device (NULL-check on the
 * kzalloc and the final return are elided in this sampled view).
 * Counterpart: hci_free_dev().
 */
3756 struct hci_dev *hci_alloc_dev(void)
3758 struct hci_dev *hdev;
3760 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR defaults */
3764 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3765 hdev->esco_type = (ESCO_HV1);
3766 hdev->link_mode = (HCI_LM_ACCEPT);
3767 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3768 hdev->io_capability = 0x03; /* No Input No Output */
3769 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3770 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3772 hdev->sniff_max_interval = 800;
3773 hdev->sniff_min_interval = 80;
/* LE defaults (values in controller units, e.g. 0.625ms slots) */
3775 hdev->le_adv_channel_map = 0x07;
3776 hdev->le_scan_interval = 0x0060;
3777 hdev->le_scan_window = 0x0030;
3778 hdev->le_conn_min_interval = 0x0028;
3779 hdev->le_conn_max_interval = 0x0038;
3781 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3782 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3783 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3784 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3786 mutex_init(&hdev->lock);
3787 mutex_init(&hdev->req_lock);
3789 INIT_LIST_HEAD(&hdev->mgmt_pending);
3790 INIT_LIST_HEAD(&hdev->blacklist);
3791 INIT_LIST_HEAD(&hdev->uuids);
3792 INIT_LIST_HEAD(&hdev->link_keys);
3793 INIT_LIST_HEAD(&hdev->long_term_keys);
3794 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3795 INIT_LIST_HEAD(&hdev->remote_oob_data);
3796 INIT_LIST_HEAD(&hdev->le_white_list);
3797 INIT_LIST_HEAD(&hdev->le_conn_params);
3798 INIT_LIST_HEAD(&hdev->pend_le_conns);
3799 INIT_LIST_HEAD(&hdev->conn_hash.list);
3801 INIT_WORK(&hdev->rx_work, hci_rx_work);
3802 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3803 INIT_WORK(&hdev->tx_work, hci_tx_work);
3804 INIT_WORK(&hdev->power_on, hci_power_on);
3806 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3807 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3808 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3810 skb_queue_head_init(&hdev->rx_q);
3811 skb_queue_head_init(&hdev->cmd_q);
3812 skb_queue_head_init(&hdev->raw_q);
3814 init_waitqueue_head(&hdev->req_wait_q);
3816 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3818 hci_init_sysfs(hdev);
3819 discovery_init(hdev);
3823 EXPORT_SYMBOL(hci_alloc_dev);
3825 /* Free HCI device */
3826 void hci_free_dev(struct hci_dev *hdev)
3828 /* will free via device release */
3829 put_device(&hdev->dev);
3831 EXPORT_SYMBOL(hci_free_dev);
3833 /* Register HCI device */
/* Register an allocated hci_dev with the core: assign an index (AMP
 * devices start at 1 so index 0 stays a valid AMP controller ID),
 * create the two workqueues, debugfs dir, crypto context, sysfs device
 * and rfkill, add the device to hci_dev_list, set initial flags and
 * kick power-on.  Uses goto-style unwinding on failure (labels elided
 * in this sampled view).
 */
3834 int hci_register_dev(struct hci_dev *hdev)
3838 if (!hdev->open || !hdev->close)
3841 /* Do not allow HCI_AMP devices to register at index 0,
3842 * so the index can be used as the AMP controller ID.
3844 switch (hdev->dev_type) {
3846 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3849 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3858 sprintf(hdev->name, "hci%d", id);
3861 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3863 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3864 WQ_MEM_RECLAIM, 1, hdev->name);
3865 if (!hdev->workqueue) {
3870 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871 WQ_MEM_RECLAIM, 1, hdev->name);
3872 if (!hdev->req_workqueue) {
3873 destroy_workqueue(hdev->workqueue);
3878 if (!IS_ERR_OR_NULL(bt_debugfs))
3879 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3881 dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES context used for SMP RPA generation/resolution */
3883 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3885 if (IS_ERR(hdev->tfm_aes)) {
3886 BT_ERR("Unable to create crypto context");
3887 error = PTR_ERR(hdev->tfm_aes);
3888 hdev->tfm_aes = NULL;
3892 error = device_add(&hdev->dev);
3896 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3897 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3900 if (rfkill_register(hdev->rfkill) < 0) {
3901 rfkill_destroy(hdev->rfkill);
3902 hdev->rfkill = NULL;
3906 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3907 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3909 set_bit(HCI_SETUP, &hdev->dev_flags);
3910 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3912 if (hdev->dev_type == HCI_BREDR) {
3913 /* Assume BR/EDR support until proven otherwise (such as
3914 * through reading supported features during init.
3916 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3919 write_lock(&hci_dev_list_lock);
3920 list_add(&hdev->list, &hci_dev_list);
3921 write_unlock(&hci_dev_list_lock);
3923 /* Devices that are marked for raw-only usage need to set
3924 * the HCI_RAW flag to indicate that only user channel is
3927 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3928 set_bit(HCI_RAW, &hdev->flags);
3930 hci_notify(hdev, HCI_DEV_REG);
3933 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path: free resources in reverse order of acquisition */
3938 crypto_free_blkcipher(hdev->tfm_aes);
3940 destroy_workqueue(hdev->workqueue);
3941 destroy_workqueue(hdev->req_workqueue);
3943 ida_simple_remove(&hci_index_ida, hdev->id);
3947 EXPORT_SYMBOL(hci_register_dev);
3949 /* Unregister HCI device */
/* Tear down a registered hci_dev: mark it unregistering, unlink from
 * hci_dev_list, close it, free reassembly buffers, notify mgmt (unless
 * still in setup / raw-only), unregister rfkill, sysfs and debugfs,
 * destroy workqueues, clear all per-device lists under hdev->lock, and
 * release the index.  Reverse of hci_register_dev().
 * (hci_dev_lock calls and the final put are elided in this view.)
 */
3950 void hci_unregister_dev(struct hci_dev *hdev)
3954 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3956 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3960 write_lock(&hci_dev_list_lock);
3961 list_del(&hdev->list);
3962 write_unlock(&hci_dev_list_lock);
3964 hci_dev_do_close(hdev);
3966 for (i = 0; i < NUM_REASSEMBLY; i++)
3967 kfree_skb(hdev->reassembly[i]);
3969 cancel_work_sync(&hdev->power_on);
/* Only tell mgmt about removal for fully set-up, managed devices */
3971 if (!test_bit(HCI_INIT, &hdev->flags) &&
3972 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3973 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
3975 mgmt_index_removed(hdev);
3976 hci_dev_unlock(hdev);
3979 /* mgmt_index_removed should take care of emptying the
3981 BUG_ON(!list_empty(&hdev->mgmt_pending));
3983 hci_notify(hdev, HCI_DEV_UNREG);
3986 rfkill_unregister(hdev->rfkill);
3987 rfkill_destroy(hdev->rfkill);
3991 crypto_free_blkcipher(hdev->tfm_aes);
3993 device_del(&hdev->dev);
3995 debugfs_remove_recursive(hdev->debugfs);
3997 destroy_workqueue(hdev->workqueue);
3998 destroy_workqueue(hdev->req_workqueue);
/* Drop every per-device list while holding hdev->lock */
4001 hci_blacklist_clear(hdev);
4002 hci_uuids_clear(hdev);
4003 hci_link_keys_clear(hdev);
4004 hci_smp_ltks_clear(hdev);
4005 hci_smp_irks_clear(hdev);
4006 hci_remote_oob_data_clear(hdev);
4007 hci_white_list_clear(hdev);
4008 hci_conn_params_clear(hdev);
4009 hci_pend_le_conns_clear(hdev);
4010 hci_dev_unlock(hdev);
4014 ida_simple_remove(&hci_index_ida, id);
4016 EXPORT_SYMBOL(hci_unregister_dev);
4018 /* Suspend HCI device */
4019 int hci_suspend_dev(struct hci_dev *hdev)
4021 hci_notify(hdev, HCI_DEV_SUSPEND);
4024 EXPORT_SYMBOL(hci_suspend_dev);
4026 /* Resume HCI device */
4027 int hci_resume_dev(struct hci_dev *hdev)
4029 hci_notify(hdev, HCI_DEV_RESUME);
4032 EXPORT_SYMBOL(hci_resume_dev);
4034 /* Receive frame from HCI drivers */
/* Entry point for transport drivers delivering a complete HCI packet.
 * Drops the skb unless the device is up or initializing, marks it as
 * incoming, timestamps it, then queues it on rx_q for hci_rx_work.
 * (The drop/return lines are elided in this sampled view.)
 */
4035 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4037 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4038 && !test_bit(HCI_INIT, &hdev->flags))) {
4044 bt_cb(skb)->incoming = 1;
4047 __net_timestamp(skb);
4049 skb_queue_tail(&hdev->rx_q, skb);
4050 queue_work(hdev->workqueue, &hdev->rx_work);
4054 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a partial HCI packet of @type from @count bytes at @data
 * into hdev->reassembly[@index].
 *
 * A fresh skb sized for the packet type's maximum is allocated on the
 * first fragment; subsequent calls append up to scb->expect bytes.
 * Once the type-specific header is complete, scb->expect is set to the
 * payload length from that header; when it reaches zero the finished
 * frame is handed to hci_recv_frame().  Oversized payloads (tailroom
 * check) drop the partial skb.
 * NOTE(review): loop structure, kfree_skb calls and the return value
 * (presumably remaining byte count) are elided in this sampled view.
 */
4056 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4057 int count, __u8 index)
4062 struct sk_buff *skb;
4063 struct bt_skb_cb *scb;
4065 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4066 index >= NUM_REASSEMBLY)
4069 skb = hdev->reassembly[index];
/* Pick max frame size and header length per packet type */
4073 case HCI_ACLDATA_PKT:
4074 len = HCI_MAX_FRAME_SIZE;
4075 hlen = HCI_ACL_HDR_SIZE;
4078 len = HCI_MAX_EVENT_SIZE;
4079 hlen = HCI_EVENT_HDR_SIZE;
4081 case HCI_SCODATA_PKT:
4082 len = HCI_MAX_SCO_SIZE;
4083 hlen = HCI_SCO_HDR_SIZE;
4087 skb = bt_skb_alloc(len, GFP_ATOMIC);
4091 scb = (void *) skb->cb;
4093 scb->pkt_type = type;
4095 hdev->reassembly[index] = skb;
4099 scb = (void *) skb->cb;
4100 len = min_t(uint, scb->expect, count);
4102 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from it */
4111 if (skb->len == HCI_EVENT_HDR_SIZE) {
4112 struct hci_event_hdr *h = hci_event_hdr(skb);
4113 scb->expect = h->plen;
4115 if (skb_tailroom(skb) < scb->expect) {
4117 hdev->reassembly[index] = NULL;
4123 case HCI_ACLDATA_PKT:
4124 if (skb->len == HCI_ACL_HDR_SIZE) {
4125 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4126 scb->expect = __le16_to_cpu(h->dlen);
4128 if (skb_tailroom(skb) < scb->expect) {
4130 hdev->reassembly[index] = NULL;
4136 case HCI_SCODATA_PKT:
4137 if (skb->len == HCI_SCO_HDR_SIZE) {
4138 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4139 scb->expect = h->dlen;
4141 if (skb_tailroom(skb) < scb->expect) {
4143 hdev->reassembly[index] = NULL;
4150 if (scb->expect == 0) {
4151 /* Complete frame */
4153 bt_cb(skb)->pkt_type = type;
4154 hci_recv_frame(hdev, skb);
4156 hdev->reassembly[index] = NULL;
4164 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4168 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4172 rem = hci_reassembly(hdev, type, data, count, type - 1);
4176 data += (count - rem);
4182 EXPORT_SYMBOL(hci_recv_fragment);
4184 #define STREAM_REASSEMBLY 0
4186 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4192 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4195 struct { char type; } *pkt;
4197 /* Start of the frame */
4204 type = bt_cb(skb)->pkt_type;
4206 rem = hci_reassembly(hdev, type, data, count,
4211 data += (count - rem);
4217 EXPORT_SYMBOL(hci_recv_stream_fragment);
4219 /* ---- Interface to upper protocols ---- */
4221 int hci_register_cb(struct hci_cb *cb)
4223 BT_DBG("%p name %s", cb, cb->name);
4225 write_lock(&hci_cb_list_lock);
4226 list_add(&cb->list, &hci_cb_list);
4227 write_unlock(&hci_cb_list_lock);
4231 EXPORT_SYMBOL(hci_register_cb);
4233 int hci_unregister_cb(struct hci_cb *cb)
4235 BT_DBG("%p name %s", cb, cb->name);
4237 write_lock(&hci_cb_list_lock);
4238 list_del(&cb->list);
4239 write_unlock(&hci_cb_list_lock);
4243 EXPORT_SYMBOL(hci_unregister_cb);
4245 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4247 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4250 __net_timestamp(skb);
4252 /* Send copy to monitor */
4253 hci_send_to_monitor(hdev, skb);
4255 if (atomic_read(&hdev->promisc)) {
4256 /* Send copy to the sockets */
4257 hci_send_to_sock(hdev, skb);
4260 /* Get rid of skb owner, prior to sending to the driver. */
4263 if (hdev->send(hdev, skb) < 0)
4264 BT_ERR("%s sending frame failed", hdev->name);
4267 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4269 skb_queue_head_init(&req->cmd_q);
/* Submit a built HCI request: tag the last command with @complete,
 * splice the request's command queue onto hdev->cmd_q under the queue
 * lock, and kick the command worker.  If request building failed the
 * queued commands are purged instead.  Empty requests are rejected.
 * (The err check before the purge and the return codes are elided in
 * this sampled view.)
 */
4274 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4276 struct hci_dev *hdev = req->hdev;
4277 struct sk_buff *skb;
4278 unsigned long flags;
4280 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4282 /* If an error occured during request building, remove all HCI
4283 * commands queued on the HCI request queue.
4286 skb_queue_purge(&req->cmd_q);
4290 /* Do not allow empty requests */
4291 if (skb_queue_empty(&req->cmd_q))
/* Completion callback fires after the last command in the request */
4294 skb = skb_peek_tail(&req->cmd_q);
4295 bt_cb(skb)->req.complete = complete;
4297 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4298 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4299 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4301 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb holding one HCI command: a hci_command_hdr with @opcode
 * (little-endian) followed by @plen bytes of @param, with pkt_type set
 * to HCI_COMMAND_PKT.  (The NULL-check on allocation, the plen header
 * field assignment and the final return are elided in this view.)
 */
4306 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4307 u32 plen, const void *param)
4309 int len = HCI_COMMAND_HDR_SIZE + plen;
4310 struct hci_command_hdr *hdr;
4311 struct sk_buff *skb;
4313 skb = bt_skb_alloc(len, GFP_ATOMIC);
4317 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4318 hdr->opcode = cpu_to_le16(opcode);
4322 memcpy(skb_put(skb, plen), param, plen);
4324 BT_DBG("skb len %d", skb->len);
4326 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4331 /* Send HCI command */
/* Queue a stand-alone HCI command (outside any request): build the skb
 * via hci_prepare_cmd(), mark it as the start of a single-command
 * request, append to cmd_q and kick the command worker.
 * (Error-return and final return lines elided in this sampled view.)
 */
4332 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4335 struct sk_buff *skb;
4337 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4339 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4341 BT_ERR("%s no memory for command", hdev->name);
4345 /* Stand-alone HCI commands must be flaged as
4346 * single-command requests.
4348 bt_cb(skb)->req.start = true;
4350 skb_queue_tail(&hdev->cmd_q, skb);
4351 queue_work(hdev->workqueue, &hdev->cmd_work);
4356 /* Queue a command to an asynchronous HCI request */
4357 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4358 const void *param, u8 event)
4360 struct hci_dev *hdev = req->hdev;
4361 struct sk_buff *skb;
4363 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4365 /* If an error occured during request building, there is no point in
4366 * queueing the HCI command. We can simply return.
4371 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4373 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4374 hdev->name, opcode);
4379 if (skb_queue_empty(&req->cmd_q))
4380 bt_cb(skb)->req.start = true;
4382 bt_cb(skb)->req.event = event;
4384 skb_queue_tail(&req->cmd_q, skb);
4387 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4390 hci_req_add_ev(req, opcode, plen, param, 0);
4393 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * but only if its opcode matches @opcode; NULL-returning early exits
 * for no sent command / opcode mismatch are elided in this view.
 */
4394 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4396 struct hci_command_hdr *hdr;
4398 if (!hdev->sent_cmd)
4401 hdr = (void *) hdev->sent_cmd->data;
4403 if (hdr->opcode != cpu_to_le16(opcode))
4406 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4408 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4412 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4414 struct hci_acl_hdr *hdr;
4417 skb_push(skb, HCI_ACL_HDR_SIZE);
4418 skb_reset_transport_header(skb);
4419 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4420 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4421 hdr->dlen = cpu_to_le16(len);
/* Queue an (optionally fragmented) ACL data skb on @queue.
 *
 * The head skb gets an ACL header with ACL_START semantics; for AMP
 * controllers the channel handle is used instead of the connection
 * handle.  If the skb carries a frag_list, each continuation fragment
 * gets its own header with ACL_START cleared, and all fragments are
 * appended atomically under the queue lock.
 * (The while-loop over fragments and some braces are elided in this
 * sampled view.)
 */
4424 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4425 struct sk_buff *skb, __u16 flags)
4427 struct hci_conn *conn = chan->conn;
4428 struct hci_dev *hdev = conn->hdev;
4429 struct sk_buff *list;
4431 skb->len = skb_headlen(skb);
4434 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4436 switch (hdev->dev_type) {
4438 hci_add_acl_hdr(skb, conn->handle, flags);
/* AMP: address the logical channel, not the physical connection */
4441 hci_add_acl_hdr(skb, chan->handle, flags);
4444 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4448 list = skb_shinfo(skb)->frag_list;
4450 /* Non fragmented */
4451 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4453 skb_queue_tail(queue, skb);
4456 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4458 skb_shinfo(skb)->frag_list = NULL;
4460 /* Queue all fragments atomically */
4461 spin_lock(&queue->lock);
4463 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag */
4465 flags &= ~ACL_START;
4468 skb = list; list = list->next;
4470 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4471 hci_add_acl_hdr(skb, conn->handle, flags);
4473 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4475 __skb_queue_tail(queue, skb);
4478 spin_unlock(&queue->lock);
4482 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4484 struct hci_dev *hdev = chan->conn->hdev;
4486 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4488 hci_queue_acl(chan, &chan->data_q, skb, flags);
4490 queue_work(hdev->workqueue, &hdev->tx_work);
/* Send a SCO audio skb on @conn: prepend a hci_sco_hdr carrying the
 * connection handle and payload length, tag the skb as SCO data, then
 * queue it on the connection's data queue and kick the TX worker.
 */
4494 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4496 struct hci_dev *hdev = conn->hdev;
4497 struct hci_sco_hdr hdr;
4499 BT_DBG("%s len %d", hdev->name, skb->len);
4501 hdr.handle = cpu_to_le16(conn->handle);
4502 hdr.dlen = skb->len;
/* Push header space in front of the payload and copy the header in */
4504 skb_push(skb, HCI_SCO_HDR_SIZE);
4505 skb_reset_transport_header(skb);
4506 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4508 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4510 skb_queue_tail(&conn->data_q, skb);
4511 queue_work(hdev->workqueue, &hdev->tx_work);
4514 /* ---- HCI TX task (outgoing data) ---- */
4516 /* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (simple fairness), and compute its packet quota
 * from the per-link-type controller buffer count.
 * NOTE(review): the rcu_read_lock/unlock pair, the quota math and the
 * return statement are elided in this sampled view.
 */
4517 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4520 struct hci_conn_hash *h = &hdev->conn_hash;
4521 struct hci_conn *conn = NULL, *c;
4522 unsigned int num = 0, min = ~0;
4524 /* We don't have to lock device here. Connections are always
4525 * added and removed with TX task disabled. */
4529 list_for_each_entry_rcu(c, &h->list, list) {
4530 if (c->type != type || skb_queue_empty(&c->data_q))
4533 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest un-acked packets */
4538 if (c->sent < min) {
4543 if (hci_conn_num(hdev, type) == num)
/* Select the buffer count matching the chosen link type */
4552 switch (conn->type) {
4554 cnt = hdev->acl_cnt;
4558 cnt = hdev->sco_cnt;
/* LE shares the ACL buffers when the controller has no LE buffers */
4561 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4565 BT_ERR("Unknown link type");
4573 BT_DBG("conn %p quote %d", conn, *quote);
4577 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4579 struct hci_conn_hash *h = &hdev->conn_hash;
4582 BT_ERR("%s link tx timeout", hdev->name);
4586 /* Kill stalled connections */
4587 list_for_each_entry_rcu(c, &h->list, list) {
4588 if (c->type == type && c->sent) {
4589 BT_ERR("%s killing stalled connection %pMR",
4590 hdev->name, &c->dst);
4591 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4598 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4601 struct hci_conn_hash *h = &hdev->conn_hash;
4602 struct hci_chan *chan = NULL;
4603 unsigned int num = 0, min = ~0, cur_prio = 0;
4604 struct hci_conn *conn;
4605 int cnt, q, conn_num = 0;
4607 BT_DBG("%s", hdev->name);
4611 list_for_each_entry_rcu(conn, &h->list, list) {
4612 struct hci_chan *tmp;
4614 if (conn->type != type)
4617 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4622 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4623 struct sk_buff *skb;
4625 if (skb_queue_empty(&tmp->data_q))
4628 skb = skb_peek(&tmp->data_q);
4629 if (skb->priority < cur_prio)
4632 if (skb->priority > cur_prio) {
4635 cur_prio = skb->priority;
4640 if (conn->sent < min) {
4646 if (hci_conn_num(hdev, type) == conn_num)
4655 switch (chan->conn->type) {
4657 cnt = hdev->acl_cnt;
4660 cnt = hdev->block_cnt;
4664 cnt = hdev->sco_cnt;
4667 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4671 BT_ERR("Unknown link type");
4676 BT_DBG("chan %p quote %d", chan, *quote);
4680 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4682 struct hci_conn_hash *h = &hdev->conn_hash;
4683 struct hci_conn *conn;
4686 BT_DBG("%s", hdev->name);
4690 list_for_each_entry_rcu(conn, &h->list, list) {
4691 struct hci_chan *chan;
4693 if (conn->type != type)
4696 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4701 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4702 struct sk_buff *skb;
4709 if (skb_queue_empty(&chan->data_q))
4712 skb = skb_peek(&chan->data_q);
4713 if (skb->priority >= HCI_PRIO_MAX - 1)
4716 skb->priority = HCI_PRIO_MAX - 1;
4718 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4722 if (hci_conn_num(hdev, type) == num)
4730 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4732 /* Calculate count of blocks used by this packet */
4733 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4736 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4738 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4739 /* ACL tx timeout must be longer than maximum
4740 * link supervision timeout (40.9 seconds) */
4741 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4742 HCI_ACL_TX_TIMEOUT))
4743 hci_link_tx_to(hdev, ACL_LINK);
4747 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4749 unsigned int cnt = hdev->acl_cnt;
4750 struct hci_chan *chan;
4751 struct sk_buff *skb;
4754 __check_timeout(hdev, cnt);
4756 while (hdev->acl_cnt &&
4757 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4758 u32 priority = (skb_peek(&chan->data_q))->priority;
4759 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4760 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4761 skb->len, skb->priority);
4763 /* Stop if priority has changed */
4764 if (skb->priority < priority)
4767 skb = skb_dequeue(&chan->data_q);
4769 hci_conn_enter_active_mode(chan->conn,
4770 bt_cb(skb)->force_active);
4772 hci_send_frame(hdev, skb);
4773 hdev->acl_last_tx = jiffies;
4781 if (cnt != hdev->acl_cnt)
4782 hci_prio_recalculate(hdev, ACL_LINK);
4785 static void hci_sched_acl_blk(struct hci_dev *hdev)
4787 unsigned int cnt = hdev->block_cnt;
4788 struct hci_chan *chan;
4789 struct sk_buff *skb;
4793 __check_timeout(hdev, cnt);
4795 BT_DBG("%s", hdev->name);
4797 if (hdev->dev_type == HCI_AMP)
4802 while (hdev->block_cnt > 0 &&
4803 (chan = hci_chan_sent(hdev, type, "e))) {
4804 u32 priority = (skb_peek(&chan->data_q))->priority;
4805 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4808 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4809 skb->len, skb->priority);
4811 /* Stop if priority has changed */
4812 if (skb->priority < priority)
4815 skb = skb_dequeue(&chan->data_q);
4817 blocks = __get_blocks(hdev, skb);
4818 if (blocks > hdev->block_cnt)
4821 hci_conn_enter_active_mode(chan->conn,
4822 bt_cb(skb)->force_active);
4824 hci_send_frame(hdev, skb);
4825 hdev->acl_last_tx = jiffies;
4827 hdev->block_cnt -= blocks;
4830 chan->sent += blocks;
4831 chan->conn->sent += blocks;
4835 if (cnt != hdev->block_cnt)
4836 hci_prio_recalculate(hdev, type);
4839 static void hci_sched_acl(struct hci_dev *hdev)
4841 BT_DBG("%s", hdev->name);
4843 /* No ACL link over BR/EDR controller */
4844 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4847 /* No AMP link over AMP controller */
4848 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4851 switch (hdev->flow_ctl_mode) {
4852 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4853 hci_sched_acl_pkt(hdev);
4856 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4857 hci_sched_acl_blk(hdev);
4863 static void hci_sched_sco(struct hci_dev *hdev)
4865 struct hci_conn *conn;
4866 struct sk_buff *skb;
4869 BT_DBG("%s", hdev->name);
4871 if (!hci_conn_num(hdev, SCO_LINK))
4874 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4875 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4876 BT_DBG("skb %p len %d", skb, skb->len);
4877 hci_send_frame(hdev, skb);
4880 if (conn->sent == ~0)
4886 static void hci_sched_esco(struct hci_dev *hdev)
4888 struct hci_conn *conn;
4889 struct sk_buff *skb;
4892 BT_DBG("%s", hdev->name);
4894 if (!hci_conn_num(hdev, ESCO_LINK))
4897 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4899 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4900 BT_DBG("skb %p len %d", skb, skb->len);
4901 hci_send_frame(hdev, skb);
4904 if (conn->sent == ~0)
4910 static void hci_sched_le(struct hci_dev *hdev)
4912 struct hci_chan *chan;
4913 struct sk_buff *skb;
4914 int quote, cnt, tmp;
4916 BT_DBG("%s", hdev->name);
4918 if (!hci_conn_num(hdev, LE_LINK))
4921 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4922 /* LE tx timeout must be longer than maximum
4923 * link supervision timeout (40.9 seconds) */
4924 if (!hdev->le_cnt && hdev->le_pkts &&
4925 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4926 hci_link_tx_to(hdev, LE_LINK);
4929 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4931 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4932 u32 priority = (skb_peek(&chan->data_q))->priority;
4933 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4934 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4935 skb->len, skb->priority);
4937 /* Stop if priority has changed */
4938 if (skb->priority < priority)
4941 skb = skb_dequeue(&chan->data_q);
4943 hci_send_frame(hdev, skb);
4944 hdev->le_last_tx = jiffies;
4955 hdev->acl_cnt = cnt;
4958 hci_prio_recalculate(hdev, LE_LINK);
4961 static void hci_tx_work(struct work_struct *work)
4963 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4964 struct sk_buff *skb;
4966 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4967 hdev->sco_cnt, hdev->le_cnt);
4969 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4970 /* Schedule queues and send stuff to HCI driver */
4971 hci_sched_acl(hdev);
4972 hci_sched_sco(hdev);
4973 hci_sched_esco(hdev);
4977 /* Send next queued raw (unknown type) packet */
4978 while ((skb = skb_dequeue(&hdev->raw_q)))
4979 hci_send_frame(hdev, skb);
4982 /* ----- HCI RX task (incoming data processing) ----- */
4984 /* ACL data packet */
4985 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4987 struct hci_acl_hdr *hdr = (void *) skb->data;
4988 struct hci_conn *conn;
4989 __u16 handle, flags;
4991 skb_pull(skb, HCI_ACL_HDR_SIZE);
4993 handle = __le16_to_cpu(hdr->handle);
4994 flags = hci_flags(handle);
4995 handle = hci_handle(handle);
4997 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5000 hdev->stat.acl_rx++;
5003 conn = hci_conn_hash_lookup_handle(hdev, handle);
5004 hci_dev_unlock(hdev);
5007 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5009 /* Send to upper protocol */
5010 l2cap_recv_acldata(conn, skb, flags);
5013 BT_ERR("%s ACL packet for unknown connection handle %d",
5014 hdev->name, handle);
5020 /* SCO data packet */
5021 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5023 struct hci_sco_hdr *hdr = (void *) skb->data;
5024 struct hci_conn *conn;
5027 skb_pull(skb, HCI_SCO_HDR_SIZE);
5029 handle = __le16_to_cpu(hdr->handle);
5031 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5033 hdev->stat.sco_rx++;
5036 conn = hci_conn_hash_lookup_handle(hdev, handle);
5037 hci_dev_unlock(hdev);
5040 /* Send to upper protocol */
5041 sco_recv_scodata(conn, skb);
5044 BT_ERR("%s SCO packet for unknown connection handle %d",
5045 hdev->name, handle);
5051 static bool hci_req_is_complete(struct hci_dev *hdev)
5053 struct sk_buff *skb;
5055 skb = skb_peek(&hdev->cmd_q);
5059 return bt_cb(skb)->req.start;
5062 static void hci_resend_last(struct hci_dev *hdev)
5064 struct hci_command_hdr *sent;
5065 struct sk_buff *skb;
5068 if (!hdev->sent_cmd)
5071 sent = (void *) hdev->sent_cmd->data;
5072 opcode = __le16_to_cpu(sent->opcode);
5073 if (opcode == HCI_OP_RESET)
5076 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5080 skb_queue_head(&hdev->cmd_q, skb);
5081 queue_work(hdev->workqueue, &hdev->cmd_work);
5084 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5086 hci_req_complete_t req_complete = NULL;
5087 struct sk_buff *skb;
5088 unsigned long flags;
5090 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5092 /* If the completed command doesn't match the last one that was
5093 * sent we need to do special handling of it.
5095 if (!hci_sent_cmd_data(hdev, opcode)) {
5096 /* Some CSR based controllers generate a spontaneous
5097 * reset complete event during init and any pending
5098 * command will never be completed. In such a case we
5099 * need to resend whatever was the last sent
5102 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5103 hci_resend_last(hdev);
5108 /* If the command succeeded and there's still more commands in
5109 * this request the request is not yet complete.
5111 if (!status && !hci_req_is_complete(hdev))
5114 /* If this was the last command in a request the complete
5115 * callback would be found in hdev->sent_cmd instead of the
5116 * command queue (hdev->cmd_q).
5118 if (hdev->sent_cmd) {
5119 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5122 /* We must set the complete callback to NULL to
5123 * avoid calling the callback more than once if
5124 * this function gets called again.
5126 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5132 /* Remove all pending commands belonging to this request */
5133 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5134 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5135 if (bt_cb(skb)->req.start) {
5136 __skb_queue_head(&hdev->cmd_q, skb);
5140 req_complete = bt_cb(skb)->req.complete;
5143 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5147 req_complete(hdev, status);
5150 static void hci_rx_work(struct work_struct *work)
5152 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5153 struct sk_buff *skb;
5155 BT_DBG("%s", hdev->name);
5157 while ((skb = skb_dequeue(&hdev->rx_q))) {
5158 /* Send copy to monitor */
5159 hci_send_to_monitor(hdev, skb);
5161 if (atomic_read(&hdev->promisc)) {
5162 /* Send copy to the sockets */
5163 hci_send_to_sock(hdev, skb);
5166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5171 if (test_bit(HCI_INIT, &hdev->flags)) {
5172 /* Don't process data packets in this states. */
5173 switch (bt_cb(skb)->pkt_type) {
5174 case HCI_ACLDATA_PKT:
5175 case HCI_SCODATA_PKT:
5182 switch (bt_cb(skb)->pkt_type) {
5184 BT_DBG("%s Event packet", hdev->name);
5185 hci_event_packet(hdev, skb);
5188 case HCI_ACLDATA_PKT:
5189 BT_DBG("%s ACL data packet", hdev->name);
5190 hci_acldata_packet(hdev, skb);
5193 case HCI_SCODATA_PKT:
5194 BT_DBG("%s SCO data packet", hdev->name);
5195 hci_scodata_packet(hdev, skb);
5205 static void hci_cmd_work(struct work_struct *work)
5207 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5208 struct sk_buff *skb;
5210 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5213 /* Send queued commands */
5214 if (atomic_read(&hdev->cmd_cnt)) {
5215 skb = skb_dequeue(&hdev->cmd_q);
5219 kfree_skb(hdev->sent_cmd);
5221 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5222 if (hdev->sent_cmd) {
5223 atomic_dec(&hdev->cmd_cnt);
5224 hci_send_frame(hdev, skb);
5225 if (test_bit(HCI_RESET, &hdev->flags))
5226 cancel_delayed_work(&hdev->cmd_timer);
5228 schedule_delayed_work(&hdev->cmd_timer,
5231 skb_queue_head(&hdev->cmd_q, skb);
5232 queue_work(hdev->workqueue, &hdev->cmd_work);
5237 void hci_req_add_le_scan_disable(struct hci_request *req)
5239 struct hci_cp_le_set_scan_enable cp;
5241 memset(&cp, 0, sizeof(cp));
5242 cp.enable = LE_SCAN_DISABLE;
5243 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5246 void hci_req_add_le_passive_scan(struct hci_request *req)
5248 struct hci_cp_le_set_scan_param param_cp;
5249 struct hci_cp_le_set_scan_enable enable_cp;
5250 struct hci_dev *hdev = req->hdev;
5253 /* Set require_privacy to false since no SCAN_REQ are send
5254 * during passive scanning. Not using an unresolvable address
5255 * here is important so that peer devices using direct
5256 * advertising with our address will be correctly reported
5257 * by the controller.
5259 if (hci_update_random_address(req, false, &own_addr_type))
5262 memset(¶m_cp, 0, sizeof(param_cp));
5263 param_cp.type = LE_SCAN_PASSIVE;
5264 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5265 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5266 param_cp.own_address_type = own_addr_type;
5267 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5270 memset(&enable_cp, 0, sizeof(enable_cp));
5271 enable_cp.enable = LE_SCAN_ENABLE;
5272 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5273 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5277 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5280 BT_DBG("HCI request failed to update background scanning: "
5281 "status 0x%2.2x", status);
5284 /* This function controls the background scanning based on hdev->pend_le_conns
5285 * list. If there are pending LE connection we start the background scanning,
5286 * otherwise we stop it.
5288 * This function requires the caller holds hdev->lock.
5290 void hci_update_background_scan(struct hci_dev *hdev)
5292 struct hci_request req;
5293 struct hci_conn *conn;
5296 hci_req_init(&req, hdev);
5298 if (list_empty(&hdev->pend_le_conns)) {
5299 /* If there is no pending LE connections, we should stop
5300 * the background scanning.
5303 /* If controller is not scanning we are done. */
5304 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5307 hci_req_add_le_scan_disable(&req);
5309 BT_DBG("%s stopping background scanning", hdev->name);
5311 /* If there is at least one pending LE connection, we should
5312 * keep the background scan running.
5315 /* If controller is connecting, we should not start scanning
5316 * since some controllers are not able to scan and connect at
5319 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5323 /* If controller is currently scanning, we stop it to ensure we
5324 * don't miss any advertising (due to duplicates filter).
5326 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5327 hci_req_add_le_scan_disable(&req);
5329 hci_req_add_le_passive_scan(&req);
5331 BT_DBG("%s starting background scanning", hdev->name);
5334 err = hci_req_run(&req, update_background_scan_complete);
5336 BT_ERR("Failed to run HCI request: err %d", err);