/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ----- HCI requests ----- */
59 #define HCI_REQ_DONE 0
60 #define HCI_REQ_PEND 1
61 #define HCI_REQ_CANCELED 2
63 #define hci_req_lock(d) mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
/* Forward an HCI device event (register/unregister/up/down) to the
 * HCI socket layer so monitoring sockets get notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
234 list_for_each_entry(uuid, &hdev->uuids, list) {
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
408 size_t buf_size = min(count, (sizeof(buf)-1));
411 if (test_bit(HCI_UP, &hdev->flags))
414 if (copy_from_user(buf, user_buf, buf_size))
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
429 static const struct file_operations force_sc_support_fops = {
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
489 if (val < 30 || val > (60 * 60 * 24))
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
720 size_t buf_size = min(count, (sizeof(buf)-1));
723 if (test_bit(HCI_UP, &hdev->flags))
726 if (copy_from_user(buf, user_buf, buf_size))
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
741 static const struct file_operations force_static_address_fops = {
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int adv_min_interval_set(void *data, u64 val)
975 struct hci_dev *hdev = data;
977 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
981 hdev->le_adv_min_interval = val;
982 hci_dev_unlock(hdev);
987 static int adv_min_interval_get(void *data, u64 *val)
989 struct hci_dev *hdev = data;
992 *val = hdev->le_adv_min_interval;
993 hci_dev_unlock(hdev);
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999 adv_min_interval_set, "%llu\n");
1001 static int adv_max_interval_set(void *data, u64 val)
1003 struct hci_dev *hdev = data;
1005 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1009 hdev->le_adv_max_interval = val;
1010 hci_dev_unlock(hdev);
1015 static int adv_max_interval_get(void *data, u64 *val)
1017 struct hci_dev *hdev = data;
1020 *val = hdev->le_adv_max_interval;
1021 hci_dev_unlock(hdev);
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027 adv_max_interval_set, "%llu\n");
1029 static int device_list_show(struct seq_file *f, void *ptr)
1031 struct hci_dev *hdev = f->private;
1032 struct hci_conn_params *p;
1035 list_for_each_entry(p, &hdev->le_conn_params, list) {
1036 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1039 hci_dev_unlock(hdev);
1044 static int device_list_open(struct inode *inode, struct file *file)
1046 return single_open(file, device_list_show, inode->i_private);
1049 static const struct file_operations device_list_fops = {
1050 .open = device_list_open,
1052 .llseek = seq_lseek,
1053 .release = single_release,
1056 /* ---- HCI requests ---- */
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1060 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1062 if (hdev->req_status == HCI_REQ_PEND) {
1063 hdev->req_result = result;
1064 hdev->req_status = HCI_REQ_DONE;
1065 wake_up_interruptible(&hdev->req_wait_q);
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1071 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1073 if (hdev->req_status == HCI_REQ_PEND) {
1074 hdev->req_result = err;
1075 hdev->req_status = HCI_REQ_CANCELED;
1076 wake_up_interruptible(&hdev->req_wait_q);
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1083 struct hci_ev_cmd_complete *ev;
1084 struct hci_event_hdr *hdr;
1085 struct sk_buff *skb;
1089 skb = hdev->recv_evt;
1090 hdev->recv_evt = NULL;
1092 hci_dev_unlock(hdev);
1095 return ERR_PTR(-ENODATA);
1097 if (skb->len < sizeof(*hdr)) {
1098 BT_ERR("Too short HCI event");
1102 hdr = (void *) skb->data;
1103 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1106 if (hdr->evt != event)
1111 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1116 if (skb->len < sizeof(*ev)) {
1117 BT_ERR("Too short cmd_complete event");
1121 ev = (void *) skb->data;
1122 skb_pull(skb, sizeof(*ev));
1124 if (opcode == __le16_to_cpu(ev->opcode))
1127 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1128 __le16_to_cpu(ev->opcode));
1132 return ERR_PTR(-ENODATA);
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136 const void *param, u8 event, u32 timeout)
1138 DECLARE_WAITQUEUE(wait, current);
1139 struct hci_request req;
1142 BT_DBG("%s", hdev->name);
1144 hci_req_init(&req, hdev);
1146 hci_req_add_ev(&req, opcode, plen, param, event);
1148 hdev->req_status = HCI_REQ_PEND;
1150 add_wait_queue(&hdev->req_wait_q, &wait);
1151 set_current_state(TASK_INTERRUPTIBLE);
1153 err = hci_req_run(&req, hci_req_sync_complete);
1155 remove_wait_queue(&hdev->req_wait_q, &wait);
1156 return ERR_PTR(err);
1159 schedule_timeout(timeout);
1161 remove_wait_queue(&hdev->req_wait_q, &wait);
1163 if (signal_pending(current))
1164 return ERR_PTR(-EINTR);
1166 switch (hdev->req_status) {
1168 err = -bt_to_errno(hdev->req_result);
1171 case HCI_REQ_CANCELED:
1172 err = -hdev->req_result;
1180 hdev->req_status = hdev->req_result = 0;
1182 BT_DBG("%s end: err %d", hdev->name, err);
1185 return ERR_PTR(err);
1187 return hci_get_cmd_complete(hdev, opcode, event);
1189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1192 const void *param, u32 timeout)
1194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1196 EXPORT_SYMBOL(__hci_cmd_sync);
1198 /* Execute request and wait for completion. */
1199 static int __hci_req_sync(struct hci_dev *hdev,
1200 void (*func)(struct hci_request *req,
1202 unsigned long opt, __u32 timeout)
1204 struct hci_request req;
1205 DECLARE_WAITQUEUE(wait, current);
1208 BT_DBG("%s start", hdev->name);
1210 hci_req_init(&req, hdev);
1212 hdev->req_status = HCI_REQ_PEND;
1216 add_wait_queue(&hdev->req_wait_q, &wait);
1217 set_current_state(TASK_INTERRUPTIBLE);
1219 err = hci_req_run(&req, hci_req_sync_complete);
1221 hdev->req_status = 0;
1223 remove_wait_queue(&hdev->req_wait_q, &wait);
1225 /* ENODATA means the HCI request command queue is empty.
1226 * This can happen when a request with conditionals doesn't
1227 * trigger any commands to be sent. This is normal behavior
1228 * and should not trigger an error return.
1230 if (err == -ENODATA)
1236 schedule_timeout(timeout);
1238 remove_wait_queue(&hdev->req_wait_q, &wait);
1240 if (signal_pending(current))
1243 switch (hdev->req_status) {
1245 err = -bt_to_errno(hdev->req_result);
1248 case HCI_REQ_CANCELED:
1249 err = -hdev->req_result;
1257 hdev->req_status = hdev->req_result = 0;
1259 BT_DBG("%s end: err %d", hdev->name, err);
1264 static int hci_req_sync(struct hci_dev *hdev,
1265 void (*req)(struct hci_request *req,
1267 unsigned long opt, __u32 timeout)
1271 if (!test_bit(HCI_UP, &hdev->flags))
1274 /* Serialize all requests */
1276 ret = __hci_req_sync(hdev, req, opt, timeout);
1277 hci_req_unlock(hdev);
1282 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1284 BT_DBG("%s %ld", req->hdev->name, opt);
1287 set_bit(HCI_RESET, &req->hdev->flags);
1288 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1291 static void bredr_init(struct hci_request *req)
1293 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1295 /* Read Local Supported Features */
1296 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1298 /* Read Local Version */
1299 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1301 /* Read BD Address */
1302 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1305 static void amp_init(struct hci_request *req)
1307 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1309 /* Read Local Version */
1310 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1312 /* Read Local Supported Commands */
1313 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1315 /* Read Local Supported Features */
1316 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1318 /* Read Local AMP Info */
1319 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1321 /* Read Data Blk size */
1322 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1324 /* Read Flow Control Mode */
1325 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1327 /* Read Location Data */
1328 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1331 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1333 struct hci_dev *hdev = req->hdev;
1335 BT_DBG("%s %ld", hdev->name, opt);
1338 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1339 hci_reset_req(req, 0);
1341 switch (hdev->dev_type) {
1351 BT_ERR("Unknown device type %d", hdev->dev_type);
1356 static void bredr_setup(struct hci_request *req)
1358 struct hci_dev *hdev = req->hdev;
1363 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1364 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1366 /* Read Class of Device */
1367 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1369 /* Read Local Name */
1370 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1372 /* Read Voice Setting */
1373 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1375 /* Read Number of Supported IAC */
1376 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1378 /* Read Current IAC LAP */
1379 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1381 /* Clear Event Filters */
1382 flt_type = HCI_FLT_CLEAR_ALL;
1383 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1385 /* Connection accept timeout ~20 secs */
1386 param = cpu_to_le16(0x7d00);
1387 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1389 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1390 * but it does not support page scan related HCI commands.
1392 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1393 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1394 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1398 static void le_setup(struct hci_request *req)
1400 struct hci_dev *hdev = req->hdev;
1402 /* Read LE Buffer Size */
1403 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1405 /* Read LE Local Supported Features */
1406 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1408 /* Read LE Supported States */
1409 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1411 /* Read LE White List Size */
1412 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1414 /* Clear LE White List */
1415 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1417 /* LE-only controllers have LE implicitly enabled */
1418 if (!lmp_bredr_capable(hdev))
1419 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1422 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1424 if (lmp_ext_inq_capable(hdev))
1427 if (lmp_inq_rssi_capable(hdev))
1430 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1431 hdev->lmp_subver == 0x0757)
1434 if (hdev->manufacturer == 15) {
1435 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1437 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1439 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1443 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1444 hdev->lmp_subver == 0x1805)
1450 static void hci_setup_inquiry_mode(struct hci_request *req)
1454 mode = hci_get_inquiry_mode(req->hdev);
1456 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1459 static void hci_setup_event_mask(struct hci_request *req)
1461 struct hci_dev *hdev = req->hdev;
1463 /* The second byte is 0xff instead of 0x9f (two reserved bits
1464 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1465 * command otherwise.
1467 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1469 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1470 * any event mask for pre 1.2 devices.
1472 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1475 if (lmp_bredr_capable(hdev)) {
1476 events[4] |= 0x01; /* Flow Specification Complete */
1477 events[4] |= 0x02; /* Inquiry Result with RSSI */
1478 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1479 events[5] |= 0x08; /* Synchronous Connection Complete */
1480 events[5] |= 0x10; /* Synchronous Connection Changed */
1482 /* Use a different default for LE-only devices */
1483 memset(events, 0, sizeof(events));
1484 events[0] |= 0x10; /* Disconnection Complete */
1485 events[1] |= 0x08; /* Read Remote Version Information Complete */
1486 events[1] |= 0x20; /* Command Complete */
1487 events[1] |= 0x40; /* Command Status */
1488 events[1] |= 0x80; /* Hardware Error */
1489 events[2] |= 0x04; /* Number of Completed Packets */
1490 events[3] |= 0x02; /* Data Buffer Overflow */
1492 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1493 events[0] |= 0x80; /* Encryption Change */
1494 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1498 if (lmp_inq_rssi_capable(hdev))
1499 events[4] |= 0x02; /* Inquiry Result with RSSI */
1501 if (lmp_sniffsubr_capable(hdev))
1502 events[5] |= 0x20; /* Sniff Subrating */
1504 if (lmp_pause_enc_capable(hdev))
1505 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1507 if (lmp_ext_inq_capable(hdev))
1508 events[5] |= 0x40; /* Extended Inquiry Result */
1510 if (lmp_no_flush_capable(hdev))
1511 events[7] |= 0x01; /* Enhanced Flush Complete */
1513 if (lmp_lsto_capable(hdev))
1514 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1516 if (lmp_ssp_capable(hdev)) {
1517 events[6] |= 0x01; /* IO Capability Request */
1518 events[6] |= 0x02; /* IO Capability Response */
1519 events[6] |= 0x04; /* User Confirmation Request */
1520 events[6] |= 0x08; /* User Passkey Request */
1521 events[6] |= 0x10; /* Remote OOB Data Request */
1522 events[6] |= 0x20; /* Simple Pairing Complete */
1523 events[7] |= 0x04; /* User Passkey Notification */
1524 events[7] |= 0x08; /* Keypress Notification */
1525 events[7] |= 0x10; /* Remote Host Supported
1526 * Features Notification
1530 if (lmp_le_capable(hdev))
1531 events[7] |= 0x20; /* LE Meta-Event */
1533 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1536 static void hci_init2_req(struct hci_request *req, unsigned long opt)
/* Second init stage: queue BR/EDR- and SSP-related configuration
 * commands (local commands list, SSP mode, EIR, inquiry mode, TX power,
 * extended features, link-level authentication).
 * NOTE(review): this view elides some original lines (braces, returns,
 * local declarations); comments describe only the visible statements.
 */
1538 struct hci_dev *hdev = req->hdev;
1540 if (lmp_bredr_capable(hdev))
1543 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1545 if (lmp_le_capable(hdev))
1548 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1549 * local supported commands HCI command.
1551 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1552 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1554 if (lmp_ssp_capable(hdev)) {
1555 /* When SSP is available, then the host features page
1556 * should also be available as well. However some
1557 * controllers list the max_page as 0 as long as SSP
1558 * has not been enabled. To achieve proper debugging
1559 * output, force the minimum max_page to 1 at least.
1561 hdev->max_page = 0x01;
1563 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1565 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1566 sizeof(mode), &mode);
/* SSP disabled: clear the cached EIR data and queue an all-zero
 * Write Extended Inquiry Response to wipe it on the controller too. */
1568 struct hci_cp_write_eir cp;
1570 memset(hdev->eir, 0, sizeof(hdev->eir));
1571 memset(&cp, 0, sizeof(cp));
1573 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1577 if (lmp_inq_rssi_capable(hdev))
1578 hci_setup_inquiry_mode(req);
1580 if (lmp_inq_tx_pwr_capable(hdev))
1581 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1583 if (lmp_ext_feat_capable(hdev)) {
1584 struct hci_cp_read_local_ext_features cp;
1587 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* If link-level security is enabled, turn on authentication. */
1591 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1593 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1598 static void hci_setup_link_policy(struct hci_request *req)
/* Build the default link policy from the controller's LMP feature bits
 * (role switch, hold, sniff, park) and queue a Write Default Link
 * Policy command carrying it.
 */
1600 struct hci_dev *hdev = req->hdev;
1601 struct hci_cp_write_def_link_policy cp;
1602 u16 link_policy = 0;
1604 if (lmp_rswitch_capable(hdev))
1605 link_policy |= HCI_LP_RSWITCH;
1606 if (lmp_hold_capable(hdev))
1607 link_policy |= HCI_LP_HOLD;
1608 if (lmp_sniff_capable(hdev))
1609 link_policy |= HCI_LP_SNIFF;
1610 if (lmp_park_capable(hdev))
1611 link_policy |= HCI_LP_PARK;
/* Policy is sent little-endian on the wire. */
1613 cp.policy = cpu_to_le16(link_policy);
1614 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1617 static void hci_set_le_support(struct hci_request *req)
/* Queue a Write LE Host Supported command when the host-side LE
 * setting differs from what the controller currently reports.
 * Skipped entirely for LE-only controllers (no explicit enablement).
 * NOTE(review): lines setting cp.le inside the HCI_LE_ENABLED branch
 * are elided from this view.
 */
1619 struct hci_dev *hdev = req->hdev;
1620 struct hci_cp_write_le_host_supported cp;
1622 /* LE-only devices do not support explicit enablement */
1623 if (!lmp_bredr_capable(hdev))
1626 memset(&cp, 0, sizeof(cp));
1628 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only issue the command if the desired value differs from the
 * controller's current host-LE capability. */
1633 if (cp.le != lmp_host_le_capable(hdev))
1634 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1638 static void hci_set_event_mask_page_2(struct hci_request *req)
/* Build and queue the second page of the HCI event mask, enabling
 * Connectionless Slave Broadcast (master and slave role) events and
 * the Authenticated Payload Timeout Expired event where supported.
 */
1640 struct hci_dev *hdev = req->hdev;
/* Start with everything disabled; set bits only for supported roles. */
1641 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1643 /* If Connectionless Slave Broadcast master role is supported
1644 * enable all necessary events for it.
1646 if (lmp_csb_master_capable(hdev)) {
1647 events[1] |= 0x40; /* Triggered Clock Capture */
1648 events[1] |= 0x80; /* Synchronization Train Complete */
1649 events[2] |= 0x10; /* Slave Page Response Timeout */
1650 events[2] |= 0x20; /* CSB Channel Map Change */
1653 /* If Connectionless Slave Broadcast slave role is supported
1654 * enable all necessary events for it.
1656 if (lmp_csb_slave_capable(hdev)) {
1657 events[2] |= 0x01; /* Synchronization Train Received */
1658 events[2] |= 0x02; /* CSB Receive */
1659 events[2] |= 0x04; /* CSB Timeout */
1660 events[2] |= 0x08; /* Truncated Page Complete */
1663 /* Enable Authenticated Payload Timeout Expired event if supported */
1664 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1667 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1670 static void hci_init3_req(struct hci_request *req, unsigned long opt)
/* Third init stage: set the event mask, conditionally delete stored
 * link keys, configure the default link policy, set the LE event mask
 * and LE host support, and read extended feature pages beyond page 1.
 * NOTE(review): this view elides some original lines; comments
 * describe only the visible statements.
 */
1672 struct hci_dev *hdev = req->hdev;
1675 hci_setup_event_mask(req);
1677 /* Some Broadcom based Bluetooth controllers do not support the
1678 * Delete Stored Link Key command. They are clearly indicating its
1679 * absence in the bit mask of supported commands.
1681 * Check the supported commands and only if the command is marked
1682 * as supported send it. If not supported assume that the controller
1683 * does not have actual support for stored link keys which makes this
1684 * command redundant anyway.
1686 * Some controllers indicate that they support handling deleting
1687 * stored link keys, but they don't. The quirk lets a driver
1688 * just disable this command.
1690 if (hdev->commands[6] & 0x80 &&
1691 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1692 struct hci_cp_delete_stored_link_key cp;
1694 bacpy(&cp.bdaddr, BDADDR_ANY);
1695 cp.delete_all = 0x01;
1696 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Write Default Link Policy is bit 4 of command octet 5. */
1700 if (hdev->commands[5] & 0x10)
1701 hci_setup_link_policy(req);
1703 if (lmp_le_capable(hdev)) {
1706 memset(events, 0, sizeof(events));
1709 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1710 events[0] |= 0x10; /* LE Long Term Key Request */
1712 /* If controller supports the Connection Parameters Request
1713 * Link Layer Procedure, enable the corresponding event.
1715 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1716 events[0] |= 0x20; /* LE Remote Connection
1720 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1723 if (hdev->commands[25] & 0x40) {
1724 /* Read LE Advertising Channel TX Power */
1725 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1728 hci_set_le_support(req);
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
/* Fourth init stage: event mask page 2, local codec list, MWS
 * transport configuration, synchronization train parameters and
 * Secure Connections support — each gated on the controller
 * advertising the corresponding command or feature.
 */
1743 struct hci_dev *hdev = req->hdev;
1745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1749 /* Read local codec list if the HCI command is supported */
1750 if (hdev->commands[29] & 0x20)
1751 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1753 /* Get MWS transport configuration if the HCI command is supported */
1754 if (hdev->commands[30] & 0x08)
1755 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1757 /* Check for Synchronization Train support */
1758 if (lmp_sync_train_capable(hdev))
1759 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1761 /* Enable Secure Connections if supported and configured */
1762 if ((lmp_sc_capable(hdev) ||
1763 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1764 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1766 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1767 sizeof(support), &support);
1771 static int __hci_init(struct hci_dev *hdev)
/* Run the four synchronous HCI init request stages in order, then —
 * only during the initial HCI_SETUP phase — create the debugfs entries
 * for the controller.  AMP controllers stop after stage 1.
 * Returns 0 on success or the first stage's error code.
 * NOTE(review): several error-check/return lines are elided from this
 * view; comments describe only the visible statements.
 */
1775 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1779 /* The Device Under Test (DUT) mode is special and available for
1780 * all controller types. So just create it early on.
1782 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1783 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1787 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1788 * BR/EDR/LE type controllers. AMP controllers only need the
1791 if (hdev->dev_type != HCI_BREDR)
1794 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1798 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1802 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1806 /* Only create debugfs entries during the initial setup
1807 * phase and not every time the controller gets powered on.
1809 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available for every controller type. */
1812 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1814 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1815 &hdev->manufacturer);
1816 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1817 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1818 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1820 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1822 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1824 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1825 &conn_info_min_age_fops);
1826 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1827 &conn_info_max_age_fops);
/* BR/EDR-specific entries. */
1829 if (lmp_bredr_capable(hdev)) {
1830 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1831 hdev, &inquiry_cache_fops);
1832 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1833 hdev, &link_keys_fops);
1834 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1835 hdev, &dev_class_fops);
1836 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1837 hdev, &voice_setting_fops);
/* Secure Simple Pairing entries. */
1840 if (lmp_ssp_capable(hdev)) {
1841 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1842 hdev, &auto_accept_delay_fops);
1843 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1844 hdev, &force_sc_support_fops);
1845 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1846 hdev, &sc_only_mode_fops);
/* Sniff-mode tuning entries. */
1849 if (lmp_sniff_capable(hdev)) {
1850 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1851 hdev, &idle_timeout_fops);
1852 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1853 hdev, &sniff_min_interval_fops);
1854 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1855 hdev, &sniff_max_interval_fops);
/* LE-specific entries. */
1858 if (lmp_le_capable(hdev)) {
1859 debugfs_create_file("identity", 0400, hdev->debugfs,
1860 hdev, &identity_fops);
1861 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1862 hdev, &rpa_timeout_fops);
1863 debugfs_create_file("random_address", 0444, hdev->debugfs,
1864 hdev, &random_address_fops);
1865 debugfs_create_file("static_address", 0444, hdev->debugfs,
1866 hdev, &static_address_fops);
1868 /* For controllers with a public address, provide a debug
1869 * option to force the usage of the configured static
1870 * address. By default the public address is used.
1872 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1873 debugfs_create_file("force_static_address", 0644,
1874 hdev->debugfs, hdev,
1875 &force_static_address_fops);
1877 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1878 &hdev->le_white_list_size);
1879 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1881 debugfs_create_file("identity_resolving_keys", 0400,
1882 hdev->debugfs, hdev,
1883 &identity_resolving_keys_fops);
1884 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1885 hdev, &long_term_keys_fops);
1886 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1887 hdev, &conn_min_interval_fops);
1888 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1889 hdev, &conn_max_interval_fops);
1890 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1891 hdev, &conn_latency_fops);
1892 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1893 hdev, &supervision_timeout_fops);
1894 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1895 hdev, &adv_channel_map_fops);
1896 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1897 hdev, &adv_min_interval_fops);
1898 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1899 hdev, &adv_max_interval_fops);
1900 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1902 debugfs_create_u16("discov_interleaved_timeout", 0644,
1904 &hdev->discov_interleaved_timeout);
1912 static void hci_init0_req(struct hci_request *req, unsigned long opt)
/* Minimal init request used for unconfigured controllers: optional
 * reset, Read Local Version, and — only when the driver provides a
 * set_bdaddr callback — Read BD Address.
 */
1914 struct hci_dev *hdev = req->hdev;
1916 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver asked to only reset on close. */
1919 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1920 hci_reset_req(req, 0);
1922 /* Read Local Version */
1923 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1925 /* Read BD Address */
1926 if (hdev->set_bdaddr)
1927 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1930 static int __hci_unconf_init(struct hci_dev *hdev)
/* Synchronously run the minimal hci_init0_req stage for an
 * unconfigured controller; skipped for raw devices.
 */
1934 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1937 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1944 static void hci_scan_req(struct hci_request *req, unsigned long opt)
/* Request helper: queue a Write Scan Enable command.
 * NOTE(review): the line deriving `scan` from `opt` is elided here.
 */
1948 BT_DBG("%s %x", req->hdev->name, scan);
1950 /* Inquiry and Page scans */
1951 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1954 static void hci_auth_req(struct hci_request *req, unsigned long opt)
/* Request helper: queue a Write Authentication Enable command.
 * NOTE(review): the line deriving `auth` from `opt` is elided here.
 */
1958 BT_DBG("%s %x", req->hdev->name, auth);
1960 /* Authentication */
1961 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1964 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
/* Request helper: queue a Write Encryption Mode command.
 * NOTE(review): the line deriving `encrypt` from `opt` is elided here.
 */
1968 BT_DBG("%s %x", req->hdev->name, encrypt);
1971 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1974 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
/* Request helper: queue a Write Default Link Policy command with the
 * policy value passed through `opt` (converted to little-endian).
 */
1976 __le16 policy = cpu_to_le16(opt);
1978 BT_DBG("%s %x", req->hdev->name, policy);
1980 /* Default link policy */
1981 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1984 /* Get HCI device by index.
1985 * Device is held on return. */
1986 struct hci_dev *hci_dev_get(int index)
/* Look up the device with the matching id on hci_dev_list under the
 * read lock; a matching device is returned with its reference count
 * incremented (caller must drop it with hci_dev_put), or NULL-like
 * result if no device matches.
 */
1988 struct hci_dev *hdev = NULL, *d;
1990 BT_DBG("%d", index);
1995 read_lock(&hci_dev_list_lock);
1996 list_for_each_entry(d, &hci_dev_list, list) {
1997 if (d->id == index) {
1998 hdev = hci_dev_hold(d);
2002 read_unlock(&hci_dev_list_lock);
2006 /* ---- Inquiry support ---- */
2008 bool hci_discovery_active(struct hci_dev *hdev)
/* Return whether device discovery is currently in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING.
 */
2010 struct discovery_state *discov = &hdev->discovery;
2012 switch (discov->state) {
2013 case DISCOVERY_FINDING:
2014 case DISCOVERY_RESOLVING:
2022 void hci_discovery_set_state(struct hci_dev *hdev, int state)
/* Transition the discovery state machine and notify mgmt of
 * start/stop where appropriate.  No-op if the state is unchanged.
 */
2024 int old_state = hdev->discovery.state;
2026 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2028 if (old_state == state)
2031 hdev->discovery.state = state;
2034 case DISCOVERY_STOPPED:
/* Discovery finished: re-evaluate passive background scanning. */
2035 hci_update_background_scan(hdev);
/* Only report "stopped" if discovery actually got past STARTING. */
2037 if (old_state != DISCOVERY_STARTING)
2038 mgmt_discovering(hdev, 0);
2040 case DISCOVERY_STARTING:
2042 case DISCOVERY_FINDING:
2043 mgmt_discovering(hdev, 1);
2045 case DISCOVERY_RESOLVING:
2047 case DISCOVERY_STOPPING:
2052 void hci_inquiry_cache_flush(struct hci_dev *hdev)
/* Discard every entry in the inquiry cache and reset the unknown and
 * resolve sub-lists to empty.
 */
2054 struct discovery_state *cache = &hdev->discovery;
2055 struct inquiry_entry *p, *n;
/* _safe iteration: entries are presumably freed inside the loop
 * (the free line is elided from this view). */
2057 list_for_each_entry_safe(p, n, &cache->all, all) {
2062 INIT_LIST_HEAD(&cache->unknown);
2063 INIT_LIST_HEAD(&cache->resolve);
2066 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
/* Find an inquiry-cache entry by Bluetooth address on the full `all`
 * list; returns the matching entry (or falls through when none).
 */
2069 struct discovery_state *cache = &hdev->discovery;
2070 struct inquiry_entry *e;
2072 BT_DBG("cache %p, %pMR", cache, bdaddr);
2074 list_for_each_entry(e, &cache->all, all) {
2075 if (!bacmp(&e->data.bdaddr, bdaddr))
2082 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
/* Like hci_inquiry_cache_lookup, but search only the `unknown` list
 * (entries whose remote name has not been resolved yet).
 */
2085 struct discovery_state *cache = &hdev->discovery;
2086 struct inquiry_entry *e;
2088 BT_DBG("cache %p, %pMR", cache, bdaddr);
2090 list_for_each_entry(e, &cache->unknown, list) {
2091 if (!bacmp(&e->data.bdaddr, bdaddr))
2098 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
/* Search the `resolve` list.  With BDADDR_ANY, match the first entry
 * in the requested name_state; otherwise match by exact address.
 */
2102 struct discovery_state *cache = &hdev->discovery;
2103 struct inquiry_entry *e;
2105 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2107 list_for_each_entry(e, &cache->resolve, list) {
2108 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2110 if (!bacmp(&e->data.bdaddr, bdaddr))
2117 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2118 struct inquiry_entry *ie)
/* Re-insert `ie` into the resolve list keeping it ordered by RSSI
 * strength so stronger (closer) devices get name-resolved first;
 * entries with a pending name request keep their position.
 */
2120 struct discovery_state *cache = &hdev->discovery;
2121 struct list_head *pos = &cache->resolve;
2122 struct inquiry_entry *p;
2124 list_del(&ie->list);
/* Walk until the first entry with weaker RSSI than ie's. */
2126 list_for_each_entry(p, &cache->resolve, list) {
2127 if (p->name_state != NAME_PENDING &&
2128 abs(p->data.rssi) >= abs(ie->data.rssi))
2133 list_add(&ie->list, pos);
2136 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
/* Insert or refresh an inquiry result in the cache and return mgmt
 * device-found flags (LEGACY_PAIRING / CONFIRM_NAME) describing it.
 * Updates RSSI ordering for entries awaiting name resolution and
 * moves entries between the unknown/known name states.
 * NOTE(review): some branch/return lines are elided from this view.
 */
2139 struct discovery_state *cache = &hdev->discovery;
2140 struct inquiry_entry *ie;
2143 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A new inquiry result invalidates any stored OOB data. */
2145 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2147 if (!data->ssp_mode)
2148 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2150 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2152 if (!ie->data.ssp_mode)
2153 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is still needed: re-sort the
 * resolve list so the strongest signal is handled first. */
2155 if (ie->name_state == NAME_NEEDED &&
2156 data->rssi != ie->data.rssi) {
2157 ie->data.rssi = data->rssi;
2158 hci_inquiry_cache_update_resolve(hdev, ie);
2164 /* Entry not in the cache. Add new one. */
2165 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2167 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2171 list_add(&ie->all, &cache->all);
2174 ie->name_state = NAME_KNOWN;
2176 ie->name_state = NAME_NOT_KNOWN;
2177 list_add(&ie->list, &cache->unknown);
/* Name just became known: take the entry off the unknown list. */
2181 if (name_known && ie->name_state != NAME_KNOWN &&
2182 ie->name_state != NAME_PENDING) {
2183 ie->name_state = NAME_KNOWN;
2184 list_del(&ie->list);
2187 memcpy(&ie->data, data, sizeof(*data));
2188 ie->timestamp = jiffies;
2189 cache->timestamp = jiffies;
2191 if (ie->name_state == NAME_NOT_KNOWN)
2192 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2198 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
/* Copy up to `num` cached inquiry results into `buf` as an array of
 * struct inquiry_info, returning the number copied.
 * NOTE(review): the copied-count bookkeeping lines are elided here.
 */
2200 struct discovery_state *cache = &hdev->discovery;
2201 struct inquiry_info *info = (struct inquiry_info *) buf;
2202 struct inquiry_entry *e;
2205 list_for_each_entry(e, &cache->all, all) {
2206 struct inquiry_data *data = &e->data;
2211 bacpy(&info->bdaddr, &data->bdaddr);
2212 info->pscan_rep_mode = data->pscan_rep_mode;
2213 info->pscan_period_mode = data->pscan_period_mode;
2214 info->pscan_mode = data->pscan_mode;
2215 memcpy(info->dev_class, data->dev_class, 3);
2216 info->clock_offset = data->clock_offset;
2222 BT_DBG("cache %p, copied %d", cache, copied);
2226 static void hci_inq_req(struct hci_request *req, unsigned long opt)
/* Request helper: queue an Inquiry command built from the
 * hci_inquiry_req passed via `opt`, unless an inquiry is already
 * running (HCI_INQUIRY flag set).
 */
2228 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2229 struct hci_dev *hdev = req->hdev;
2230 struct hci_cp_inquiry cp;
2232 BT_DBG("%s", hdev->name);
2234 if (test_bit(HCI_INQUIRY, &hdev->flags))
/* LAP is a 3-byte field. */
2238 memcpy(&cp.lap, &ir->lap, 3);
2239 cp.length = ir->length;
2240 cp.num_rsp = ir->num_rsp;
2241 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2244 int hci_inquiry(void __user *arg)
/* HCIINQUIRY ioctl backend: validate the device, optionally flush a
 * stale cache and run a synchronous inquiry, then copy the cached
 * results back to user space.  Returns 0 or a negative errno.
 * NOTE(review): several error-path and unlock lines are elided from
 * this view; comments describe only the visible statements.
 */
2246 __u8 __user *ptr = arg;
2247 struct hci_inquiry_req ir;
2248 struct hci_dev *hdev;
2249 int err = 0, do_inquiry = 0, max_rsp;
2253 if (copy_from_user(&ir, ptr, sizeof(ir)))
2256 hdev = hci_dev_get(ir.dev_id);
/* Reject devices claimed by a user channel, unconfigured devices,
 * non-BR/EDR devices, and devices with BR/EDR disabled. */
2260 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2265 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2270 if (hdev->dev_type != HCI_BREDR) {
2275 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush and re-inquire when the cache is too old, empty, or the
 * caller explicitly asked for a flush. */
2281 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2282 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2283 hci_inquiry_cache_flush(hdev);
2286 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per the HCI spec; scale to jiffies. */
2288 timeo = ir.length * msecs_to_jiffies(2000);
2291 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2296 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2297 * cleared). If it is interrupted by a signal, return -EINTR.
2299 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2300 TASK_INTERRUPTIBLE))
2304 /* for unlimited number of responses we will use buffer with
2307 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2309 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2310 * copy it to the user space.
2312 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2319 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2320 hci_dev_unlock(hdev);
2322 BT_DBG("num_rsp %d", ir.num_rsp);
/* Copy the header first, then the result array behind it. */
2324 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2326 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2339 static int hci_dev_do_open(struct hci_dev *hdev)
/* Power on and initialize a controller: run the driver's open() and
 * setup() callbacks, the unconfigured or full HCI init sequence, and
 * on success mark the device HCI_UP; on failure flush all work and
 * queues and revert the flags.  Returns 0 or a negative errno.
 * NOTE(review): several brace/goto/unlock lines are elided from this
 * view; comments describe only the visible statements.
 */
2343 BT_DBG("%s %p", hdev->name, hdev);
2347 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2352 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2354 /* Check for rfkill but allow the HCI setup stage to
2355 * proceed (which in itself doesn't cause any RF activity).
2357 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2362 /* Check for valid public address or a configured static
2363 * random address, but let the HCI setup proceed to
2364 * be able to determine if there is a public address
2367 * In case of user channel usage, it is not important
2368 * if a public address or static random address is
2371 * This check is only valid for BR/EDR controllers
2372 * since AMP controllers do not have an address.
2374 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375 hdev->dev_type == HCI_BREDR &&
2376 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do. */
2383 if (test_bit(HCI_UP, &hdev->flags)) {
2388 if (hdev->open(hdev)) {
2393 atomic_set(&hdev->cmd_cnt, 1);
2394 set_bit(HCI_INIT, &hdev->flags);
2396 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2398 ret = hdev->setup(hdev);
2400 /* The transport driver can set these quirks before
2401 * creating the HCI device or in its setup callback.
2403 * In case any of them is set, the controller has to
2404 * start up as unconfigured.
2406 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2408 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2410 /* For an unconfigured controller it is required to
2411 * read at least the version information provided by
2412 * the Read Local Version Information command.
2414 * If the set_bdaddr driver callback is provided, then
2415 * also the original Bluetooth public device address
2416 * will be read using the Read BD Address command.
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419 ret = __hci_unconf_init(hdev);
2422 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423 /* If public address change is configured, ensure that
2424 * the address gets programmed. If the driver does not
2425 * support changing the public address, fail the power
2428 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2430 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2432 ret = -EADDRNOTAVAIL;
/* Full four-stage init only for configured, kernel-managed devices. */
2436 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2437 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2438 ret = __hci_init(hdev);
2441 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power-on via mgmt. */
2445 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2446 set_bit(HCI_UP, &hdev->flags);
2447 hci_notify(hdev, HCI_DEV_UP);
2448 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2449 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2450 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2451 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2452 hdev->dev_type == HCI_BREDR) {
2454 mgmt_powered(hdev, 1);
2455 hci_dev_unlock(hdev);
2458 /* Init failed, cleanup */
2459 flush_work(&hdev->tx_work);
2460 flush_work(&hdev->cmd_work);
2461 flush_work(&hdev->rx_work);
2463 skb_queue_purge(&hdev->cmd_q);
2464 skb_queue_purge(&hdev->rx_q);
2469 if (hdev->sent_cmd) {
2470 kfree_skb(hdev->sent_cmd);
2471 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW bit; everything else is reset on failure. */
2475 hdev->flags &= BIT(HCI_RAW);
2479 hci_req_unlock(hdev);
2483 /* ---- HCI ioctl helpers ---- */
2485 int hci_dev_open(__u16 dev)
/* HCIDEVUP ioctl backend: resolve the device by index, enforce the
 * unconfigured/user-channel rules, cancel pending auto-power-off
 * work, then call hci_dev_do_open().  Returns 0 or a negative errno.
 */
2487 struct hci_dev *hdev;
2490 hdev = hci_dev_get(dev);
2494 /* Devices that are marked as unconfigured can only be powered
2495 * up as user channel. Trying to bring them up as normal devices
2496 * will result into a failure. Only user channel operation is
2499 * When this function is called for a user channel, the flag
2500 * HCI_USER_CHANNEL will be set first before attempting to
2503 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2504 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2509 /* We need to ensure that no other power on/off work is pending
2510 * before proceeding to call hci_dev_do_open. This is
2511 * particularly important if the setup procedure has not yet
2514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515 cancel_delayed_work(&hdev->power_off);
2517 /* After this call it is guaranteed that the setup procedure
2518 * has finished. This means that error conditions like RFKILL
2519 * or no valid public or static random address apply.
2521 flush_workqueue(hdev->req_workqueue);
2523 /* For controllers not using the management interface and that
2524 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2525 * so that pairing works for them. Once the management interface
2526 * is in use this bit will be cleared again and userspace has
2527 * to explicitly enable it.
2529 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530 !test_bit(HCI_MGMT, &hdev->dev_flags))
2531 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2533 err = hci_dev_do_open(hdev);
/* This function requires the caller holds hdev->lock */
2541 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
/* Drop pending LE connection attempts: release each queued
 * connection's references and take the params off the action list.
 */
2543 struct hci_conn_params *p;
2545 list_for_each_entry(p, &hdev->le_conn_params, list) {
2547 hci_conn_drop(p->conn);
2548 hci_conn_put(p->conn);
2551 list_del_init(&p->action);
2554 BT_DBG("All LE pending actions cleared");
2557 static int hci_dev_do_close(struct hci_dev *hdev)
/* Power down a controller: cancel timers and delayed work, flush all
 * work items and packet queues, flush the inquiry cache and
 * connection hash, optionally issue a final HCI reset, notify mgmt of
 * power-off, and clear all non-persistent flags.
 * NOTE(review): some unlock/return/driver-close lines are elided from
 * this view; comments describe only the visible statements.
 */
2559 BT_DBG("%s %p", hdev->name, hdev);
2561 cancel_delayed_work(&hdev->power_off);
2563 hci_req_cancel(hdev, ENODEV);
/* Not up: just clean the command timer and bail out. */
2566 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2567 cancel_delayed_work_sync(&hdev->cmd_timer);
2568 hci_req_unlock(hdev);
2572 /* Flush RX and TX works */
2573 flush_work(&hdev->tx_work);
2574 flush_work(&hdev->rx_work);
2576 if (hdev->discov_timeout > 0) {
2577 cancel_delayed_work(&hdev->discov_off);
2578 hdev->discov_timeout = 0;
2579 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2580 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2583 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2584 cancel_delayed_work(&hdev->service_cache);
2586 cancel_delayed_work_sync(&hdev->le_scan_disable);
2588 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2589 cancel_delayed_work_sync(&hdev->rpa_expired);
2592 hci_inquiry_cache_flush(hdev);
2593 hci_pend_le_actions_clear(hdev);
2594 hci_conn_hash_flush(hdev);
2595 hci_dev_unlock(hdev);
2597 hci_notify(hdev, HCI_DEV_DOWN);
2603 skb_queue_purge(&hdev->cmd_q);
2604 atomic_set(&hdev->cmd_cnt, 1);
/* Reset the controller on close when the quirk asks for it, except
 * for auto-off and unconfigured devices. */
2605 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2606 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2607 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2608 set_bit(HCI_INIT, &hdev->flags);
2609 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2610 clear_bit(HCI_INIT, &hdev->flags);
2613 /* flush cmd work */
2614 flush_work(&hdev->cmd_work);
2617 skb_queue_purge(&hdev->rx_q);
2618 skb_queue_purge(&hdev->cmd_q);
2619 skb_queue_purge(&hdev->raw_q);
2621 /* Drop last sent command */
2622 if (hdev->sent_cmd) {
2623 cancel_delayed_work_sync(&hdev->cmd_timer);
2624 kfree_skb(hdev->sent_cmd);
2625 hdev->sent_cmd = NULL;
2628 kfree_skb(hdev->recv_evt);
2629 hdev->recv_evt = NULL;
2631 /* After this point our queues are empty
2632 * and no tasks are scheduled. */
/* Keep only HCI_RAW in flags; clear all non-persistent dev_flags. */
2636 hdev->flags &= BIT(HCI_RAW);
2637 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2639 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2640 if (hdev->dev_type == HCI_BREDR) {
2642 mgmt_powered(hdev, 0);
2643 hci_dev_unlock(hdev);
2647 /* Controller radio is available but is currently powered down */
2648 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2650 memset(hdev->eir, 0, sizeof(hdev->eir));
2651 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2652 bacpy(&hdev->random_addr, BDADDR_ANY);
2654 hci_req_unlock(hdev);
2660 int hci_dev_close(__u16 dev)
/* HCIDEVDOWN ioctl backend: resolve the device, refuse user-channel
 * devices, cancel auto-power-off, and call hci_dev_do_close().
 */
2662 struct hci_dev *hdev;
2665 hdev = hci_dev_get(dev);
2669 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2674 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2675 cancel_delayed_work(&hdev->power_off);
2677 err = hci_dev_do_close(hdev);
2684 int hci_dev_reset(__u16 dev)
/* HCIDEVRESET ioctl backend: drop queued packets, flush the inquiry
 * cache and connection hash, reset the packet counters, and issue a
 * synchronous HCI Reset.  Rejected for down, user-channel, or
 * unconfigured devices.
 */
2686 struct hci_dev *hdev;
2689 hdev = hci_dev_get(dev);
2695 if (!test_bit(HCI_UP, &hdev->flags)) {
2700 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2705 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2711 skb_queue_purge(&hdev->rx_q);
2712 skb_queue_purge(&hdev->cmd_q);
2715 hci_inquiry_cache_flush(hdev);
2716 hci_conn_hash_flush(hdev);
2717 hci_dev_unlock(hdev);
/* Allow one outstanding command and clear all flow-control counts. */
2722 atomic_set(&hdev->cmd_cnt, 1);
2723 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2725 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2728 hci_req_unlock(hdev);
2733 int hci_dev_reset_stat(__u16 dev)
/* HCIDEVRESTAT ioctl backend: zero the device's statistics counters.
 * Rejected for user-channel or unconfigured devices.
 */
2735 struct hci_dev *hdev;
2738 hdev = hci_dev_get(dev);
2742 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2747 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2752 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2759 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
/* Mirror a raw scan-mode change (from a legacy ioctl) into the
 * HCI_CONNECTABLE / HCI_DISCOVERABLE flags and, when the management
 * interface is active, emit the corresponding mgmt updates.
 */
2761 bool conn_changed, discov_changed;
2763 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2765 if ((scan & SCAN_PAGE))
2766 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2769 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2772 if ((scan & SCAN_INQUIRY)) {
2773 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2776 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2777 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Without mgmt in use there is nobody to notify. */
2781 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2784 if (conn_changed || discov_changed) {
2785 /* In case this was disabled through mgmt */
2786 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2788 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2789 mgmt_update_adv_data(hdev);
2791 mgmt_new_settings(hdev);
2795 int hci_dev_cmd(unsigned int cmd, void __user *arg)
/* Dispatcher for the legacy HCISET* device ioctls: copy the request
 * from user space, validate the device, then apply the requested
 * setting either via a synchronous HCI request or by writing hdev
 * fields directly.  Returns 0 or a negative errno.
 * NOTE(review): the switch(cmd) line and several case labels are
 * elided from this view.
 */
2797 struct hci_dev *hdev;
2798 struct hci_dev_req dr;
2801 if (copy_from_user(&dr, arg, sizeof(dr)))
2804 hdev = hci_dev_get(dr.dev_id);
2808 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2813 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2818 if (hdev->dev_type != HCI_BREDR) {
2823 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2830 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2835 if (!lmp_encrypt_capable(hdev)) {
2840 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2841 /* Auth must be enabled first */
2842 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2848 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2853 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2856 /* Ensure that the connectable and discoverable states
2857 * get correctly modified as this was a non-mgmt change.
2860 hci_update_scan_state(hdev, dr.dev_opt);
2864 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2868 case HCISETLINKMODE:
2869 hdev->link_mode = ((__u16) dr.dev_opt) &
2870 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2874 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count low. */
2878 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2879 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2883 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2884 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2897 int hci_get_dev_list(void __user *arg)
/* HCIGETDEVLIST ioctl backend: build an hci_dev_list_req containing
 * the id and flags of every registered device (up to the caller's
 * dev_num) and copy it to user space.
 */
2899 struct hci_dev *hdev;
2900 struct hci_dev_list_req *dl;
2901 struct hci_dev_req *dr;
2902 int n = 0, size, err;
2905 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kzalloc below stays bounded. */
2908 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2911 size = sizeof(*dl) + dev_num * sizeof(*dr);
2913 dl = kzalloc(size, GFP_KERNEL);
2919 read_lock(&hci_dev_list_lock);
2920 list_for_each_entry(hdev, &hci_dev_list, list) {
2921 unsigned long flags = hdev->flags;
2923 /* When the auto-off is configured it means the transport
2924 * is running, but in that case still indicate that the
2925 * device is actually down.
2927 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2928 flags &= ~BIT(HCI_UP);
2930 (dr + n)->dev_id = hdev->id;
2931 (dr + n)->dev_opt = flags;
2936 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
2939 size = sizeof(*dl) + n * sizeof(*dr);
2941 err = copy_to_user(arg, dl, size);
2944 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for one
 * controller and copy it back to userspace.
 */
2947 int hci_get_dev_info(void __user *arg)
2949 struct hci_dev *hdev;
2950 struct hci_dev_info di;
2951 unsigned long flags;
2954 if (copy_from_user(&di, arg, sizeof(di)))
2957 hdev = hci_dev_get(di.dev_id);
2961 /* When the auto-off is configured it means the transport
2962 * is running, but in that case still indicate that the
2963 * device is actually down.
2965 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2966 flags = hdev->flags & ~BIT(HCI_UP);
2968 flags = hdev->flags;
/* NOTE(review): unbounded strcpy — presumably safe because di.name and
 * hdev->name are the same fixed-size array, but confirm against the
 * struct definitions in hci.h / hci_core.h.
 */
2970 strcpy(di.name, hdev->name);
2971 di.bdaddr = hdev->bdaddr;
2972 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2974 di.pkt_type = hdev->pkt_type;
/* BR/EDR controllers report ACL/SCO MTUs; otherwise (LE-only, see the
 * else branch below) the LE values are reported in the ACL fields.
 */
2975 if (lmp_bredr_capable(hdev)) {
2976 di.acl_mtu = hdev->acl_mtu;
2977 di.acl_pkts = hdev->acl_pkts;
2978 di.sco_mtu = hdev->sco_mtu;
2979 di.sco_pkts = hdev->sco_pkts;
2981 di.acl_mtu = hdev->le_mtu;
2982 di.acl_pkts = hdev->le_pkts;
2986 di.link_policy = hdev->link_policy;
2987 di.link_mode = hdev->link_mode;
2989 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2990 memcpy(&di.features, &hdev->features, sizeof(di.features));
2992 if (copy_to_user(arg, &di, sizeof(di)))
3000 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: when the switch blocks radio, mark the controller
 * rfkilled and power it down (unless it is mid-setup/config); when
 * unblocked, just clear the flag.
 */
3002 static int hci_rfkill_set_block(void *data, bool blocked)
3004 struct hci_dev *hdev = data;
3006 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User-channel devices are driven directly by userspace; rfkill must
 * not interfere (visible early bail-out, return value not in view).
 */
3008 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3012 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3013 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3014 !test_bit(HCI_CONFIG, &hdev->dev_flags))
3015 hci_dev_do_close(hdev);
3017 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table for HCI controllers. */
3023 static const struct rfkill_ops hci_rfkill_ops = {
3024 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the controller, re-check conditions that
 * were ignored during setup (rfkill, unconfigured, missing address), and
 * either power back off, schedule auto-off, or announce the new index
 * over the management interface.
 */
3027 static void hci_power_on(struct work_struct *work)
3029 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3032 BT_DBG("%s", hdev->name);
3034 err = hci_dev_do_open(hdev);
3036 mgmt_set_powered_failed(hdev, err);
3040 /* During the HCI setup phase, a few error conditions are
3041 * ignored and they need to be checked now. If they are still
3042 * valid, it is important to turn the device back off.
/* A BR/EDR controller with neither a public BD_ADDR nor a configured
 * static address cannot operate, hence the bacmp() checks below.
 */
3044 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3045 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3046 (hdev->dev_type == HCI_BREDR &&
3047 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3048 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3049 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3050 hci_dev_do_close(hdev);
3051 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3052 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3053 HCI_AUTO_OFF_TIMEOUT);
3056 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3057 /* For unconfigured devices, set the HCI_RAW flag
3058 * so that userspace can easily identify them.
3060 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3061 set_bit(HCI_RAW, &hdev->flags);
3063 /* For fully configured devices, this will send
3064 * the Index Added event. For unconfigured devices,
3065 * it will send Unconfigued Index Added event.
3067 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3068 * and no event will be send.
3070 mgmt_index_added(hdev);
3071 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3072 /* When the controller is now configured, then it
3073 * is important to clear the HCI_RAW flag.
3075 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3076 clear_bit(HCI_RAW, &hdev->flags);
3078 /* Powering on the controller with HCI_CONFIG set only
3079 * happens with the transition from unconfigured to
3080 * configured. This will send the Index Added event.
3082 mgmt_index_added(hdev);
/* Delayed power-off work item: simply closes the controller. */
3086 static void hci_power_off(struct work_struct *work)
3088 struct hci_dev *hdev = container_of(work, struct hci_dev,
3091 BT_DBG("%s", hdev->name);
3093 hci_dev_do_close(hdev);
/* Delayed work: discoverable-mode timeout expired; let mgmt turn
 * discoverability off.
 */
3096 static void hci_discov_off(struct work_struct *work)
3098 struct hci_dev *hdev;
3100 hdev = container_of(work, struct hci_dev, discov_off.work);
3102 BT_DBG("%s", hdev->name);
3104 mgmt_discoverable_timeout(hdev);
/* Unlink (and, in lines not visible here, presumably free) every entry
 * on the controller's UUID list.
 */
3107 void hci_uuids_clear(struct hci_dev *hdev)
3109 struct bt_uuid *uuid, *tmp;
3111 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3112 list_del(&uuid->list);
/* Drop all stored BR/EDR link keys for this controller. */
3117 void hci_link_keys_clear(struct hci_dev *hdev)
3119 struct list_head *p, *n;
3121 list_for_each_safe(p, n, &hdev->link_keys) {
3122 struct link_key *key;
3124 key = list_entry(p, struct link_key, list);
/* Drop all stored SMP long term keys for this controller. */
3131 void hci_smp_ltks_clear(struct hci_dev *hdev)
3133 struct smp_ltk *k, *tmp;
3135 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Drop all stored SMP identity resolving keys for this controller. */
3141 void hci_smp_irks_clear(struct hci_dev *hdev)
3143 struct smp_irk *k, *tmp;
3145 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Linear lookup of a BR/EDR link key by remote device address.
 * Returns the key on a match (return statements not in view).
 */
3151 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3155 list_for_each_entry(k, &hdev->link_keys, list)
3156 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and the local/remote authentication requirements of
 * the connection.  Return values are on lines not visible here.
 */
3162 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3163 u8 key_type, u8 old_key_type)
/* Legacy key types (< combination key) — persistence decision follows
 * on a line not in view.
 */
3166 if (key_type < 0x03)
3169 /* Debug keys are insecure so don't store them persistently */
3170 if (key_type == HCI_LK_DEBUG_COMBINATION)
3173 /* Changed combination key and there's no previous one */
3174 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3177 /* Security mode 3 case */
3181 /* Neither local nor remote side had no-bonding as requirement */
3182 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3185 /* Local side had dedicated bonding as requirement */
3186 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3189 /* Remote side had dedicated bonding as requirement */
3190 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3193 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI role it is used in: SMP_LTK keys are
 * for the master role, everything else for slave.
 */
3198 static u8 ltk_role(u8 type)
3200 if (type == SMP_LTK)
3201 return HCI_ROLE_MASTER;
3203 return HCI_ROLE_SLAVE;
/* Look up a long term key by its EDIV/Rand pair, additionally matching
 * the requested role (return-on-match lines not in view).
 */
3206 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3211 list_for_each_entry(k, &hdev->long_term_keys, list) {
3212 if (k->ediv != ediv || k->rand != rand)
3215 if (ltk_role(k->type) != role)
/* Look up a long term key by remote address, address type and role. */
3224 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3225 u8 addr_type, u8 role)
3229 list_for_each_entry(k, &hdev->long_term_keys, list)
3230 if (addr_type == k->bdaddr_type &&
3231 bacmp(bdaddr, &k->bdaddr) == 0 &&
3232 ltk_role(k->type) == role)
/* Resolve a resolvable private address to a stored IRK.  First pass
 * matches the cached RPA directly; the second pass runs the crypto
 * resolution and caches the RPA on success.
 */
3238 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3240 struct smp_irk *irk;
3242 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3243 if (!bacmp(&irk->rpa, rpa))
3247 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3248 if (smp_irk_matches(hdev, irk->val, rpa)) {
3249 bacpy(&irk->rpa, rpa);
/* Look up an IRK by the remote device's identity address. */
3257 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3260 struct smp_irk *irk;
3262 /* Identity Address must be public or static random */
/* Static random addresses have the top two bits set (0b11), hence the
 * 0xc0 mask on the most significant byte.
 */
3263 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3266 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3267 if (addr_type == irk->addr_type &&
3268 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr.  If an old key exists
 * it is updated in place; otherwise a new entry is allocated and added.
 * On return *persistent (if non-NULL) says whether the key should be
 * stored permanently.  Several branch bodies are not visible in this
 * listing; comments describe only the visible logic.
 */
3275 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3276 bdaddr_t *bdaddr, u8 *val, u8 type,
3277 u8 pin_len, bool *persistent)
3279 struct link_key *key, *old_key;
3282 old_key = hci_find_link_key(hdev, bdaddr);
3284 old_key_type = old_key->type;
/* No previous key: remember the connection's key type (or 0xff for
 * "none") and allocate a fresh entry.
 */
3287 old_key_type = conn ? conn->key_type : 0xff;
3288 key = kzalloc(sizeof(*key), GFP_KERNEL);
3291 list_add(&key->list, &hdev->link_keys);
3294 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3296 /* Some buggy controller combinations generate a changed
3297 * combination key for legacy pairing even when there's no
/* ...previous key to change (workaround: treat it as a plain
 * combination key).  NOTE(review): the `conn->key_type = type` below is
 * presumably guarded by an `if (conn)` on a line not in view — confirm
 * against the full source before relying on it.
 */
3299 if (type == HCI_LK_CHANGED_COMBINATION &&
3300 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3301 type = HCI_LK_COMBINATION;
3303 conn->key_type = type;
3306 bacpy(&key->bdaddr, bdaddr);
3307 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3308 key->pin_len = pin_len;
/* A changed-combination key keeps the old key's type on record. */
3310 if (type == HCI_LK_CHANGED_COMBINATION)
3311 key->type = old_key_type;
3316 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 * Reuses an existing entry matching address and role, otherwise
 * allocates a new one.
 */
3322 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3323 u8 addr_type, u8 type, u8 authenticated,
3324 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3326 struct smp_ltk *key, *old_key;
3327 u8 role = ltk_role(type);
3329 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3333 key = kzalloc(sizeof(*key), GFP_KERNEL);
3336 list_add(&key->list, &hdev->long_term_keys);
3339 bacpy(&key->bdaddr, bdaddr);
3340 key->bdaddr_type = addr_type;
3341 memcpy(key->val, tk, sizeof(key->val));
3342 key->authenticated = authenticated;
3345 key->enc_size = enc_size;
/* Store (or update) an identity resolving key for @bdaddr/@addr_type,
 * together with the last resolvable private address seen for it.
 */
3351 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3352 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3354 struct smp_irk *irk;
3356 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3358 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3362 bacpy(&irk->bdaddr, bdaddr);
3363 irk->addr_type = addr_type;
3365 list_add(&irk->list, &hdev->identity_resolving_keys);
3368 memcpy(irk->val, val, 16);
3369 bacpy(&irk->rpa, rpa);
/* Remove the stored link key for @bdaddr, if any. */
3374 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3376 struct link_key *key;
3378 key = hci_find_link_key(hdev, bdaddr);
3382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3384 list_del(&key->list);
/* Remove all LTKs matching @bdaddr/@bdaddr_type.  Returns 0 if at least
 * one key was removed, -ENOENT otherwise.
 */
3390 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3392 struct smp_ltk *k, *tmp;
3395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3396 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3406 return removed ? 0 : -ENOENT;
/* Remove all IRKs matching @bdaddr/@addr_type. */
3409 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3411 struct smp_irk *k, *tmp;
3413 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3414 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3417 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3424 /* HCI command timer function */
/* Fires when the controller failed to respond to a command in time:
 * log the stalled opcode (if a command is in flight) and kick the
 * command work queue again so processing can continue.
 */
3425 static void hci_cmd_timeout(struct work_struct *work)
3427 struct hci_dev *hdev = container_of(work, struct hci_dev,
3430 if (hdev->sent_cmd) {
3431 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3432 u16 opcode = __le16_to_cpu(sent->opcode);
3434 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3436 BT_ERR("%s command tx timeout", hdev->name);
/* Reset the credit count so the next queued command can be sent. */
3439 atomic_set(&hdev->cmd_cnt, 1);
3440 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by device address. */
3443 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3446 struct oob_data *data;
3448 list_for_each_entry(data, &hdev->remote_oob_data, list)
3449 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for @bdaddr, if present. */
3455 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3457 struct oob_data *data;
3459 data = hci_find_remote_oob_data(hdev, bdaddr);
3463 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3465 list_del(&data->list);
/* Drop all stored remote OOB pairing data. */
3471 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3473 struct oob_data *data, *n;
3475 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3476 list_del(&data->list);
/* Store P-192 OOB data (hash + randomizer) for @bdaddr, zeroing the
 * P-256 fields.  Reuses an existing entry when one exists.
 */
3481 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3482 u8 *hash, u8 *randomizer)
3484 struct oob_data *data;
3486 data = hci_find_remote_oob_data(hdev, bdaddr);
3488 data = kmalloc(sizeof(*data), GFP_KERNEL);
3492 bacpy(&data->bdaddr, bdaddr);
3493 list_add(&data->list, &hdev->remote_oob_data);
3496 memcpy(data->hash192, hash, sizeof(data->hash192));
3497 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
/* Only legacy P-192 values were supplied: invalidate the P-256 slots. */
3499 memset(data->hash256, 0, sizeof(data->hash256));
3500 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3502 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended OOB data: both P-192 and P-256 hash/randomizer pairs
 * for @bdaddr.  Reuses an existing entry when one exists.
 */
3507 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3508 u8 *hash192, u8 *randomizer192,
3509 u8 *hash256, u8 *randomizer256)
3511 struct oob_data *data;
3513 data = hci_find_remote_oob_data(hdev, bdaddr);
3515 data = kmalloc(sizeof(*data), GFP_KERNEL);
3519 bacpy(&data->bdaddr, bdaddr);
3520 list_add(&data->list, &hdev->remote_oob_data);
3523 memcpy(data->hash192, hash192, sizeof(data->hash192));
3524 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3526 memcpy(data->hash256, hash256, sizeof(data->hash256));
3527 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3529 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find an entry matching @bdaddr and @type on a generic bdaddr list
 * (black/white lists etc.).
 */
3534 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3535 bdaddr_t *bdaddr, u8 type)
3537 struct bdaddr_list *b;
3539 list_for_each_entry(b, bdaddr_list, list) {
3540 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Empty a generic bdaddr list. */
3547 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3549 struct list_head *p, *n;
3551 list_for_each_safe(p, n, bdaddr_list) {
3552 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to a generic bdaddr list.  BDADDR_ANY is rejected
 * and duplicates are not added (error returns not in view).
 */
3559 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3561 struct bdaddr_list *entry;
3563 if (!bacmp(bdaddr, BDADDR_ANY))
3566 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3569 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3573 bacpy(&entry->bdaddr, bdaddr);
3574 entry->bdaddr_type = type;
3576 list_add(&entry->list, list);
/* Remove @bdaddr/@type from a generic bdaddr list.  Passing BDADDR_ANY
 * clears the whole list instead.
 */
3581 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3583 struct bdaddr_list *entry;
3585 if (!bacmp(bdaddr, BDADDR_ANY)) {
3586 hci_bdaddr_list_clear(list);
3590 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3594 list_del(&entry->list);
3600 /* This function requires the caller holds hdev->lock */
/* Find LE connection parameters for an identity address.
 * NOTE(review): "¶ms" below is mojibake for "&params" from the
 * extraction of this listing; the underlying code takes the address of
 * params->addr.
 */
3601 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3602 bdaddr_t *addr, u8 addr_type)
3604 struct hci_conn_params *params;
3606 /* The conn params list only contains identity addresses */
3607 if (!hci_is_identity_address(addr, addr_type))
3610 list_for_each_entry(params, &hdev->le_conn_params, list) {
3611 if (bacmp(¶ms->addr, addr) == 0 &&
3612 params->addr_type == addr_type) {
/* True if there is an established LE connection to @addr/@type
 * (the final return is on a line not in view).
 */
3620 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3622 struct hci_conn *conn;
3624 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3628 if (conn->dst_type != type)
3631 if (conn->state != BT_CONNECTED)
3637 /* This function requires the caller holds hdev->lock */
/* Find a conn_params entry on a pending-action list (pend_le_conns or
 * pend_le_reports); iteration is over the `action` list head, not
 * `list`.  ("¶m" is mojibake for "&param" — extraction artifact.)
 */
3638 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3639 bdaddr_t *addr, u8 addr_type)
3641 struct hci_conn_params *param;
3643 /* The list only contains identity addresses */
3644 if (!hci_is_identity_address(addr, addr_type))
3647 list_for_each_entry(param, list, action) {
3648 if (bacmp(¶m->addr, addr) == 0 &&
3649 param->addr_type == addr_type)
3656 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address.
 * New entries are initialised from the controller defaults with
 * auto-connect disabled.  ("¶ms" is mojibake for "&params".)
 */
3657 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3658 bdaddr_t *addr, u8 addr_type)
3660 struct hci_conn_params *params;
3662 if (!hci_is_identity_address(addr, addr_type))
3665 params = hci_conn_params_lookup(hdev, addr, addr_type);
3669 params = kzalloc(sizeof(*params), GFP_KERNEL);
3671 BT_ERR("Out of memory");
3675 bacpy(¶ms->addr, addr);
3676 params->addr_type = addr_type;
3678 list_add(¶ms->list, &hdev->le_conn_params);
3679 INIT_LIST_HEAD(¶ms->action);
3681 params->conn_min_interval = hdev->le_conn_min_interval;
3682 params->conn_max_interval = hdev->le_conn_max_interval;
3683 params->conn_latency = hdev->le_conn_latency;
3684 params->supervision_timeout = hdev->le_supv_timeout;
3685 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3687 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3692 /* This function requires the caller holds hdev->lock */
/* Change the auto-connect policy for @addr, moving the entry between
 * the pend_le_reports / pend_le_conns action lists and refreshing the
 * background scan accordingly.  `break`s between switch cases are on
 * lines not in view.  ("¶ms" is mojibake for "&params".)
 */
3693 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3696 struct hci_conn_params *params;
3698 params = hci_conn_params_add(hdev, addr, addr_type);
3702 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before re-adding. */
3705 list_del_init(¶ms->action);
3707 switch (auto_connect) {
3708 case HCI_AUTO_CONN_DISABLED:
3709 case HCI_AUTO_CONN_LINK_LOSS:
3710 hci_update_background_scan(hdev);
3712 case HCI_AUTO_CONN_REPORT:
3713 list_add(¶ms->action, &hdev->pend_le_reports);
3714 hci_update_background_scan(hdev);
3716 case HCI_AUTO_CONN_DIRECT:
3717 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if we are not already connected. */
3718 if (!is_connected(hdev, addr, addr_type)) {
3719 list_add(¶ms->action, &hdev->pend_le_conns);
3720 hci_update_background_scan(hdev);
3725 params->auto_connect = auto_connect;
3727 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Release a conn_params entry: drop the connection reference it may
 * hold (the guarding `if (params->conn)` is on a line not in view),
 * unlink it from both lists and (presumably) free it.
 */
3733 static void hci_conn_params_free(struct hci_conn_params *params)
3736 hci_conn_drop(params->conn);
3737 hci_conn_put(params->conn);
3740 list_del(¶ms->action);
3741 list_del(¶ms->list);
3745 /* This function requires the caller holds hdev->lock */
/* Delete the conn_params entry for @addr/@addr_type and refresh the
 * background scan.
 */
3746 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3748 struct hci_conn_params *params;
3750 params = hci_conn_params_lookup(hdev, addr, addr_type);
3754 hci_conn_params_free(params);
3756 hci_update_background_scan(hdev);
3758 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3761 /* This function requires the caller holds hdev->lock */
/* Remove every conn_params entry whose auto-connect policy is
 * HCI_AUTO_CONN_DISABLED (the `continue` for other policies is on a
 * line not in view).
 */
3762 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3764 struct hci_conn_params *params, *tmp;
3766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3767 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3769 list_del(¶ms->list);
3773 BT_DBG("All LE disabled connection parameters were removed");
3776 /* This function requires the caller holds hdev->lock */
/* Remove every LE connection-parameter entry and refresh the
 * background scan.
 */
3777 void hci_conn_params_clear_all(struct hci_dev *hdev)
3779 struct hci_conn_params *params, *tmp;
3781 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3782 hci_conn_params_free(params);
3784 hci_update_background_scan(hdev);
3786 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the follow-up inquiry: on failure log
 * it and mark discovery stopped (the error check and hci_dev_lock are
 * on lines not in view).
 */
3789 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3792 BT_ERR("Failed to start inquiry: status %d", status);
3795 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3796 hci_dev_unlock(hdev);
/* Callback after the LE scan has been disabled.  For pure LE discovery
 * this ends the discovery session; for interleaved discovery it kicks
 * off the BR/EDR inquiry phase with the general inquiry access code.
 */
3801 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3803 /* General inquiry access code (GIAC) */
3804 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3805 struct hci_request req;
3806 struct hci_cp_inquiry cp;
3810 BT_ERR("Failed to disable LE scanning: status %d", status);
3814 switch (hdev->discovery.type) {
3815 case DISCOV_TYPE_LE:
3817 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818 hci_dev_unlock(hdev);
3821 case DISCOV_TYPE_INTERLEAVED:
3822 hci_req_init(&req, hdev);
3824 memset(&cp, 0, sizeof(cp));
3825 memcpy(&cp.lap, lap, sizeof(cp.lap));
3826 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3827 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Flush the inquiry cache before starting a fresh inquiry round. */
3831 hci_inquiry_cache_flush(hdev);
3833 err = hci_req_run(&req, inquiry_complete);
3835 BT_ERR("Inquiry request failed: err %d", err);
3836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3839 hci_dev_unlock(hdev);
/* Delayed work: the LE scan duration expired; build and run a request
 * that disables LE scanning, completing in
 * le_scan_disable_work_complete().
 */
3844 static void le_scan_disable_work(struct work_struct *work)
3846 struct hci_dev *hdev = container_of(work, struct hci_dev,
3847 le_scan_disable.work);
3848 struct hci_request req;
3851 BT_DBG("%s", hdev->name);
3853 hci_req_init(&req, hdev);
3855 hci_req_add_le_scan_disable(&req);
3857 err = hci_req_run(&req, le_scan_disable_work_complete);
3859 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Append an LE Set Random Address command to @req, unless advertising
 * or an LE connection attempt is in progress — in that case defer the
 * update by flagging the RPA as expired.
 */
3862 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3864 struct hci_dev *hdev = req->hdev;
3866 /* If we're advertising or initiating an LE connection we can't
3867 * go ahead and change the random address at this time. This is
3868 * because the eventual initiator address used for the
3869 * subsequently created connection will be undefined (some
3870 * controllers use the new address and others the one we had
3871 * when the operation started).
3873 * In this kind of scenario skip the update and let the random
3874 * address be updated at the next cycle.
3876 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3877 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3878 BT_DBG("Deferring random address update");
3879 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3883 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose and (if needed) program the own address for an LE operation.
 * Priority: resolvable private address when privacy is on, an
 * unresolvable private address when privacy is required but no IRK is
 * usable, the static address when forced or no public address exists,
 * and the public address otherwise.  Sets *own_addr_type accordingly.
 */
3886 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3889 struct hci_dev *hdev = req->hdev;
3892 /* If privacy is enabled use a resolvable private address. If
3893 * current RPA has expired or there is something else than
3894 * the current RPA in use, then generate a new one.
3896 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3899 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Current RPA still valid and already programmed: nothing to do. */
3901 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3902 !bacmp(&hdev->random_addr, &hdev->rpa))
3905 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3907 BT_ERR("%s failed to generate new RPA", hdev->name);
3911 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA rotation timer (rpa_timeout is in seconds). */
3913 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3914 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3919 /* In case of required privacy without resolvable private address,
3920 * use an unresolvable private address. This is useful for active
3921 * scanning and non-connectable advertising.
3923 if (require_privacy) {
3926 get_random_bytes(&urpa, 6);
3927 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3929 *own_addr_type = ADDR_LE_DEV_RANDOM;
3930 set_random_addr(req, &urpa);
3934 /* If forcing static address is in use or there is no public
3935 * address use the static address as random address (but skip
3936 * the HCI command if the current random address is already the
3939 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3940 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3943 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3944 &hdev->static_addr);
3948 /* Neither privacy nor static address is being used so use a
3951 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3956 /* Copy the Identity Address of the controller.
3958 * If the controller has a public BD_ADDR, then by default use that one.
3959 * If this is a LE only controller without a public address, default to
3960 * the static random address.
3962 * For debugging purposes it is possible to force controllers with a
3963 * public address to use the static random address instead.
3965 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3968 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3969 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3970 bacpy(bdaddr, &hdev->static_addr);
3971 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3973 bacpy(bdaddr, &hdev->bdaddr);
3974 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3978 /* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with spec-default parameter
 * values, empty lists, work items and queues.  Freed via hci_free_dev()
 * once registered with the driver model.
 */
3979 struct hci_dev *hci_alloc_dev(void)
3981 struct hci_dev *hdev;
3983 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3987 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3988 hdev->esco_type = (ESCO_HV1);
3989 hdev->link_mode = (HCI_LM_ACCEPT);
3990 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3991 hdev->io_capability = 0x03; /* No Input No Output */
3992 hdev->manufacturer = 0xffff; /* Default to internal use */
3993 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3994 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3996 hdev->sniff_max_interval = 800;
3997 hdev->sniff_min_interval = 80;
/* LE defaults below are in controller units (0.625 ms for advertising
 * and scan intervals, 1.25 ms for connection intervals).
 */
3999 hdev->le_adv_channel_map = 0x07;
4000 hdev->le_adv_min_interval = 0x0800;
4001 hdev->le_adv_max_interval = 0x0800;
4002 hdev->le_scan_interval = 0x0060;
4003 hdev->le_scan_window = 0x0030;
4004 hdev->le_conn_min_interval = 0x0028;
4005 hdev->le_conn_max_interval = 0x0038;
4006 hdev->le_conn_latency = 0x0000;
4007 hdev->le_supv_timeout = 0x002a;
4009 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4010 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4011 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4012 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4014 mutex_init(&hdev->lock);
4015 mutex_init(&hdev->req_lock);
4017 INIT_LIST_HEAD(&hdev->mgmt_pending);
4018 INIT_LIST_HEAD(&hdev->blacklist);
4019 INIT_LIST_HEAD(&hdev->whitelist);
4020 INIT_LIST_HEAD(&hdev->uuids);
4021 INIT_LIST_HEAD(&hdev->link_keys);
4022 INIT_LIST_HEAD(&hdev->long_term_keys);
4023 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4024 INIT_LIST_HEAD(&hdev->remote_oob_data);
4025 INIT_LIST_HEAD(&hdev->le_white_list);
4026 INIT_LIST_HEAD(&hdev->le_conn_params);
4027 INIT_LIST_HEAD(&hdev->pend_le_conns);
4028 INIT_LIST_HEAD(&hdev->pend_le_reports);
4029 INIT_LIST_HEAD(&hdev->conn_hash.list);
4031 INIT_WORK(&hdev->rx_work, hci_rx_work);
4032 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4033 INIT_WORK(&hdev->tx_work, hci_tx_work);
4034 INIT_WORK(&hdev->power_on, hci_power_on);
4036 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4037 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4038 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4040 skb_queue_head_init(&hdev->rx_q);
4041 skb_queue_head_init(&hdev->cmd_q);
4042 skb_queue_head_init(&hdev->raw_q);
4044 init_waitqueue_head(&hdev->req_wait_q);
4046 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4048 hci_init_sysfs(hdev);
4049 discovery_init(hdev);
4053 EXPORT_SYMBOL(hci_alloc_dev);
4055 /* Free HCI device */
/* Drop the last device reference; the actual kfree happens in the
 * driver-model release callback.
 */
4056 void hci_free_dev(struct hci_dev *hdev)
4058 /* will free via device release */
4059 put_device(&hdev->dev);
4061 EXPORT_SYMBOL(hci_free_dev);
4063 /* Register HCI device */
/* Register a controller with the HCI core: allocate an index, create
 * work queues, debugfs dir, sysfs device and rfkill switch, add it to
 * the global device list and schedule the initial power-on.  Error
 * unwinding labels are on lines not visible in this listing.
 */
4064 int hci_register_dev(struct hci_dev *hdev)
4068 if (!hdev->open || !hdev->close || !hdev->send)
4071 /* Do not allow HCI_AMP devices to register at index 0,
4072 * so the index can be used as the AMP controller ID.
4074 switch (hdev->dev_type) {
4076 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4079 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4088 sprintf(hdev->name, "hci%d", id);
4091 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4093 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4094 WQ_MEM_RECLAIM, 1, hdev->name);
4095 if (!hdev->workqueue) {
4100 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4101 WQ_MEM_RECLAIM, 1, hdev->name);
4102 if (!hdev->req_workqueue) {
4103 destroy_workqueue(hdev->workqueue);
4108 if (!IS_ERR_OR_NULL(bt_debugfs))
4109 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4111 dev_set_name(&hdev->dev, "%s", hdev->name);
4113 error = device_add(&hdev->dev);
4117 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4118 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: run without a switch. */
4121 if (rfkill_register(hdev->rfkill) < 0) {
4122 rfkill_destroy(hdev->rfkill);
4123 hdev->rfkill = NULL;
4127 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4128 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4130 set_bit(HCI_SETUP, &hdev->dev_flags);
4131 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4133 if (hdev->dev_type == HCI_BREDR) {
4134 /* Assume BR/EDR support until proven otherwise (such as
4135 * through reading supported features during init.
4137 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4140 write_lock(&hci_dev_list_lock);
4141 list_add(&hdev->list, &hci_dev_list);
4142 write_unlock(&hci_dev_list_lock);
4144 /* Devices that are marked for raw-only usage are unconfigured
4145 * and should not be included in normal operation.
4147 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4148 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4150 hci_notify(hdev, HCI_DEV_REG);
4153 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error-path cleanup (labels not visible in this listing). */
4158 destroy_workqueue(hdev->workqueue);
4159 destroy_workqueue(hdev->req_workqueue);
4161 ida_simple_remove(&hci_index_ida, hdev->id);
4165 EXPORT_SYMBOL(hci_register_dev);
4167 /* Unregister HCI device */
/* Tear down a controller in reverse order of registration: remove it
 * from the global list, close it, cancel pending work, notify mgmt,
 * unregister rfkill/sysfs/debugfs, destroy work queues, and free all
 * stored keys, OOB data and connection parameters.
 */
4168 void hci_unregister_dev(struct hci_dev *hdev)
4172 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4174 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4178 write_lock(&hci_dev_list_lock);
4179 list_del(&hdev->list);
4180 write_unlock(&hci_dev_list_lock);
4182 hci_dev_do_close(hdev);
4184 for (i = 0; i < NUM_REASSEMBLY; i++)
4185 kfree_skb(hdev->reassembly[i]);
4187 cancel_work_sync(&hdev->power_on);
/* Only announce removal over mgmt for devices that finished setup. */
4189 if (!test_bit(HCI_INIT, &hdev->flags) &&
4190 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4191 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4193 mgmt_index_removed(hdev);
4194 hci_dev_unlock(hdev);
4197 /* mgmt_index_removed should take care of emptying the
4199 BUG_ON(!list_empty(&hdev->mgmt_pending));
4201 hci_notify(hdev, HCI_DEV_UNREG);
4204 rfkill_unregister(hdev->rfkill);
4205 rfkill_destroy(hdev->rfkill);
4208 smp_unregister(hdev);
4210 device_del(&hdev->dev);
4212 debugfs_remove_recursive(hdev->debugfs);
4214 destroy_workqueue(hdev->workqueue);
4215 destroy_workqueue(hdev->req_workqueue);
4218 hci_bdaddr_list_clear(&hdev->blacklist);
4219 hci_bdaddr_list_clear(&hdev->whitelist);
4220 hci_uuids_clear(hdev);
4221 hci_link_keys_clear(hdev);
4222 hci_smp_ltks_clear(hdev);
4223 hci_smp_irks_clear(hdev);
4224 hci_remote_oob_data_clear(hdev);
4225 hci_bdaddr_list_clear(&hdev->le_white_list);
4226 hci_conn_params_clear_all(hdev);
4227 hci_dev_unlock(hdev);
4231 ida_simple_remove(&hci_index_ida, id);
4233 EXPORT_SYMBOL(hci_unregister_dev);
4235 /* Suspend HCI device */
/* Notify registered listeners that the controller is suspending. */
4236 int hci_suspend_dev(struct hci_dev *hdev)
4238 hci_notify(hdev, HCI_DEV_SUSPEND);
4241 EXPORT_SYMBOL(hci_suspend_dev);
4243 /* Resume HCI device */
/* Notify registered listeners that the controller has resumed. */
4244 int hci_resume_dev(struct hci_dev *hdev)
4246 hci_notify(hdev, HCI_DEV_RESUME);
4249 EXPORT_SYMBOL(hci_resume_dev);
4251 /* Receive frame from HCI drivers */
/* Driver entry point for inbound packets: mark the skb as incoming,
 * timestamp it, and queue it for the RX work item.  Frames arriving
 * while the device is neither up nor initialising are rejected.
 */
4252 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4254 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4255 && !test_bit(HCI_INIT, &hdev->flags))) {
4261 bt_cb(skb)->incoming = 1;
4264 __net_timestamp(skb);
4266 skb_queue_tail(&hdev->rx_q, skb);
4267 queue_work(hdev->workqueue, &hdev->rx_work);
4271 EXPORT_SYMBOL(hci_recv_frame);
/* Incremental packet reassembly for drivers that deliver data in
 * fragments.  A partial skb is kept in hdev->reassembly[index]; once
 * the header is complete, scb->expect is set from the length field of
 * the ACL/SCO/event header, and when it reaches zero the finished frame
 * is handed to hci_recv_frame().  Returns (per the visible flow) how
 * much of @count remains unconsumed; return statements and the
 * surrounding while loop are on lines not in view.
 */
4273 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4274 int count, __u8 index)
4279 struct sk_buff *skb;
4280 struct bt_skb_cb *scb;
4282 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4283 index >= NUM_REASSEMBLY)
4286 skb = hdev->reassembly[index];
/* No partial frame yet: pick max size and header length per type and
 * allocate a fresh reassembly buffer.
 */
4290 case HCI_ACLDATA_PKT:
4291 len = HCI_MAX_FRAME_SIZE;
4292 hlen = HCI_ACL_HDR_SIZE;
4295 len = HCI_MAX_EVENT_SIZE;
4296 hlen = HCI_EVENT_HDR_SIZE;
4298 case HCI_SCODATA_PKT:
4299 len = HCI_MAX_SCO_SIZE;
4300 hlen = HCI_SCO_HDR_SIZE;
4304 skb = bt_skb_alloc(len, GFP_ATOMIC);
4308 scb = (void *) skb->cb;
4310 scb->pkt_type = type;
4312 hdev->reassembly[index] = skb;
/* Copy as much input as the current expectation allows. */
4316 scb = (void *) skb->cb;
4317 len = min_t(uint, scb->expect, count);
4319 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from the packet header;
 * if it exceeds the buffer tailroom, drop the reassembly state.
 */
4328 if (skb->len == HCI_EVENT_HDR_SIZE) {
4329 struct hci_event_hdr *h = hci_event_hdr(skb);
4330 scb->expect = h->plen;
4332 if (skb_tailroom(skb) < scb->expect) {
4334 hdev->reassembly[index] = NULL;
4340 case HCI_ACLDATA_PKT:
4341 if (skb->len == HCI_ACL_HDR_SIZE) {
4342 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4343 scb->expect = __le16_to_cpu(h->dlen);
4345 if (skb_tailroom(skb) < scb->expect) {
4347 hdev->reassembly[index] = NULL;
4353 case HCI_SCODATA_PKT:
4354 if (skb->len == HCI_SCO_HDR_SIZE) {
4355 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4356 scb->expect = h->dlen;
4358 if (skb_tailroom(skb) < scb->expect) {
4360 hdev->reassembly[index] = NULL;
4367 if (scb->expect == 0) {
4368 /* Complete frame */
4370 bt_cb(skb)->pkt_type = type;
4371 hci_recv_frame(hdev, skb);
4373 hdev->reassembly[index] = NULL;
/* Slot used for byte-stream (e.g. UART) reassembly, where the packet
 * type indicator is the first byte of the stream.
 */
4381 #define STREAM_REASSEMBLY 0
/* Feed a raw byte stream into the reassembler: for each frame, the
 * first byte selects the packet type, then hci_reassembly() consumes
 * the rest (the loop and returns are on lines not in view).
 */
4383 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4389 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4392 struct { char type; } *pkt;
4394 /* Start of the frame */
/* Continuing a frame already in progress: reuse its recorded type. */
4401 type = bt_cb(skb)->pkt_type;
4403 rem = hci_reassembly(hdev, type, data, count,
4408 data += (count - rem);
4414 EXPORT_SYMBOL(hci_recv_stream_fragment);
4416 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure (e.g. L2CAP, SCO) on
 * the global hci_cb_list.
 */
4418 int hci_register_cb(struct hci_cb *cb)
4420 BT_DBG("%p name %s", cb, cb->name);
4422 write_lock(&hci_cb_list_lock);
4423 list_add(&cb->list, &hci_cb_list);
4424 write_unlock(&hci_cb_list_lock);
4428 EXPORT_SYMBOL(hci_register_cb);
4430 int hci_unregister_cb(struct hci_cb *cb)
4432 BT_DBG("%p name %s", cb, cb->name);
4434 write_lock(&hci_cb_list_lock);
4435 list_del(&cb->list);
4436 write_unlock(&hci_cb_list_lock);
4440 EXPORT_SYMBOL(hci_unregister_cb);
4442 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4446 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4449 __net_timestamp(skb);
4451 /* Send copy to monitor */
4452 hci_send_to_monitor(hdev, skb);
4454 if (atomic_read(&hdev->promisc)) {
4455 /* Send copy to the sockets */
4456 hci_send_to_sock(hdev, skb);
4459 /* Get rid of skb owner, prior to sending to the driver. */
4462 err = hdev->send(hdev, skb);
4464 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4469 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4471 skb_queue_head_init(&req->cmd_q);
4476 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4478 struct hci_dev *hdev = req->hdev;
4479 struct sk_buff *skb;
4480 unsigned long flags;
4482 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4484 /* If an error occurred during request building, remove all HCI
4485 * commands queued on the HCI request queue.
4488 skb_queue_purge(&req->cmd_q);
4492 /* Do not allow empty requests */
4493 if (skb_queue_empty(&req->cmd_q))
4496 skb = skb_peek_tail(&req->cmd_q);
4497 bt_cb(skb)->req.complete = complete;
4499 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4500 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4501 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4503 queue_work(hdev->workqueue, &hdev->cmd_work);
4508 bool hci_req_pending(struct hci_dev *hdev)
4510 return (hdev->req_status == HCI_REQ_PEND);
4513 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4514 u32 plen, const void *param)
4516 int len = HCI_COMMAND_HDR_SIZE + plen;
4517 struct hci_command_hdr *hdr;
4518 struct sk_buff *skb;
4520 skb = bt_skb_alloc(len, GFP_ATOMIC);
4524 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4525 hdr->opcode = cpu_to_le16(opcode);
4529 memcpy(skb_put(skb, plen), param, plen);
4531 BT_DBG("skb len %d", skb->len);
4533 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4534 bt_cb(skb)->opcode = opcode;
4539 /* Send HCI command */
4540 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4543 struct sk_buff *skb;
4545 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4547 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4549 BT_ERR("%s no memory for command", hdev->name);
4553 /* Stand-alone HCI commands must be flagged as
4554 * single-command requests.
4556 bt_cb(skb)->req.start = true;
4558 skb_queue_tail(&hdev->cmd_q, skb);
4559 queue_work(hdev->workqueue, &hdev->cmd_work);
4564 /* Queue a command to an asynchronous HCI request */
4565 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4566 const void *param, u8 event)
4568 struct hci_dev *hdev = req->hdev;
4569 struct sk_buff *skb;
4571 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4573 /* If an error occurred during request building, there is no point in
4574 * queueing the HCI command. We can simply return.
4579 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4581 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4582 hdev->name, opcode);
4587 if (skb_queue_empty(&req->cmd_q))
4588 bt_cb(skb)->req.start = true;
4590 bt_cb(skb)->req.event = event;
4592 skb_queue_tail(&req->cmd_q, skb);
4595 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4598 hci_req_add_ev(req, opcode, plen, param, 0);
4601 /* Get data from the previously sent command */
4602 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4604 struct hci_command_hdr *hdr;
4606 if (!hdev->sent_cmd)
4609 hdr = (void *) hdev->sent_cmd->data;
4611 if (hdr->opcode != cpu_to_le16(opcode))
4614 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4616 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4620 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4622 struct hci_acl_hdr *hdr;
4625 skb_push(skb, HCI_ACL_HDR_SIZE);
4626 skb_reset_transport_header(skb);
4627 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4628 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4629 hdr->dlen = cpu_to_le16(len);
/* Queue an (optionally fragmented) ACL packet onto a channel queue.
 * The head skb gets an ACL header with the caller's flags; every
 * fragment on frag_list gets its own header with ACL_START cleared.
 * NOTE(review): this chunk appears truncated/sampled — case labels,
 * braces and the fragment loop header are not fully visible.
 */
4632 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4633 struct sk_buff *skb, __u16 flags)
4635 struct hci_conn *conn = chan->conn;
4636 struct hci_dev *hdev = conn->hdev;
4637 struct sk_buff *list;
4639 skb->len = skb_headlen(skb);
4642 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle, AMP uses the channel handle */
4644 switch (hdev->dev_type) {
4646 hci_add_acl_hdr(skb, conn->handle, flags);
4649 hci_add_acl_hdr(skb, chan->handle, flags);
4652 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4656 list = skb_shinfo(skb)->frag_list;
4658 /* Non fragmented */
4659 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4661 skb_queue_tail(queue, skb);
4664 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain so each fragment is queued individually */
4666 skb_shinfo(skb)->frag_list = NULL;
4668 /* Queue all fragments atomically. We need to use spin_lock_bh
4669 * here because of 6LoWPAN links, as there this function is
4670 * called from softirq and using normal spin lock could cause
4673 spin_lock_bh(&queue->lock);
4675 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag */
4677 flags &= ~ACL_START;
4680 skb = list; list = list->next;
4682 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4683 hci_add_acl_hdr(skb, conn->handle, flags);
4685 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4687 __skb_queue_tail(queue, skb);
4690 spin_unlock_bh(&queue->lock);
4694 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4696 struct hci_dev *hdev = chan->conn->hdev;
4698 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4700 hci_queue_acl(chan, &chan->data_q, skb, flags);
4702 queue_work(hdev->workqueue, &hdev->tx_work);
4706 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4708 struct hci_dev *hdev = conn->hdev;
4709 struct hci_sco_hdr hdr;
4711 BT_DBG("%s len %d", hdev->name, skb->len);
4713 hdr.handle = cpu_to_le16(conn->handle);
4714 hdr.dlen = skb->len;
4716 skb_push(skb, HCI_SCO_HDR_SIZE);
4717 skb_reset_transport_header(skb);
4718 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4720 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4722 skb_queue_tail(&conn->data_q, skb);
4723 queue_work(hdev->workqueue, &hdev->tx_work);
4726 /* ---- HCI TX task (outgoing data) ---- */
4728 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest packets
 * in flight (fair scheduling), and compute its transmit quote from the
 * controller's free buffer count.
 * NOTE(review): this chunk appears truncated/sampled — RCU lock/unlock,
 * case labels and the quote computation tail are not fully visible.
 */
4729 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4732 struct hci_conn_hash *h = &hdev->conn_hash;
4733 struct hci_conn *conn = NULL, *c;
4734 unsigned int num = 0, min = ~0;
4736 /* We don't have to lock device here. Connections are always
4737 * added and removed with TX task disabled. */
4741 list_for_each_entry_rcu(c, &h->list, list) {
/* Skip connections of other types or with nothing to send */
4742 if (c->type != type || skb_queue_empty(&c->data_q))
4745 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the least-busy candidate (fewest un-acked packets) */
4750 if (c->sent < min) {
/* All connections of this type inspected — stop early */
4755 if (hci_conn_num(hdev, type) == num)
/* Select the buffer pool matching the link type */
4764 switch (conn->type) {
4766 cnt = hdev->acl_cnt;
4770 cnt = hdev->sco_cnt;
/* LE falls back to the ACL pool when no dedicated LE buffers exist */
4773 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4777 BT_ERR("Unknown link type");
4785 BT_DBG("conn %p quote %d", conn, *quote);
4789 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4791 struct hci_conn_hash *h = &hdev->conn_hash;
4794 BT_ERR("%s link tx timeout", hdev->name);
4798 /* Kill stalled connections */
4799 list_for_each_entry_rcu(c, &h->list, list) {
4800 if (c->type == type && c->sent) {
4801 BT_ERR("%s killing stalled connection %pMR",
4802 hdev->name, &c->dst);
4803 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all connections of @type, pick the
 * channel whose head packet has the highest priority, breaking ties by
 * fewest packets in flight, and compute its transmit quote.
 * NOTE(review): this chunk appears truncated/sampled — RCU lock/unlock,
 * braces and the final quote computation are not fully visible.
 */
4810 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4813 struct hci_conn_hash *h = &hdev->conn_hash;
4814 struct hci_chan *chan = NULL;
4815 unsigned int num = 0, min = ~0, cur_prio = 0;
4816 struct hci_conn *conn;
4817 int cnt, q, conn_num = 0;
4819 BT_DBG("%s", hdev->name);
4823 list_for_each_entry_rcu(conn, &h->list, list) {
4824 struct hci_chan *tmp;
4826 if (conn->type != type)
4829 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4834 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4835 struct sk_buff *skb;
4837 if (skb_queue_empty(&tmp->data_q))
/* Priority of the channel = priority of its head packet */
4840 skb = skb_peek(&tmp->data_q);
4841 if (skb->priority < cur_prio)
/* Higher priority found — restart fairness bookkeeping */
4844 if (skb->priority > cur_prio) {
4847 cur_prio = skb->priority;
/* Equal priority: prefer the least-busy connection */
4852 if (conn->sent < min) {
4858 if (hci_conn_num(hdev, type) == conn_num)
/* Select the buffer pool matching the selected channel's link type */
4867 switch (chan->conn->type) {
4869 cnt = hdev->acl_cnt;
4872 cnt = hdev->block_cnt;
4876 cnt = hdev->sco_cnt;
4879 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4883 BT_ERR("Unknown link type");
4888 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after a TX round, bump the priority of skbs waiting
 * on channels that got no airtime so they are considered next round.
 * NOTE(review): this chunk appears truncated/sampled — RCU lock/unlock
 * and some loop braces are not visible.
 */
4892 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4894 struct hci_conn_hash *h = &hdev->conn_hash;
4895 struct hci_conn *conn;
4898 BT_DBG("%s", hdev->name);
4902 list_for_each_entry_rcu(conn, &h->list, list) {
4903 struct hci_chan *chan;
4905 if (conn->type != type)
4908 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4913 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4914 struct sk_buff *skb;
4921 if (skb_queue_empty(&chan->data_q))
4924 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — leave as is */
4925 if (skb->priority >= HCI_PRIO_MAX - 1)
/* Promote to just below the maximum priority */
4928 skb->priority = HCI_PRIO_MAX - 1;
4930 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4934 if (hci_conn_num(hdev, type) == num)
4942 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4944 /* Calculate count of blocks used by this packet */
4945 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4948 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4950 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4951 /* ACL tx timeout must be longer than maximum
4952 * link supervision timeout (40.9 seconds) */
4953 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4954 HCI_ACL_TX_TIMEOUT))
4955 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: drain channel queues while buffer credits
 * (hdev->acl_cnt) remain, respecting per-channel quotes and priorities.
 * NOTE(review): this chunk appears truncated/sampled — credit/sent
 * accounting lines and loop braces are not fully visible.
 */
4959 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4961 unsigned int cnt = hdev->acl_cnt;
4962 struct hci_chan *chan;
4963 struct sk_buff *skb;
/* Watchdog first: detect a stalled controller before scheduling */
4966 __check_timeout(hdev, cnt);
4968 while (hdev->acl_cnt &&
4969 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4970 u32 priority = (skb_peek(&chan->data_q))->priority;
4971 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4972 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4973 skb->len, skb->priority);
4975 /* Stop if priority has changed */
4976 if (skb->priority < priority)
4979 skb = skb_dequeue(&chan->data_q);
4981 hci_conn_enter_active_mode(chan->conn,
4982 bt_cb(skb)->force_active);
4984 hci_send_frame(hdev, skb);
4985 hdev->acl_last_tx = jiffies;
/* If anything was sent, rebalance priorities for the next round */
4993 if (cnt != hdev->acl_cnt)
4994 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (AMP controllers): like hci_sched_acl_pkt
 * but credits are data blocks, and one packet may consume several.
 * NOTE(review): this chunk appears truncated/sampled — the link-type
 * selection, blocks declaration and some braces are not visible.
 */
4997 static void hci_sched_acl_blk(struct hci_dev *hdev)
4999 unsigned int cnt = hdev->block_cnt;
5000 struct hci_chan *chan;
5001 struct sk_buff *skb;
5005 __check_timeout(hdev, cnt);
5007 BT_DBG("%s", hdev->name);
/* AMP controllers schedule AMP links rather than plain ACL links */
5009 if (hdev->dev_type == HCI_AMP)
5014 while (hdev->block_cnt > 0 &&
5015 (chan = hci_chan_sent(hdev, type, &quote))) {
5016 u32 priority = (skb_peek(&chan->data_q))->priority;
5017 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5020 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5021 skb->len, skb->priority);
5023 /* Stop if priority has changed */
5024 if (skb->priority < priority)
5027 skb = skb_dequeue(&chan->data_q);
/* A packet needing more blocks than remain cannot be sent now */
5029 blocks = __get_blocks(hdev, skb);
5030 if (blocks > hdev->block_cnt)
5033 hci_conn_enter_active_mode(chan->conn,
5034 bt_cb(skb)->force_active);
5036 hci_send_frame(hdev, skb);
5037 hdev->acl_last_tx = jiffies;
5039 hdev->block_cnt -= blocks;
/* Account whole blocks, not packets, against the channel/connection */
5042 chan->sent += blocks;
5043 chan->conn->sent += blocks;
5047 if (cnt != hdev->block_cnt)
5048 hci_prio_recalculate(hdev, type);
5051 static void hci_sched_acl(struct hci_dev *hdev)
5053 BT_DBG("%s", hdev->name);
5055 /* No ACL link over BR/EDR controller */
5056 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5059 /* No AMP link over AMP controller */
5060 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5063 switch (hdev->flow_ctl_mode) {
5064 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5065 hci_sched_acl_pkt(hdev);
5068 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5069 hci_sched_acl_blk(hdev);
5075 static void hci_sched_sco(struct hci_dev *hdev)
5077 struct hci_conn *conn;
5078 struct sk_buff *skb;
5081 BT_DBG("%s", hdev->name);
5083 if (!hci_conn_num(hdev, SCO_LINK))
5086 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
5087 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5088 BT_DBG("skb %p len %d", skb, skb->len);
5089 hci_send_frame(hdev, skb);
5092 if (conn->sent == ~0)
5098 static void hci_sched_esco(struct hci_dev *hdev)
5100 struct hci_conn *conn;
5101 struct sk_buff *skb;
5104 BT_DBG("%s", hdev->name);
5106 if (!hci_conn_num(hdev, ESCO_LINK))
5109 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5111 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5112 BT_DBG("skb %p len %d", skb, skb->len);
5113 hci_send_frame(hdev, skb);
5116 if (conn->sent == ~0)
/* LE scheduler: drain LE channel queues using either the dedicated LE
 * buffer pool or, when absent, the shared ACL pool.
 * NOTE(review): this chunk appears truncated/sampled — credit/sent
 * accounting and the pool write-back branches are not fully visible.
 */
5122 static void hci_sched_le(struct hci_dev *hdev)
5124 struct hci_chan *chan;
5125 struct sk_buff *skb;
5126 int quote, cnt, tmp;
5128 BT_DBG("%s", hdev->name);
5130 if (!hci_conn_num(hdev, LE_LINK))
5133 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5134 /* LE tx timeout must be longer than maximum
5135 * link supervision timeout (40.9 seconds) */
5136 if (!hdev->le_cnt && hdev->le_pkts &&
5137 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5138 hci_link_tx_to(hdev, LE_LINK);
/* Use dedicated LE credits if the controller reports any */
5141 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5143 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5144 u32 priority = (skb_peek(&chan->data_q))->priority;
5145 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5146 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5147 skb->len, skb->priority);
5149 /* Stop if priority has changed */
5150 if (skb->priority < priority)
5153 skb = skb_dequeue(&chan->data_q);
5155 hci_send_frame(hdev, skb);
5156 hdev->le_last_tx = jiffies;
/* Write remaining credits back to the pool that was borrowed from */
5167 hdev->acl_cnt = cnt;
5170 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run each per-link-type scheduler, then flush the raw
 * queue.  Schedulers are bypassed in user-channel mode, where a
 * userspace process owns the device and sends raw packets directly.
 * NOTE(review): this chunk appears truncated/sampled — the LE scheduler
 * call and closing braces are not visible.
 */
5173 static void hci_tx_work(struct work_struct *work)
5175 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5176 struct sk_buff *skb;
5178 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5179 hdev->sco_cnt, hdev->le_cnt);
5181 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5182 /* Schedule queues and send stuff to HCI driver */
5183 hci_sched_acl(hdev);
5184 hci_sched_sco(hdev);
5185 hci_sched_esco(hdev);
5189 /* Send next queued raw (unknown type) packet */
5190 while ((skb = skb_dequeue(&hdev->raw_q)))
5191 hci_send_frame(hdev, skb);
5194 /* ----- HCI RX task (incoming data processing) ----- */
5196 /* ACL data packet */
5197 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5199 struct hci_acl_hdr *hdr = (void *) skb->data;
5200 struct hci_conn *conn;
5201 __u16 handle, flags;
5203 skb_pull(skb, HCI_ACL_HDR_SIZE);
5205 handle = __le16_to_cpu(hdr->handle);
5206 flags = hci_flags(handle);
5207 handle = hci_handle(handle);
5209 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5212 hdev->stat.acl_rx++;
5215 conn = hci_conn_hash_lookup_handle(hdev, handle);
5216 hci_dev_unlock(hdev);
5219 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5221 /* Send to upper protocol */
5222 l2cap_recv_acldata(conn, skb, flags);
5225 BT_ERR("%s ACL packet for unknown connection handle %d",
5226 hdev->name, handle);
5232 /* SCO data packet */
5233 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5235 struct hci_sco_hdr *hdr = (void *) skb->data;
5236 struct hci_conn *conn;
5239 skb_pull(skb, HCI_SCO_HDR_SIZE);
5241 handle = __le16_to_cpu(hdr->handle);
5243 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5245 hdev->stat.sco_rx++;
5248 conn = hci_conn_hash_lookup_handle(hdev, handle);
5249 hci_dev_unlock(hdev);
5252 /* Send to upper protocol */
5253 sco_recv_scodata(conn, skb);
5256 BT_ERR("%s SCO packet for unknown connection handle %d",
5257 hdev->name, handle);
5263 static bool hci_req_is_complete(struct hci_dev *hdev)
5265 struct sk_buff *skb;
5267 skb = skb_peek(&hdev->cmd_q);
5271 return bt_cb(skb)->req.start;
5274 static void hci_resend_last(struct hci_dev *hdev)
5276 struct hci_command_hdr *sent;
5277 struct sk_buff *skb;
5280 if (!hdev->sent_cmd)
5283 sent = (void *) hdev->sent_cmd->data;
5284 opcode = __le16_to_cpu(sent->opcode);
5285 if (opcode == HCI_OP_RESET)
5288 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5292 skb_queue_head(&hdev->cmd_q, skb);
5293 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called on Command Complete/Status: decide whether the request that
 * @opcode belongs to has finished, find its completion callback (either
 * on sent_cmd or on a queued skb), flush the request's remaining
 * commands on failure and finally invoke the callback.
 * NOTE(review): this chunk appears truncated/sampled — several returns,
 * braces and a kfree of sent_cmd are not visible.
 */
5296 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5298 hci_req_complete_t req_complete = NULL;
5299 struct sk_buff *skb;
5300 unsigned long flags;
5302 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5304 /* If the completed command doesn't match the last one that was
5305 * sent we need to do special handling of it.
5307 if (!hci_sent_cmd_data(hdev, opcode)) {
5308 /* Some CSR based controllers generate a spontaneous
5309 * reset complete event during init and any pending
5310 * command will never be completed. In such a case we
5311 * need to resend whatever was the last sent
5314 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5315 hci_resend_last(hdev);
5320 /* If the command succeeded and there's still more commands in
5321 * this request the request is not yet complete.
5323 if (!status && !hci_req_is_complete(hdev))
5326 /* If this was the last command in a request the complete
5327 * callback would be found in hdev->sent_cmd instead of the
5328 * command queue (hdev->cmd_q).
5330 if (hdev->sent_cmd) {
5331 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5334 /* We must set the complete callback to NULL to
5335 * avoid calling the callback more than once if
5336 * this function gets called again.
5338 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5344 /* Remove all pending commands belonging to this request */
5345 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5346 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the next request — put it back and stop */
5347 if (bt_cb(skb)->req.start) {
5348 __skb_queue_head(&hdev->cmd_q, skb);
5352 req_complete = bt_cb(skb)->req.complete;
5355 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5359 req_complete(hdev, status);
/* RX worker: drain hdev->rx_q, mirror each packet to the monitor (and
 * promiscuous sockets), then dispatch by packet type.  Data packets are
 * dropped during init; in user-channel mode nothing is processed here.
 * NOTE(review): this chunk appears truncated/sampled — kfree_skb drops,
 * case labels and the default branch are not fully visible.
 */
5362 static void hci_rx_work(struct work_struct *work)
5364 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5365 struct sk_buff *skb;
5367 BT_DBG("%s", hdev->name);
5369 while ((skb = skb_dequeue(&hdev->rx_q))) {
5370 /* Send copy to monitor */
5371 hci_send_to_monitor(hdev, skb);
5373 if (atomic_read(&hdev->promisc)) {
5374 /* Send copy to the sockets */
5375 hci_send_to_sock(hdev, skb);
/* A userspace process owns the device — kernel stack stays out */
5378 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5383 if (test_bit(HCI_INIT, &hdev->flags)) {
5384 /* Don't process data packets in this states. */
5385 switch (bt_cb(skb)->pkt_type) {
5386 case HCI_ACLDATA_PKT:
5387 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type */
5394 switch (bt_cb(skb)->pkt_type) {
5396 BT_DBG("%s Event packet", hdev->name);
5397 hci_event_packet(hdev, skb);
5400 case HCI_ACLDATA_PKT:
5401 BT_DBG("%s ACL data packet", hdev->name);
5402 hci_acldata_packet(hdev, skb);
5405 case HCI_SCODATA_PKT:
5406 BT_DBG("%s SCO data packet", hdev->name);
5407 hci_scodata_packet(hdev, skb);
/* Command worker: while the controller advertises free command slots
 * (cmd_cnt), send the next queued command, keep a clone in sent_cmd for
 * later matching, and arm the command timeout.
 * NOTE(review): this chunk appears truncated/sampled — the !skb return,
 * timer argument and the else branch brace are not fully visible.
 */
5417 static void hci_cmd_work(struct work_struct *work)
5419 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5420 struct sk_buff *skb;
5422 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5423 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5425 /* Send queued commands */
5426 if (atomic_read(&hdev->cmd_cnt)) {
5427 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd clone before storing the new one */
5431 kfree_skb(hdev->sent_cmd);
5433 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5434 if (hdev->sent_cmd) {
5435 atomic_dec(&hdev->cmd_cnt);
5436 hci_send_frame(hdev, skb);
/* During reset the timeout is suppressed; otherwise (re)arm it */
5437 if (test_bit(HCI_RESET, &hdev->flags))
5438 cancel_delayed_work(&hdev->cmd_timer);
5440 schedule_delayed_work(&hdev->cmd_timer,
/* Clone failed: requeue the command and retry from the worker */
5443 skb_queue_head(&hdev->cmd_q, skb);
5444 queue_work(hdev->workqueue, &hdev->cmd_work);
5449 void hci_req_add_le_scan_disable(struct hci_request *req)
5451 struct hci_cp_le_set_scan_enable cp;
5453 memset(&cp, 0, sizeof(cp));
5454 cp.enable = LE_SCAN_DISABLE;
5455 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5458 static void add_to_white_list(struct hci_request *req,
5459 struct hci_conn_params *params)
5461 struct hci_cp_le_add_to_white_list cp;
5463 cp.bdaddr_type = params->addr_type;
5464 bacpy(&cp.bdaddr, ¶ms->addr);
5466 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
/* Synchronize the controller's LE white list with the pending-connect
 * and pending-report lists, and return the scan filter policy to use
 * (white list vs. accept-all).
 * NOTE(review): this chunk appears truncated/sampled — the concrete
 * return values for the filter policy and several braces are missing.
 * NOTE(review): "¶ms" on several lines is mojibake for "&params"
 * (HTML "&para" entity corruption) — kept byte-identical here; needs a
 * separate code fix.
 */
5469 static u8 update_white_list(struct hci_request *req)
5471 struct hci_dev *hdev = req->hdev;
5472 struct hci_conn_params *params;
5473 struct bdaddr_list *b;
5474 uint8_t white_list_entries = 0;
5476 /* Go through the current white list programmed into the
5477 * controller one by one and check if that address is still
5478 * in the list of pending connections or list of devices to
5479 * report. If not present in either list, then queue the
5480 * command to remove it from the controller.
5482 list_for_each_entry(b, &hdev->le_white_list, list) {
5483 struct hci_cp_le_del_from_white_list cp;
5485 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5486 &b->bdaddr, b->bdaddr_type) ||
5487 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5488 &b->bdaddr, b->bdaddr_type)) {
/* Still wanted — keep it and count it against the capacity */
5489 white_list_entries++;
5493 cp.bdaddr_type = b->bdaddr_type;
5494 bacpy(&cp.bdaddr, &b->bdaddr);
5496 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5500 /* Since all no longer valid white list entries have been
5501 * removed, walk through the list of pending connections
5502 * and ensure that any new device gets programmed into
5505 * If the list of the devices is larger than the list of
5506 * available white list entries in the controller, then
5507 * just abort and return filer policy value to not use the
5510 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5511 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5512 &params->addr, params->addr_type))
5515 if (white_list_entries >= hdev->le_white_list_size) {
5516 /* Select filter policy to accept all advertising */
5520 if (hci_find_irk_by_addr(hdev, &params->addr,
5521 params->addr_type)) {
5522 /* White list can not be used with RPAs */
5526 white_list_entries++;
5527 add_to_white_list(req, params);
5530 /* After adding all new pending connections, walk through
5531 * the list of pending reports and also add these to the
5532 * white list if there is still space.
5534 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5535 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5536 &params->addr, params->addr_type))
5539 if (white_list_entries >= hdev->le_white_list_size) {
5540 /* Select filter policy to accept all advertising */
5544 if (hci_find_irk_by_addr(hdev, &params->addr,
5545 params->addr_type)) {
5546 /* White list can not be used with RPAs */
5550 white_list_entries++;
5551 add_to_white_list(req, params);
5554 /* Select filter policy to use white list */
5558 void hci_req_add_le_passive_scan(struct hci_request *req)
5560 struct hci_cp_le_set_scan_param param_cp;
5561 struct hci_cp_le_set_scan_enable enable_cp;
5562 struct hci_dev *hdev = req->hdev;
5566 /* Set require_privacy to false since no SCAN_REQ are send
5567 * during passive scanning. Not using an unresolvable address
5568 * here is important so that peer devices using direct
5569 * advertising with our address will be correctly reported
5570 * by the controller.
5572 if (hci_update_random_address(req, false, &own_addr_type))
5575 /* Adding or removing entries from the white list must
5576 * happen before enabling scanning. The controller does
5577 * not allow white list modification while scanning.
5579 filter_policy = update_white_list(req);
5581 memset(¶m_cp, 0, sizeof(param_cp));
5582 param_cp.type = LE_SCAN_PASSIVE;
5583 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5584 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5585 param_cp.own_address_type = own_addr_type;
5586 param_cp.filter_policy = filter_policy;
5587 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5590 memset(&enable_cp, 0, sizeof(enable_cp));
5591 enable_cp.enable = LE_SCAN_ENABLE;
5592 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5593 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5597 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5600 BT_DBG("HCI request failed to update background scanning: "
5601 "status 0x%2.2x", status);
5604 /* This function controls the background scanning based on hdev->pend_le_conns
5605 * list. If there are pending LE connection we start the background scanning,
5606 * otherwise we stop it.
5608 * This function requires the caller holds hdev->lock.
/* NOTE(review): this chunk appears truncated/sampled — several return
 * statements, the if/else structure and closing braces are missing.
 */
5610 void hci_update_background_scan(struct hci_dev *hdev)
5612 struct hci_request req;
5613 struct hci_conn *conn;
/* Bail out while the device is down, initializing, being set up,
 * auto-powered-off or unregistering — no scanning in those states */
5616 if (!test_bit(HCI_UP, &hdev->flags) ||
5617 test_bit(HCI_INIT, &hdev->flags) ||
5618 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5619 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5620 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5621 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5624 /* No point in doing scanning if LE support hasn't been enabled */
5625 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5628 /* If discovery is active don't interfere with it */
5629 if (hdev->discovery.state != DISCOVERY_STOPPED)
5632 hci_req_init(&req, hdev);
5634 if (list_empty(&hdev->pend_le_conns) &&
5635 list_empty(&hdev->pend_le_reports)) {
5636 /* If there is no pending LE connections or devices
5637 * to be scanned for, we should stop the background
5641 /* If controller is not scanning we are done. */
5642 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5645 hci_req_add_le_scan_disable(&req);
5647 BT_DBG("%s stopping background scanning", hdev->name);
5649 /* If there is at least one pending LE connection, we should
5650 * keep the background scan running.
5653 /* If controller is connecting, we should not start scanning
5654 * since some controllers are not able to scan and connect at
5657 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5661 /* If controller is currently scanning, we stop it to ensure we
5662 * don't miss any advertising (due to duplicates filter).
5664 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5665 hci_req_add_le_scan_disable(&req);
5667 hci_req_add_le_passive_scan(&req);
5669 BT_DBG("%s starting background scanning", hdev->name);
5672 err = hci_req_run(&req, update_background_scan_complete);
5674 BT_ERR("Failed to run HCI request: err %d", err);
5677 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5679 struct bdaddr_list *b;
5681 list_for_each_entry(b, &hdev->whitelist, list) {
5682 struct hci_conn *conn;
5684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5688 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5695 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5702 if (!hdev_is_powered(hdev))
5705 if (mgmt_powering_down(hdev))
5708 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5709 disconnected_whitelist_entries(hdev))
5712 scan = SCAN_DISABLED;
5714 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5717 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5718 scan |= SCAN_INQUIRY;
5721 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5723 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);