2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a pending synchronous HCI request (hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous HCI requests against each other per device */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
/* Forward a device event to the HCI socket layer so monitors see it. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
234 list_for_each_entry(uuid, &hdev->uuids, list) {
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
408 size_t buf_size = min(count, (sizeof(buf)-1));
411 if (test_bit(HCI_UP, &hdev->flags))
414 if (copy_from_user(buf, user_buf, buf_size))
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
429 static const struct file_operations force_sc_support_fops = {
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
489 if (val < 30 || val > (60 * 60 * 24))
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
720 size_t buf_size = min(count, (sizeof(buf)-1));
723 if (test_bit(HCI_UP, &hdev->flags))
726 if (copy_from_user(buf, user_buf, buf_size))
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
741 static const struct file_operations force_static_address_fops = {
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int device_list_show(struct seq_file *f, void *ptr)
975 struct hci_dev *hdev = f->private;
976 struct hci_conn_params *p;
979 list_for_each_entry(p, &hdev->le_conn_params, list) {
980 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
983 hci_dev_unlock(hdev);
988 static int device_list_open(struct inode *inode, struct file *file)
990 return single_open(file, device_list_show, inode->i_private);
993 static const struct file_operations device_list_fops = {
994 .open = device_list_open,
997 .release = single_release,
1000 /* ---- HCI requests ---- */
1002 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1004 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1006 if (hdev->req_status == HCI_REQ_PEND) {
1007 hdev->req_result = result;
1008 hdev->req_status = HCI_REQ_DONE;
1009 wake_up_interruptible(&hdev->req_wait_q);
1013 static void hci_req_cancel(struct hci_dev *hdev, int err)
1015 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1017 if (hdev->req_status == HCI_REQ_PEND) {
1018 hdev->req_result = err;
1019 hdev->req_status = HCI_REQ_CANCELED;
1020 wake_up_interruptible(&hdev->req_wait_q);
1024 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1027 struct hci_ev_cmd_complete *ev;
1028 struct hci_event_hdr *hdr;
1029 struct sk_buff *skb;
1033 skb = hdev->recv_evt;
1034 hdev->recv_evt = NULL;
1036 hci_dev_unlock(hdev);
1039 return ERR_PTR(-ENODATA);
1041 if (skb->len < sizeof(*hdr)) {
1042 BT_ERR("Too short HCI event");
1046 hdr = (void *) skb->data;
1047 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1050 if (hdr->evt != event)
1055 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1056 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1060 if (skb->len < sizeof(*ev)) {
1061 BT_ERR("Too short cmd_complete event");
1065 ev = (void *) skb->data;
1066 skb_pull(skb, sizeof(*ev));
1068 if (opcode == __le16_to_cpu(ev->opcode))
1071 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1072 __le16_to_cpu(ev->opcode));
1076 return ERR_PTR(-ENODATA);
1079 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1080 const void *param, u8 event, u32 timeout)
1082 DECLARE_WAITQUEUE(wait, current);
1083 struct hci_request req;
1086 BT_DBG("%s", hdev->name);
1088 hci_req_init(&req, hdev);
1090 hci_req_add_ev(&req, opcode, plen, param, event);
1092 hdev->req_status = HCI_REQ_PEND;
1094 err = hci_req_run(&req, hci_req_sync_complete);
1096 return ERR_PTR(err);
1098 add_wait_queue(&hdev->req_wait_q, &wait);
1099 set_current_state(TASK_INTERRUPTIBLE);
1101 schedule_timeout(timeout);
1103 remove_wait_queue(&hdev->req_wait_q, &wait);
1105 if (signal_pending(current))
1106 return ERR_PTR(-EINTR);
1108 switch (hdev->req_status) {
1110 err = -bt_to_errno(hdev->req_result);
1113 case HCI_REQ_CANCELED:
1114 err = -hdev->req_result;
1122 hdev->req_status = hdev->req_result = 0;
1124 BT_DBG("%s end: err %d", hdev->name, err);
1127 return ERR_PTR(err);
1129 return hci_get_cmd_complete(hdev, opcode, event);
1131 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1133 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1134 const void *param, u32 timeout)
1136 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1138 EXPORT_SYMBOL(__hci_cmd_sync);
1140 /* Execute request and wait for completion. */
1141 static int __hci_req_sync(struct hci_dev *hdev,
1142 void (*func)(struct hci_request *req,
1144 unsigned long opt, __u32 timeout)
1146 struct hci_request req;
1147 DECLARE_WAITQUEUE(wait, current);
1150 BT_DBG("%s start", hdev->name);
1152 hci_req_init(&req, hdev);
1154 hdev->req_status = HCI_REQ_PEND;
1158 err = hci_req_run(&req, hci_req_sync_complete);
1160 hdev->req_status = 0;
1162 /* ENODATA means the HCI request command queue is empty.
1163 * This can happen when a request with conditionals doesn't
1164 * trigger any commands to be sent. This is normal behavior
1165 * and should not trigger an error return.
1167 if (err == -ENODATA)
1173 add_wait_queue(&hdev->req_wait_q, &wait);
1174 set_current_state(TASK_INTERRUPTIBLE);
1176 schedule_timeout(timeout);
1178 remove_wait_queue(&hdev->req_wait_q, &wait);
1180 if (signal_pending(current))
1183 switch (hdev->req_status) {
1185 err = -bt_to_errno(hdev->req_result);
1188 case HCI_REQ_CANCELED:
1189 err = -hdev->req_result;
1197 hdev->req_status = hdev->req_result = 0;
1199 BT_DBG("%s end: err %d", hdev->name, err);
1204 static int hci_req_sync(struct hci_dev *hdev,
1205 void (*req)(struct hci_request *req,
1207 unsigned long opt, __u32 timeout)
1211 if (!test_bit(HCI_UP, &hdev->flags))
1214 /* Serialize all requests */
1216 ret = __hci_req_sync(hdev, req, opt, timeout);
1217 hci_req_unlock(hdev);
1222 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1224 BT_DBG("%s %ld", req->hdev->name, opt);
1227 set_bit(HCI_RESET, &req->hdev->flags);
1228 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1231 static void bredr_init(struct hci_request *req)
1233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1235 /* Read Local Supported Features */
1236 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1238 /* Read Local Version */
1239 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1241 /* Read BD Address */
1242 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1245 static void amp_init(struct hci_request *req)
1247 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1249 /* Read Local Version */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1252 /* Read Local Supported Commands */
1253 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1255 /* Read Local Supported Features */
1256 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1258 /* Read Local AMP Info */
1259 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1261 /* Read Data Blk size */
1262 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1264 /* Read Flow Control Mode */
1265 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1267 /* Read Location Data */
1268 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1271 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1273 struct hci_dev *hdev = req->hdev;
1275 BT_DBG("%s %ld", hdev->name, opt);
1278 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1279 hci_reset_req(req, 0);
1281 switch (hdev->dev_type) {
1291 BT_ERR("Unknown device type %d", hdev->dev_type);
1296 static void bredr_setup(struct hci_request *req)
1298 struct hci_dev *hdev = req->hdev;
1303 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1304 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1306 /* Read Class of Device */
1307 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1309 /* Read Local Name */
1310 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1312 /* Read Voice Setting */
1313 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1315 /* Read Number of Supported IAC */
1316 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1318 /* Read Current IAC LAP */
1319 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1321 /* Clear Event Filters */
1322 flt_type = HCI_FLT_CLEAR_ALL;
1323 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1325 /* Connection accept timeout ~20 secs */
1326 param = cpu_to_le16(0x7d00);
1327 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1329 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1330 * but it does not support page scan related HCI commands.
1332 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1333 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1334 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1338 static void le_setup(struct hci_request *req)
1340 struct hci_dev *hdev = req->hdev;
1342 /* Read LE Buffer Size */
1343 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1345 /* Read LE Local Supported Features */
1346 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1348 /* Read LE Supported States */
1349 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1351 /* Read LE White List Size */
1352 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1354 /* Clear LE White List */
1355 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1357 /* LE-only controllers have LE implicitly enabled */
1358 if (!lmp_bredr_capable(hdev))
1359 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1362 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1364 if (lmp_ext_inq_capable(hdev))
1367 if (lmp_inq_rssi_capable(hdev))
1370 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1371 hdev->lmp_subver == 0x0757)
1374 if (hdev->manufacturer == 15) {
1375 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1377 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1379 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1383 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1384 hdev->lmp_subver == 0x1805)
1390 static void hci_setup_inquiry_mode(struct hci_request *req)
1394 mode = hci_get_inquiry_mode(req->hdev);
1396 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1399 static void hci_setup_event_mask(struct hci_request *req)
1401 struct hci_dev *hdev = req->hdev;
1403 /* The second byte is 0xff instead of 0x9f (two reserved bits
1404 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1405 * command otherwise.
1407 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1409 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1410 * any event mask for pre 1.2 devices.
1412 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1415 if (lmp_bredr_capable(hdev)) {
1416 events[4] |= 0x01; /* Flow Specification Complete */
1417 events[4] |= 0x02; /* Inquiry Result with RSSI */
1418 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1419 events[5] |= 0x08; /* Synchronous Connection Complete */
1420 events[5] |= 0x10; /* Synchronous Connection Changed */
1422 /* Use a different default for LE-only devices */
1423 memset(events, 0, sizeof(events));
1424 events[0] |= 0x10; /* Disconnection Complete */
1425 events[1] |= 0x08; /* Read Remote Version Information Complete */
1426 events[1] |= 0x20; /* Command Complete */
1427 events[1] |= 0x40; /* Command Status */
1428 events[1] |= 0x80; /* Hardware Error */
1429 events[2] |= 0x04; /* Number of Completed Packets */
1430 events[3] |= 0x02; /* Data Buffer Overflow */
1432 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1433 events[0] |= 0x80; /* Encryption Change */
1434 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_inq_rssi_capable(hdev))
1439 events[4] |= 0x02; /* Inquiry Result with RSSI */
1441 if (lmp_sniffsubr_capable(hdev))
1442 events[5] |= 0x20; /* Sniff Subrating */
1444 if (lmp_pause_enc_capable(hdev))
1445 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1447 if (lmp_ext_inq_capable(hdev))
1448 events[5] |= 0x40; /* Extended Inquiry Result */
1450 if (lmp_no_flush_capable(hdev))
1451 events[7] |= 0x01; /* Enhanced Flush Complete */
1453 if (lmp_lsto_capable(hdev))
1454 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1456 if (lmp_ssp_capable(hdev)) {
1457 events[6] |= 0x01; /* IO Capability Request */
1458 events[6] |= 0x02; /* IO Capability Response */
1459 events[6] |= 0x04; /* User Confirmation Request */
1460 events[6] |= 0x08; /* User Passkey Request */
1461 events[6] |= 0x10; /* Remote OOB Data Request */
1462 events[6] |= 0x20; /* Simple Pairing Complete */
1463 events[7] |= 0x04; /* User Passkey Notification */
1464 events[7] |= 0x08; /* Keypress Notification */
1465 events[7] |= 0x10; /* Remote Host Supported
1466 * Features Notification
1470 if (lmp_le_capable(hdev))
1471 events[7] |= 0x20; /* LE Meta-Event */
1473 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage 2 of controller init: queue commands that depend on the basic
 * LMP feature bits (read in stage 1) — local supported commands, SSP
 * mode or EIR reset, inquiry mode, inquiry response TX power, extended
 * features, and authentication enable.
 */
1476 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1478 struct hci_dev *hdev = req->hdev;
1480 if (lmp_bredr_capable(hdev))
1483 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1485 if (lmp_le_capable(hdev))
1488 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1489 * local supported commands HCI command.
1491 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1492 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1494 if (lmp_ssp_capable(hdev)) {
1495 /* When SSP is available, then the host features page
1496 * should also be available as well. However some
1497 * controllers list the max_page as 0 as long as SSP
1498 * has not been enabled. To achieve proper debugging
1499 * output, force the minimum max_page to 1 at least.
1501 hdev->max_page = 0x01;
1503 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1505 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1506 sizeof(mode), &mode);
/* SSP capable but not enabled: clear any stale EIR data instead */
1508 struct hci_cp_write_eir cp;
1510 memset(hdev->eir, 0, sizeof(hdev->eir));
1511 memset(&cp, 0, sizeof(cp));
1513 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1517 if (lmp_inq_rssi_capable(hdev))
1518 hci_setup_inquiry_mode(req);
1520 if (lmp_inq_tx_pwr_capable(hdev))
1521 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1523 if (lmp_ext_feat_capable(hdev)) {
1524 struct hci_cp_read_local_ext_features cp;
1527 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1531 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1533 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy mask from the controller's LMP
 * capabilities (role switch, hold, sniff, park) and queue the
 * Write Default Link Policy Settings command.
 */
1538 static void hci_setup_link_policy(struct hci_request *req)
1540 struct hci_dev *hdev = req->hdev;
1541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
/* Policy is a 16-bit little-endian field on the wire */
1553 cp.policy = cpu_to_le16(link_policy);
1554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported on dual-mode (BR/EDR + LE) controllers
 * when the current host LE setting differs from what the controller
 * reports. LE-only controllers are skipped entirely.
 */
1557 static void hci_set_le_support(struct hci_request *req)
1559 struct hci_dev *hdev = req->hdev;
1560 struct hci_cp_write_le_host_supported cp;
1562 /* LE-only devices do not support explicit enablement */
1563 if (!lmp_bredr_capable(hdev))
1566 memset(&cp, 0, sizeof(cp));
1568 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1570 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command when the desired state differs */
1573 if (cp.le != lmp_host_le_capable(hdev))
1574 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue event mask page 2: enables the Connectionless Slave
 * Broadcast (master/slave role) events and the Authenticated Payload
 * Timeout Expired event, each only when the controller supports it.
 */
1578 static void hci_set_event_mask_page_2(struct hci_request *req)
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1583 /* If Connectionless Slave Broadcast master role is supported
1584 * enable all necessary events for it.
1586 if (lmp_csb_master_capable(hdev)) {
1587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1593 /* If Connectionless Slave Broadcast slave role is supported
1594 * enable all necessary events for it.
1596 if (lmp_csb_slave_capable(hdev)) {
1597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1603 /* Enable Authenticated Payload Timeout Expired event if supported */
1604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage 3 of controller init: set the event mask, conditionally delete
 * stored link keys, configure the default link policy, set up LE event
 * mask / TX power / host support, and read extended feature pages
 * beyond page 1.
 */
1610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1612 struct hci_dev *hdev = req->hdev;
1615 hci_setup_event_mask(req);
1617 /* Some Broadcom based Bluetooth controllers do not support the
1618 * Delete Stored Link Key command. They are clearly indicating its
1619 * absence in the bit mask of supported commands.
1621 * Check the supported commands and only if the command is marked
1622 * as supported send it. If not supported assume that the controller
1623 * does not have actual support for stored link keys which makes this
1624 * command redundant anyway.
1626 * Some controllers indicate that they support handling deleting
1627 * stored link keys, but they don't. The quirk lets a driver
1628 * just disable this command.
1630 if (hdev->commands[6] & 0x80 &&
1631 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632 struct hci_cp_delete_stored_link_key cp;
1634 bacpy(&cp.bdaddr, BDADDR_ANY);
1635 cp.delete_all = 0x01;
1636 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 = Write Default Link Policy supported */
1640 if (hdev->commands[5] & 0x10)
1641 hci_setup_link_policy(req);
1643 if (lmp_le_capable(hdev)) {
1646 memset(events, 0, sizeof(events));
1649 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650 events[0] |= 0x10; /* LE Long Term Key Request */
1652 /* If controller supports the Connection Parameters Request
1653 * Link Layer Procedure, enable the corresponding event.
1655 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656 events[0] |= 0x20; /* LE Remote Connection
1660 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1663 if (hdev->commands[25] & 0x40) {
1664 /* Read LE Advertising Channel TX Power */
1665 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1668 hci_set_le_support(req);
1671 /* Read features beyond page 1 if available */
1672 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673 struct hci_cp_read_local_ext_features cp;
1676 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage 4 of controller init: event mask page 2, synchronization train
 * parameters, and Secure Connections support — each gated on the
 * controller advertising the corresponding capability.
 */
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1683 struct hci_dev *hdev = req->hdev;
1685 /* Set event mask page 2 if the HCI command for it is supported */
1686 if (hdev->commands[22] & 0x04)
1687 hci_set_event_mask_page_2(req);
1689 /* Check for Synchronization Train support */
1690 if (lmp_sync_train_capable(hdev))
1691 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1693 /* Enable Secure Connections if supported and configured */
1694 if ((lmp_sc_capable(hdev) ||
1695 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1696 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1698 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1699 sizeof(support), &support);
/* Run the full synchronous init sequence (stages 1-4) for a controller
 * and, during the initial HCI_SETUP phase only, create the debugfs
 * entries appropriate for its capabilities (BR/EDR, SSP, sniff, LE).
 * Returns 0 on success or the first failing stage's error code.
 */
1703 static int __hci_init(struct hci_dev *hdev)
1707 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1711 /* The Device Under Test (DUT) mode is special and available for
1712 * all controller types. So just create it early on.
1714 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1715 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1719 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1720 * BR/EDR/LE type controllers. AMP controllers only need the
1723 if (hdev->dev_type != HCI_BREDR)
1726 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1730 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1734 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1738 /* Only create debugfs entries during the initial setup
1739 * phase and not every time the controller gets powered on.
1741 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available on every controller type */
1744 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1746 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1747 &hdev->manufacturer);
1748 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1749 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1750 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1752 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1754 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1756 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1757 &conn_info_min_age_fops);
1758 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1759 &conn_info_max_age_fops);
/* BR/EDR-only entries */
1761 if (lmp_bredr_capable(hdev)) {
1762 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1763 hdev, &inquiry_cache_fops);
1764 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1765 hdev, &link_keys_fops);
1766 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1767 hdev, &dev_class_fops);
1768 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1769 hdev, &voice_setting_fops);
1772 if (lmp_ssp_capable(hdev)) {
1773 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1774 hdev, &auto_accept_delay_fops);
1775 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1776 hdev, &force_sc_support_fops);
1777 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1778 hdev, &sc_only_mode_fops);
1781 if (lmp_sniff_capable(hdev)) {
1782 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1783 hdev, &idle_timeout_fops);
1784 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1785 hdev, &sniff_min_interval_fops);
1786 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1787 hdev, &sniff_max_interval_fops);
/* LE-only entries */
1790 if (lmp_le_capable(hdev)) {
1791 debugfs_create_file("identity", 0400, hdev->debugfs,
1792 hdev, &identity_fops);
1793 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1794 hdev, &rpa_timeout_fops);
1795 debugfs_create_file("random_address", 0444, hdev->debugfs,
1796 hdev, &random_address_fops);
1797 debugfs_create_file("static_address", 0444, hdev->debugfs,
1798 hdev, &static_address_fops);
1800 /* For controllers with a public address, provide a debug
1801 * option to force the usage of the configured static
1802 * address. By default the public address is used.
1804 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1805 debugfs_create_file("force_static_address", 0644,
1806 hdev->debugfs, hdev,
1807 &force_static_address_fops);
1809 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1810 &hdev->le_white_list_size);
1811 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_file("identity_resolving_keys", 0400,
1814 hdev->debugfs, hdev,
1815 &identity_resolving_keys_fops);
1816 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1817 hdev, &long_term_keys_fops);
1818 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1819 hdev, &conn_min_interval_fops);
1820 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1821 hdev, &conn_max_interval_fops);
1822 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1823 hdev, &conn_latency_fops);
1824 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1825 hdev, &supervision_timeout_fops);
1826 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1827 hdev, &adv_channel_map_fops);
1828 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1830 debugfs_create_u16("discov_interleaved_timeout", 0644,
1832 &hdev->discov_interleaved_timeout);
/* Stage 0 init used for unconfigured controllers: optional reset, Read
 * Local Version, and — only when the driver can program the address via
 * set_bdaddr — Read BD Address.
 */
1838 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1840 struct hci_dev *hdev = req->hdev;
1842 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver asked to skip reset on close */
1845 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1846 hci_reset_req(req, 0);
1848 /* Read Local Version */
1849 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1851 /* Read BD Address */
1852 if (hdev->set_bdaddr)
1853 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Minimal synchronous init for unconfigured controllers; raw devices
 * (HCI_QUIRK_RAW_DEVICE) are not initialized at all.
 */
1856 static int __hci_unconf_init(struct hci_dev *hdev)
1860 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1863 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request helper: write the scan enable setting (inquiry/page scan)
 * passed in via opt.
 */
1870 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1874 BT_DBG("%s %x", req->hdev->name, scan);
1876 /* Inquiry and Page scans */
1877 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: write the authentication enable setting passed in
 * via opt.
 */
1880 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1884 BT_DBG("%s %x", req->hdev->name, auth);
1886 /* Authentication */
1887 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: write the encryption mode setting passed in via opt. */
1890 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1894 BT_DBG("%s %x", req->hdev->name, encrypt);
1897 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request helper: write the default link policy passed in via opt,
 * converted to little-endian wire format.
 */
1900 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1902 __le16 policy = cpu_to_le16(opt);
1904 BT_DBG("%s %x", req->hdev->name, policy);
1906 /* Default link policy */
1907 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1910 /* Get HCI device by index.
1911 * Device is held on return. */
/* Look up an hci_dev by index under the device-list read lock; the
 * matched device is returned with an elevated reference (hci_dev_hold),
 * NULL when no device with that index exists.
 */
1912 struct hci_dev *hci_dev_get(int index)
1914 struct hci_dev *hdev = NULL, *d;
1916 BT_DBG("%d", index);
1921 read_lock(&hci_dev_list_lock);
1922 list_for_each_entry(d, &hci_dev_list, list) {
1923 if (d->id == index) {
1924 hdev = hci_dev_hold(d);
1928 read_unlock(&hci_dev_list_lock);
1932 /* ---- Inquiry support ---- */
/* Return true while device discovery is in the FINDING or RESOLVING
 * state.
 */
1934 bool hci_discovery_active(struct hci_dev *hdev)
1936 struct discovery_state *discov = &hdev->discovery;
1938 switch (discov->state) {
1939 case DISCOVERY_FINDING:
1940 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" notifications; no-op when the state is unchanged.
 */
1948 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1950 int old_state = hdev->discovery.state;
1952 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1954 if (old_state == state)
1957 hdev->discovery.state = state;
1960 case DISCOVERY_STOPPED:
1961 hci_update_background_scan(hdev);
/* A STARTING->STOPPED transition never announced discovery,
 * so do not announce its end either.
 */
1963 if (old_state != DISCOVERY_STARTING)
1964 mgmt_discovering(hdev, 0);
1966 case DISCOVERY_STARTING:
1968 case DISCOVERY_FINDING:
1969 mgmt_discovering(hdev, 1);
1971 case DISCOVERY_RESOLVING:
1973 case DISCOVERY_STOPPING:
/* Drop every entry from the inquiry cache and reinitialize the
 * unknown/resolve sub-lists.
 */
1978 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1980 struct discovery_state *cache = &hdev->discovery;
1981 struct inquiry_entry *p, *n;
1983 list_for_each_entry_safe(p, n, &cache->all, all) {
1988 INIT_LIST_HEAD(&cache->unknown);
1989 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address in the "all" list. */
1992 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1995 struct discovery_state *cache = &hdev->discovery;
1996 struct inquiry_entry *e;
1998 BT_DBG("cache %p, %pMR", cache, bdaddr);
2000 list_for_each_entry(e, &cache->all, all) {
2001 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an inquiry cache entry by address in the "unknown name" list. */
2008 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p, %pMR", cache, bdaddr);
2016 list_for_each_entry(e, &cache->unknown, list) {
2017 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the name-resolve list. BDADDR_ANY matches the first
 * entry in the given name_state; otherwise match by exact address.
 */
2024 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2028 struct discovery_state *cache = &hdev->discovery;
2029 struct inquiry_entry *e;
2031 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2033 list_for_each_entry(e, &cache->resolve, list) {
2034 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2036 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list so it stays ordered by
 * descending |RSSI| among entries whose name is not already pending.
 */
2043 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2044 struct inquiry_entry *ie)
2046 struct discovery_state *cache = &hdev->discovery;
2047 struct list_head *pos = &cache->resolve;
2048 struct inquiry_entry *p;
2050 list_del(&ie->list);
/* Walk until the first entry with a weaker signal than ie */
2052 list_for_each_entry(p, &cache->resolve, list) {
2053 if (p->name_state != NAME_PENDING &&
2054 abs(p->data.rssi) >= abs(ie->data.rssi))
2059 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache.
 * Updates name-resolution state and timestamps, and returns MGMT
 * device-found flags (legacy pairing / confirm-name) for the caller
 * to pass along with the event.
 */
2062 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2065 struct discovery_state *cache = &hdev->discovery;
2066 struct inquiry_entry *ie;
2069 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data */
2071 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2073 if (!data->ssp_mode)
2074 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2076 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2078 if (!ie->data.ssp_mode)
2079 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is still needed: reorder the
 * resolve list so stronger devices are resolved first.
 */
2081 if (ie->name_state == NAME_NEEDED &&
2082 data->rssi != ie->data.rssi) {
2083 ie->data.rssi = data->rssi;
2084 hci_inquiry_cache_update_resolve(hdev, ie);
2090 /* Entry not in the cache. Add new one. */
2091 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2093 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2097 list_add(&ie->all, &cache->all);
2100 ie->name_state = NAME_KNOWN;
2102 ie->name_state = NAME_NOT_KNOWN;
2103 list_add(&ie->list, &cache->unknown);
2107 if (name_known && ie->name_state != NAME_KNOWN &&
2108 ie->name_state != NAME_PENDING) {
2109 ie->name_state = NAME_KNOWN;
2110 list_del(&ie->list);
2113 memcpy(&ie->data, data, sizeof(*data));
2114 ie->timestamp = jiffies;
2115 cache->timestamp = jiffies;
2117 if (ie->name_state == NAME_NOT_KNOWN)
2118 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to num cached inquiry entries into buf as an array of
 * struct inquiry_info, returning the number copied. Caller supplies
 * a buffer of at least num * sizeof(struct inquiry_info) bytes.
 */
2124 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2126 struct discovery_state *cache = &hdev->discovery;
2127 struct inquiry_info *info = (struct inquiry_info *) buf;
2128 struct inquiry_entry *e;
2131 list_for_each_entry(e, &cache->all, all) {
2132 struct inquiry_data *data = &e->data;
2137 bacpy(&info->bdaddr, &data->bdaddr);
2138 info->pscan_rep_mode = data->pscan_rep_mode;
2139 info->pscan_period_mode = data->pscan_period_mode;
2140 info->pscan_mode = data->pscan_mode;
2141 memcpy(info->dev_class, data->dev_class, 3);
2142 info->clock_offset = data->clock_offset;
2148 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: queue an Inquiry command built from the user-supplied
 * hci_inquiry_req (LAP, length, max responses) passed via opt, unless
 * an inquiry is already running.
 */
2152 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2154 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2155 struct hci_dev *hdev = req->hdev;
2156 struct hci_cp_inquiry cp;
2158 BT_DBG("%s", hdev->name);
2160 if (test_bit(HCI_INQUIRY, &hdev->flags))
2164 memcpy(&cp.lap, &ir->lap, 3);
2165 cp.length = ir->length;
2166 cp.num_rsp = ir->num_rsp;
2167 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit action: allow the sleep to be interrupted by a signal. */
2170 static int wait_inquiry(void *word)
2173 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the request, optionally flush a
 * stale cache, run a synchronous inquiry, then copy the cached results
 * back to userspace. Returns 0 or a negative errno.
 */
2176 int hci_inquiry(void __user *arg)
2178 __u8 __user *ptr = arg;
2179 struct hci_inquiry_req ir;
2180 struct hci_dev *hdev;
2181 int err = 0, do_inquiry = 0, max_rsp;
2185 if (copy_from_user(&ir, ptr, sizeof(ir)))
2188 hdev = hci_dev_get(ir.dev_id);
/* Reject devices claimed by a user channel, unconfigured devices,
 * non-BR/EDR devices and devices with BR/EDR disabled.
 */
2192 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2197 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2202 if (hdev->dev_type != HCI_BREDR) {
2207 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2213 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2214 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2215 hci_inquiry_cache_flush(hdev);
2218 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s per the HCI spec; 2000ms is a
 * conservative per-unit timeout for the request.
 */
2220 timeo = ir.length * msecs_to_jiffies(2000);
2223 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2228 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2229 * cleared). If it is interrupted by a signal, return -EINTR.
2231 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2232 TASK_INTERRUPTIBLE))
2236 /* for unlimited number of responses we will use buffer with
2239 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2241 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2242 * copy it to the user space.
2244 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2251 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2252 hci_dev_unlock(hdev);
2254 BT_DBG("num_rsp %d", ir.num_rsp);
2256 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2258 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on a controller: validate preconditions (not unregistering,
 * not rfkilled, has a usable address), call the driver's open(), run
 * the setup/init sequence appropriate for the device state, and mark
 * it HCI_UP. On init failure, flush all work and queues and release
 * the transport. Returns 0 or a negative errno.
 */
2271 static int hci_dev_do_open(struct hci_dev *hdev)
2275 BT_DBG("%s %p", hdev->name, hdev);
2279 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2284 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2285 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2286 /* Check for rfkill but allow the HCI setup stage to
2287 * proceed (which in itself doesn't cause any RF activity).
2289 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2294 /* Check for valid public address or a configured static
2295 * random address, but let the HCI setup proceed to
2296 * be able to determine if there is a public address
2299 * In case of user channel usage, it is not important
2300 * if a public address or static random address is
2303 * This check is only valid for BR/EDR controllers
2304 * since AMP controllers do not have an address.
2306 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2307 hdev->dev_type == HCI_BREDR &&
2308 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2309 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2310 ret = -EADDRNOTAVAIL;
2315 if (test_bit(HCI_UP, &hdev->flags)) {
2320 if (hdev->open(hdev)) {
2325 atomic_set(&hdev->cmd_cnt, 1);
2326 set_bit(HCI_INIT, &hdev->flags);
2328 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2330 ret = hdev->setup(hdev);
2332 /* The transport driver can set these quirks before
2333 * creating the HCI device or in its setup callback.
2335 * In case any of them is set, the controller has to
2336 * start up as unconfigured.
2338 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2339 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2340 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2342 /* For an unconfigured controller it is required to
2343 * read at least the version information provided by
2344 * the Read Local Version Information command.
2346 * If the set_bdaddr driver callback is provided, then
2347 * also the original Bluetooth public device address
2348 * will be read using the Read BD Address command.
2350 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2351 ret = __hci_unconf_init(hdev);
2354 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2355 /* If public address change is configured, ensure that
2356 * the address gets programmed. If the driver does not
2357 * support changing the public address, fail the power
2360 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2362 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2364 ret = -EADDRNOTAVAIL;
2368 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2369 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370 ret = __hci_init(hdev);
2373 clear_bit(HCI_INIT, &hdev->flags);
/* Init succeeded: bring the device up and notify listeners */
2377 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2378 set_bit(HCI_UP, &hdev->flags);
2379 hci_notify(hdev, HCI_DEV_UP);
2380 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2381 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2382 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2383 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2384 hdev->dev_type == HCI_BREDR) {
2386 mgmt_powered(hdev, 1);
2387 hci_dev_unlock(hdev);
2390 /* Init failed, cleanup */
2391 flush_work(&hdev->tx_work);
2392 flush_work(&hdev->cmd_work);
2393 flush_work(&hdev->rx_work);
2395 skb_queue_purge(&hdev->cmd_q);
2396 skb_queue_purge(&hdev->rx_q);
2401 if (hdev->sent_cmd) {
2402 kfree_skb(hdev->sent_cmd);
2403 hdev->sent_cmd = NULL;
/* Preserve only the HCI_RAW bit across the failed open */
2407 hdev->flags &= BIT(HCI_RAW);
2411 hci_req_unlock(hdev);
2415 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device, cancel any pending
 * auto power-off, wait for setup to finish, set HCI_PAIRABLE for
 * legacy (non-mgmt) users and delegate to hci_dev_do_open().
 */
2417 int hci_dev_open(__u16 dev)
2419 struct hci_dev *hdev;
2422 hdev = hci_dev_get(dev);
2426 /* Devices that are marked as unconfigured can only be powered
2427 * up as user channel. Trying to bring them up as normal devices
2428 * will result into a failure. Only user channel operation is
2431 * When this function is called for a user channel, the flag
2432 * HCI_USER_CHANNEL will be set first before attempting to
2435 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2436 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2441 /* We need to ensure that no other power on/off work is pending
2442 * before proceeding to call hci_dev_do_open. This is
2443 * particularly important if the setup procedure has not yet
2446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2449 /* After this call it is guaranteed that the setup procedure
2450 * has finished. This means that error conditions like RFKILL
2451 * or no valid public or static random address apply.
2453 flush_workqueue(hdev->req_workqueue);
2455 /* For controllers not using the management interface and that
2456 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2457 * so that pairing works for them. Once the management interface
2458 * is in use this bit will be cleared again and userspace has
2459 * to explicitly enable it.
2461 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2462 !test_bit(HCI_MGMT, &hdev->dev_flags))
2463 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2465 err = hci_dev_do_open(hdev);
2472 /* This function requires the caller holds hdev->lock */
/* Detach every LE connection parameter entry from its pending-action
 * list (list_del_init leaves the entries themselves intact).
 */
2473 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2475 struct hci_conn_params *p;
2477 list_for_each_entry(p, &hdev->le_conn_params, list)
2478 list_del_init(&p->action);
2480 BT_DBG("All LE pending actions cleared");
/* Power down a controller: cancel pending work, flush caches and
 * connections, optionally issue a final HCI Reset, purge all queues,
 * drop the last sent command and notify mgmt that the device is off.
 */
2483 static int hci_dev_do_close(struct hci_dev *hdev)
2485 BT_DBG("%s %p", hdev->name, hdev)
2487 cancel_delayed_work(&hdev->power_off);
2489 hci_req_cancel(hdev, ENODEV);
/* Already down: nothing more to tear down */
2492 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2493 cancel_delayed_work_sync(&hdev->cmd_timer);
2494 hci_req_unlock(hdev);
2498 /* Flush RX and TX works */
2499 flush_work(&hdev->tx_work);
2500 flush_work(&hdev->rx_work);
2502 if (hdev->discov_timeout > 0) {
2503 cancel_delayed_work(&hdev->discov_off);
2504 hdev->discov_timeout = 0;
2505 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2506 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2509 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2510 cancel_delayed_work(&hdev->service_cache);
2512 cancel_delayed_work_sync(&hdev->le_scan_disable);
2514 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2515 cancel_delayed_work_sync(&hdev->rpa_expired);
2518 hci_inquiry_cache_flush(hdev);
2519 hci_conn_hash_flush(hdev);
2520 hci_pend_le_actions_clear(hdev);
2521 hci_dev_unlock(hdev);
2523 hci_notify(hdev, HCI_DEV_DOWN);
2529 skb_queue_purge(&hdev->cmd_q);
2530 atomic_set(&hdev->cmd_cnt, 1);
/* Issue a final HCI Reset unless auto-off/unconfigured, and only
 * when the driver asked for reset-on-close.
 */
2531 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2532 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2533 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2534 set_bit(HCI_INIT, &hdev->flags);
2535 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2536 clear_bit(HCI_INIT, &hdev->flags);
2539 /* flush cmd work */
2540 flush_work(&hdev->cmd_work);
2543 skb_queue_purge(&hdev->rx_q);
2544 skb_queue_purge(&hdev->cmd_q);
2545 skb_queue_purge(&hdev->raw_q);
2547 /* Drop last sent command */
2548 if (hdev->sent_cmd) {
2549 cancel_delayed_work_sync(&hdev->cmd_timer);
2550 kfree_skb(hdev->sent_cmd);
2551 hdev->sent_cmd = NULL;
2554 kfree_skb(hdev->recv_evt);
2555 hdev->recv_evt = NULL;
2557 /* After this point our queues are empty
2558 * and no tasks are scheduled. */
/* Keep only HCI_RAW in flags; drop non-persistent dev_flags */
2562 hdev->flags &= BIT(HCI_RAW);
2563 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2565 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2566 if (hdev->dev_type == HCI_BREDR) {
2568 mgmt_powered(hdev, 0);
2569 hci_dev_unlock(hdev);
2573 /* Controller radio is available but is currently powered down */
2574 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2576 memset(hdev->eir, 0, sizeof(hdev->eir));
2577 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2578 bacpy(&hdev->random_addr, BDADDR_ANY);
2580 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel a
 * pending auto power-off and delegate to hci_dev_do_close().
 */
2586 int hci_dev_close(__u16 dev)
2588 struct hci_dev *hdev;
2591 hdev = hci_dev_get(dev);
2595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2601 cancel_delayed_work(&hdev->power_off);
2603 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: validate device state, drop queued
 * packets and cached data, reset the flow-control counters and run a
 * synchronous HCI Reset.
 */
2610 int hci_dev_reset(__u16 dev)
2612 struct hci_dev *hdev;
2615 hdev = hci_dev_get(dev);
2621 if (!test_bit(HCI_UP, &hdev->flags)) {
2626 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2631 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queued traffic and cached state before resetting */
2637 skb_queue_purge(&hdev->rx_q);
2638 skb_queue_purge(&hdev->cmd_q);
2641 hci_inquiry_cache_flush(hdev);
2642 hci_conn_hash_flush(hdev);
2643 hci_dev_unlock(hdev);
2648 atomic_set(&hdev->cmd_cnt, 1);
2649 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2651 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2654 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl handler: zero the device statistics, rejecting
 * user-channel and unconfigured devices.
 */
2659 int hci_dev_reset_stat(__u16 dev)
2661 struct hci_dev *hdev;
2664 hdev = hci_dev_get(dev);
2668 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2673 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror an ioctl-driven scan-enable change into the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags and, if anything changed while mgmt
 * is active, refresh advertising data and emit new settings.
 */
2685 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2687 bool conn_changed, discov_changed;
2689 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2691 if ((scan & SCAN_PAGE))
2692 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2695 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2698 if ((scan & SCAN_INQUIRY)) {
2699 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2702 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2703 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Nothing to report unless mgmt is in use */
2707 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2710 if (conn_changed || discov_changed) {
2711 /* In case this was disabled through mgmt */
2712 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2714 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2715 mgmt_update_adv_data(hdev);
2717 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls: copies the request from
 * userspace, validates the device, then applies the setting either via
 * a synchronous HCI request (auth, encrypt, scan, link policy) or by
 * writing hdev fields directly (link mode, packet type, ACL/SCO MTU).
 */
2721 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2723 struct hci_dev *hdev;
2724 struct hci_dev_req dr;
2727 if (copy_from_user(&dr, arg, sizeof(dr)))
2730 hdev = hci_dev_get(dr.dev_id);
2734 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2744 if (hdev->dev_type != HCI_BREDR) {
2749 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2756 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2761 if (!lmp_encrypt_capable(hdev)) {
2766 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2767 /* Auth must be enabled first */
2768 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2774 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2779 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2782 /* Ensure that the connectable and discoverable states
2783 * get correctly modified as this was a non-mgmt change.
2786 hci_update_scan_state(hdev, dr.dev_opt);
2790 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2794 case HCISETLINKMODE:
2795 hdev->link_mode = ((__u16) dr.dev_opt) &
2796 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2800 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two 16-bit values: [0] = packet count, [1] = MTU */
2804 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2805 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2809 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2810 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: allocate a bounded hci_dev_list_req,
 * fill it with (id, flags) pairs for every registered device under the
 * list read lock, and copy the result back to userspace.
 */
2823 int hci_get_dev_list(void __user *arg)
2825 struct hci_dev *hdev;
2826 struct hci_dev_list_req *dl;
2827 struct hci_dev_req *dr;
2828 int n = 0, size, err;
2831 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation: dev_num must fit in two pages of entries */
2834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2837 size = sizeof(*dl) + dev_num * sizeof(*dr);
2839 dl = kzalloc(size, GFP_KERNEL);
2845 read_lock(&hci_dev_list_lock);
2846 list_for_each_entry(hdev, &hci_dev_list, list) {
2847 unsigned long flags = hdev->flags;
2849 /* When the auto-off is configured it means the transport
2850 * is running, but in that case still indicate that the
2851 * device is actually down.
2853 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854 flags &= ~BIT(HCI_UP);
2856 (dr + n)->dev_id = hdev->id;
2857 (dr + n)->dev_opt = flags;
2862 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2865 size = sizeof(*dl) + n * sizeof(*dr);
2867 err = copy_to_user(arg, dl, size);
2870 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot
 * (name, address, type, flags, MTUs, policy, stats, features) for the
 * requested device and copy it to userspace.
 */
2873 int hci_get_dev_info(void __user *arg)
2875 struct hci_dev *hdev;
2876 struct hci_dev_info di;
2877 unsigned long flags;
2880 if (copy_from_user(&di, arg, sizeof(di)))
2883 hdev = hci_dev_get(di.dev_id);
2887 /* When the auto-off is configured it means the transport
2888 * is running, but in that case still indicate that the
2889 * device is actually down.
2891 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892 flags = hdev->flags & ~BIT(HCI_UP);
2894 flags = hdev->flags;
2896 strcpy(di.name, hdev->name);
2897 di.bdaddr = hdev->bdaddr;
2898 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2900 di.pkt_type = hdev->pkt_type;
/* Report LE MTU/packet counts in the ACL fields for LE-only devices */
2901 if (lmp_bredr_capable(hdev)) {
2902 di.acl_mtu = hdev->acl_mtu;
2903 di.acl_pkts = hdev->acl_pkts;
2904 di.sco_mtu = hdev->sco_mtu;
2905 di.sco_pkts = hdev->sco_pkts;
2907 di.acl_mtu = hdev->le_mtu;
2908 di.acl_pkts = hdev->le_pkts;
2912 di.link_policy = hdev->link_policy;
2913 di.link_mode = hdev->link_mode;
2915 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916 memcpy(&di.features, &hdev->features, sizeof(di.features));
2918 if (copy_to_user(arg, &di, sizeof(di)))
2926 /* ---- Interface to HCI drivers ---- */
/* rfkill set_block callback: track the RFKILLED flag and close the
 * device when blocked (unless it is mid-setup/config); user-channel
 * devices refuse the operation.
 */
2928 static int hci_rfkill_set_block(void *data, bool blocked)
2930 struct hci_dev *hdev = data;
2932 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2934 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2938 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2939 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2941 hci_dev_do_close(hdev);
2943 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2949 static const struct rfkill_ops hci_rfkill_ops = {
2950 .set_block = hci_rfkill_set_block,
/* Deferred power-on work (hdev->power_on).  Opens the device, re-checks
 * error conditions that were ignored during setup, and emits the mgmt
 * Index Added event for the SETUP -> configured transition.
 * NOTE(review): extract — some original lines (returns/braces) are elided.
 */
2953 static void hci_power_on(struct work_struct *work)
2955 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2958 BT_DBG("%s", hdev->name);
2960 err = hci_dev_do_open(hdev);
2962 mgmt_set_powered_failed(hdev, err);
2966 /* During the HCI setup phase, a few error conditions are
2967 * ignored and they need to be checked now. If they are still
2968 * valid, it is important to turn the device back off.
/* Rfkilled, unconfigured, or a BR/EDR controller with neither a public
 * nor a static address set: turn it back off.
 */
2970 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2971 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2972 (hdev->dev_type == HCI_BREDR &&
2973 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2975 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976 hci_dev_do_close(hdev);
2977 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
/* Auto-power-off timer: close again unless mgmt takes over in time */
2978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979 HCI_AUTO_OFF_TIMEOUT);
2982 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2983 /* For unconfigured devices, set the HCI_RAW flag
2984 * so that userspace can easily identify them.
2986 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987 set_bit(HCI_RAW, &hdev->flags);
2989 /* For fully configured devices, this will send
2990 * the Index Added event. For unconfigured devices,
2991 * it will send Unconfigued Index Added event.
2993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994 * and no event will be send.
2996 mgmt_index_added(hdev);
2997 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2998 /* When the controller is now configured, then it
2999 * is important to clear the HCI_RAW flag.
3001 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002 clear_bit(HCI_RAW, &hdev->flags);
3004 /* Powering on the controller with HCI_CONFIG set only
3005 * happens with the transition from unconfigured to
3006 * configured. This will send the Index Added event.
3008 mgmt_index_added(hdev);
/* Deferred power-off work (hdev->power_off): simply closes the device. */
3012 static void hci_power_off(struct work_struct *work)
3014 struct hci_dev *hdev = container_of(work, struct hci_dev,
3017 BT_DBG("%s", hdev->name);
3019 hci_dev_do_close(hdev);
/* Discoverable-timeout work (hdev->discov_off): lets mgmt turn the
 * discoverable mode back off when the timeout expires.
 */
3022 static void hci_discov_off(struct work_struct *work)
3024 struct hci_dev *hdev;
3026 hdev = container_of(work, struct hci_dev, discov_off.work);
3028 BT_DBG("%s", hdev->name);
3030 mgmt_discoverable_timeout(hdev);
/* Remove every registered UUID from hdev->uuids.
 * NOTE(review): extract — the kfree() calls of the original are elided here.
 */
3033 void hci_uuids_clear(struct hci_dev *hdev)
3035 struct bt_uuid *uuid, *tmp;
3037 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038 list_del(&uuid->list);
/* Drop all stored BR/EDR link keys */
3043 void hci_link_keys_clear(struct hci_dev *hdev)
3045 struct list_head *p, *n;
3047 list_for_each_safe(p, n, &hdev->link_keys) {
3048 struct link_key *key;
3050 key = list_entry(p, struct link_key, list);
/* Drop all stored SMP long term keys */
3057 void hci_smp_ltks_clear(struct hci_dev *hdev)
3059 struct smp_ltk *k, *tmp;
3061 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Drop all stored SMP identity resolving keys */
3067 void hci_smp_irks_clear(struct hci_dev *hdev)
3069 struct smp_irk *k, *tmp;
3071 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up the stored BR/EDR link key for @bdaddr (linear list scan). */
3077 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3081 list_for_each_entry(k, &hdev->link_keys, list)
3082 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and the local/remote authentication requirements.
 * NOTE(review): extract — the individual return statements are elided.
 */
3088 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3089 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are handled first */
3092 if (key_type < 0x03)
3095 /* Debug keys are insecure so don't store them persistently */
3096 if (key_type == HCI_LK_DEBUG_COMBINATION)
3099 /* Changed combination key and there's no previous one */
3100 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3103 /* Security mode 3 case */
3107 /* Neither local nor remote side had no-bonding as requirement */
3108 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3111 /* Local side had dedicated bonding as requirement */
3112 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3115 /* Remote side had dedicated bonding as requirement */
3116 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3119 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI role it is used in: SMP_LTK keys are
 * used when local is master, anything else when local is slave.
 */
3124 static u8 ltk_role(u8 type)
3126 if (type == SMP_LTK)
3127 return HCI_ROLE_MASTER;
3129 return HCI_ROLE_SLAVE;
/* Find an LTK matching @ediv/@rand and the given role.
 * NOTE(review): extract — return statements are elided in this view.
 */
3132 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3137 list_for_each_entry(k, &hdev->long_term_keys, list) {
3138 if (k->ediv != ediv || k->rand != rand)
3141 if (ltk_role(k->type) != role)
/* Find an LTK by peer address, address type and role */
3150 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3151 u8 addr_type, u8 role)
3155 list_for_each_entry(k, &hdev->long_term_keys, list)
3156 if (addr_type == k->bdaddr_type &&
3157 bacmp(bdaddr, &k->bdaddr) == 0 &&
3158 ltk_role(k->type) == role)
/* Resolve a Resolvable Private Address to a stored IRK.  First try a
 * cached RPA match, then run the (AES-based) resolution against every
 * stored IRK and cache the RPA on success.
 */
3164 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3166 struct smp_irk *irk;
3168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (!bacmp(&irk->rpa, rpa))
3173 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3174 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3175 bacpy(&irk->rpa, rpa);
/* Store (or update) a BR/EDR link key for @bdaddr.  When @persistent is
 * non-NULL it is set according to hci_persistent_key().
 * NOTE(review): extract — allocation-failure returns and some braces are
 * elided in this view.
 */
3201 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3202 bdaddr_t *bdaddr, u8 *val, u8 type,
3203 u8 pin_len, bool *persistent)
3205 struct link_key *key, *old_key;
3208 old_key = hci_find_link_key(hdev, bdaddr);
3210 old_key_type = old_key->type;
/* No previous key: remember the connection's key type (0xff = unknown)
 * and allocate a fresh entry on hdev->link_keys.
 */
3213 old_key_type = conn ? conn->key_type : 0xff;
3214 key = kzalloc(sizeof(*key), GFP_KERNEL);
3217 list_add(&key->list, &hdev->link_keys);
3220 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3222 /* Some buggy controller combinations generate a changed
3223 * combination key for legacy pairing even when there's no
3225 if (type == HCI_LK_CHANGED_COMBINATION &&
3226 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3227 type = HCI_LK_COMBINATION;
3229 conn->key_type = type;
3232 bacpy(&key->bdaddr, bdaddr);
3233 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3234 key->pin_len = pin_len;
/* A changed-combination key keeps the previous key's type */
3236 if (type == HCI_LK_CHANGED_COMBINATION)
3237 key->type = old_key_type;
3242 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key for @bdaddr/@addr_type. */
3248 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3249 u8 addr_type, u8 type, u8 authenticated,
3250 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3252 struct smp_ltk *key, *old_key;
3253 u8 role = ltk_role(type);
3255 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3259 key = kzalloc(sizeof(*key), GFP_KERNEL);
3262 list_add(&key->list, &hdev->long_term_keys);
3265 bacpy(&key->bdaddr, bdaddr);
3266 key->bdaddr_type = addr_type;
3267 memcpy(key->val, tk, sizeof(key->val));
3268 key->authenticated = authenticated;
3271 key->enc_size = enc_size;
/* Store (or update) an SMP identity resolving key, caching the last RPA. */
3277 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3278 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3280 struct smp_irk *irk;
3282 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3284 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3288 bacpy(&irk->bdaddr, bdaddr);
3289 irk->addr_type = addr_type;
3291 list_add(&irk->list, &hdev->identity_resolving_keys);
3294 memcpy(irk->val, val, 16);
3295 bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for @bdaddr.
 * NOTE(review): extract — the -ENOENT / kfree paths are elided here.
 */
3300 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3302 struct link_key *key;
3304 key = hci_find_link_key(hdev, bdaddr);
3308 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3310 list_del(&key->list);
/* Remove all LTKs for @bdaddr/@bdaddr_type; -ENOENT if none matched. */
3316 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3318 struct smp_ltk *k, *tmp;
3321 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3322 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3325 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3332 return removed ? 0 : -ENOENT;
/* Remove all IRKs for @bdaddr/@addr_type */
3335 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3337 struct smp_irk *k, *tmp;
3339 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3340 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3343 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3350 /* HCI command timer function */
/* Runs when a sent HCI command got no completion in time: log the stuck
 * opcode (if any), then restore the command credit and kick cmd_work so
 * queued commands can proceed.
 */
3351 static void hci_cmd_timeout(struct work_struct *work)
3353 struct hci_dev *hdev = container_of(work, struct hci_dev,
3356 if (hdev->sent_cmd) {
3357 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3358 u16 opcode = __le16_to_cpu(sent->opcode);
3360 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3362 BT_ERR("%s command tx timeout", hdev->name);
/* Reset the credit so the next queued command can be sent */
3365 atomic_set(&hdev->cmd_cnt, 1);
3366 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for @bdaddr. */
3369 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3372 struct oob_data *data;
3374 list_for_each_entry(data, &hdev->remote_oob_data, list)
3375 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove the stored remote OOB data for @bdaddr.
 * NOTE(review): extract — error return and kfree are elided in this view.
 */
3381 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3383 struct oob_data *data;
3385 data = hci_find_remote_oob_data(hdev, bdaddr);
3389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3391 list_del(&data->list);
/* Drop all stored remote OOB data entries */
3397 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3399 struct oob_data *data, *n;
3401 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3402 list_del(&data->list);
/* Store P-192 remote OOB data (hash + randomizer); the P-256 fields are
 * zeroed since only legacy OOB data was provided.
 */
3407 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3408 u8 *hash, u8 *randomizer)
3410 struct oob_data *data;
3412 data = hci_find_remote_oob_data(hdev, bdaddr);
3414 data = kmalloc(sizeof(*data), GFP_KERNEL);
3418 bacpy(&data->bdaddr, bdaddr);
3419 list_add(&data->list, &hdev->remote_oob_data);
3422 memcpy(data->hash192, hash, sizeof(data->hash192));
3423 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3425 memset(data->hash256, 0, sizeof(data->hash256));
3426 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3428 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data with both P-192 and P-256 values */
3433 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3434 u8 *hash192, u8 *randomizer192,
3435 u8 *hash256, u8 *randomizer256)
3437 struct oob_data *data;
3439 data = hci_find_remote_oob_data(hdev, bdaddr);
3441 data = kmalloc(sizeof(*data), GFP_KERNEL);
3445 bacpy(&data->bdaddr, bdaddr);
3446 list_add(&data->list, &hdev->remote_oob_data);
3449 memcpy(data->hash192, hash192, sizeof(data->hash192));
3450 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3452 memcpy(data->hash256, hash256, sizeof(data->hash256));
3453 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3455 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Generic helpers for lists of (bdaddr, type) entries (blacklist,
 * whitelist, LE white list).
 * NOTE(review): extract — some returns/braces are elided in this view.
 */
3460 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3461 bdaddr_t *bdaddr, u8 type)
3463 struct bdaddr_list *b;
3465 list_for_each_entry(b, bdaddr_list, list) {
3466 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free every entry on the list */
3473 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3475 struct list_head *p, *n;
3477 list_for_each_safe(p, n, bdaddr_list) {
3478 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add an entry; BDADDR_ANY is rejected and duplicates are refused */
3485 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3487 struct bdaddr_list *entry;
3489 if (!bacmp(bdaddr, BDADDR_ANY))
3492 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3495 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3499 bacpy(&entry->bdaddr, bdaddr);
3500 entry->bdaddr_type = type;
3502 list_add(&entry->list, list);
/* Delete an entry; BDADDR_ANY clears the whole list */
3507 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3509 struct bdaddr_list *entry;
3511 if (!bacmp(bdaddr, BDADDR_ANY)) {
3512 hci_bdaddr_list_clear(list);
3516 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3520 list_del(&entry->list);
3526 /* This function requires the caller holds hdev->lock */
/* Find LE connection parameters for an identity address. */
3527 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3528 bdaddr_t *addr, u8 addr_type)
3530 struct hci_conn_params *params;
3532 /* The conn params list only contains identity addresses */
3533 if (!hci_is_identity_address(addr, addr_type))
3536 list_for_each_entry(params, &hdev->le_conn_params, list) {
3537 if (bacmp(&params->addr, addr) == 0 &&
3538 params->addr_type == addr_type) {
/* True when an LE link to @addr/@type exists and is in BT_CONNECTED.
 * NOTE(review): extract — returns are elided in this view.
 */
3546 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3548 struct hci_conn *conn;
3550 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3554 if (conn->dst_type != type)
3557 if (conn->state != BT_CONNECTED)
3563 /* This function requires the caller holds hdev->lock */
/* Look up conn params on an "action" list (pend_le_conns/pend_le_reports);
 * note this iterates via the ->action member, not ->list.
 */
3564 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3565 bdaddr_t *addr, u8 addr_type)
3567 struct hci_conn_params *param;
3569 /* The list only contains identity addresses */
3570 if (!hci_is_identity_address(addr, addr_type))
3573 list_for_each_entry(param, list, action) {
3574 if (bacmp(&param->addr, addr) == 0 &&
3575 param->addr_type == addr_type)
3582 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address,
 * initialised from the controller-wide defaults.
 */
3583 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3584 bdaddr_t *addr, u8 addr_type)
3586 struct hci_conn_params *params;
3588 if (!hci_is_identity_address(addr, addr_type))
3591 params = hci_conn_params_lookup(hdev, addr, addr_type);
3595 params = kzalloc(sizeof(*params), GFP_KERNEL);
3597 BT_ERR("Out of memory");
3601 bacpy(&params->addr, addr);
3602 params->addr_type = addr_type;
3604 list_add(&params->list, &hdev->le_conn_params);
3605 INIT_LIST_HEAD(&params->action);
3607 params->conn_min_interval = hdev->le_conn_min_interval;
3608 params->conn_max_interval = hdev->le_conn_max_interval;
3609 params->conn_latency = hdev->le_conn_latency;
3610 params->supervision_timeout = hdev->le_supv_timeout;
3611 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3613 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3618 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for @addr, moving the entry between the
 * pend_le_conns / pend_le_reports action lists and updating background
 * scanning accordingly.
 * NOTE(review): extract — break/return statements are elided in this view.
 */
3619 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3622 struct hci_conn_params *params;
3624 params = hci_conn_params_add(hdev, addr, addr_type);
3628 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before */
3631 list_del_init(&params->action);
3633 switch (auto_connect) {
3634 case HCI_AUTO_CONN_DISABLED:
3635 case HCI_AUTO_CONN_LINK_LOSS:
3636 hci_update_background_scan(hdev);
3638 case HCI_AUTO_CONN_REPORT:
3639 list_add(&params->action, &hdev->pend_le_reports);
3640 hci_update_background_scan(hdev);
3642 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when none exists yet */
3643 if (!is_connected(hdev, addr, addr_type)) {
3644 list_add(&params->action, &hdev->pend_le_conns);
3645 hci_update_background_scan(hdev);
3650 params->auto_connect = auto_connect;
3652 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3658 /* This function requires the caller holds hdev->lock */
/* Remove the conn params entry for @addr and refresh background scan */
3659 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3661 struct hci_conn_params *params;
3663 params = hci_conn_params_lookup(hdev, addr, addr_type);
3667 list_del(&params->action);
3668 list_del(&params->list);
3671 hci_update_background_scan(hdev);
3673 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3676 /* This function requires the caller holds hdev->lock */
/* Drop every entry whose policy is HCI_AUTO_CONN_DISABLED */
3677 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3679 struct hci_conn_params *params, *tmp;
3681 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3682 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3684 list_del(&params->list);
3688 BT_DBG("All LE disabled connection parameters were removed");
3691 /* This function requires the caller holds hdev->lock */
/* Drop every conn params entry and refresh background scan */
3692 void hci_conn_params_clear_all(struct hci_dev *hdev)
3694 struct hci_conn_params *params, *tmp;
3696 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3697 list_del(&params->action);
3698 list_del(&params->list);
3702 hci_update_background_scan(hdev);
3704 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the interleaved inquiry: on failure,
 * return discovery to the STOPPED state.
 */
3707 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3710 BT_ERR("Failed to start inquiry: status %d", status);
3713 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3714 hci_dev_unlock(hdev);
/* Completion of the LE-scan-disable request: for interleaved discovery,
 * follow up with a classic inquiry; for pure LE discovery, stop.
 * NOTE(review): extract — some braces/returns are elided in this view.
 */
3719 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3721 /* General inquiry access code (GIAC) */
3722 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3723 struct hci_request req;
3724 struct hci_cp_inquiry cp;
3728 BT_ERR("Failed to disable LE scanning: status %d", status);
3732 switch (hdev->discovery.type) {
3733 case DISCOV_TYPE_LE:
3735 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3736 hci_dev_unlock(hdev);
3739 case DISCOV_TYPE_INTERLEAVED:
3740 hci_req_init(&req, hdev);
3742 memset(&cp, 0, sizeof(cp));
3743 memcpy(&cp.lap, lap, sizeof(cp.lap));
3744 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3745 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3749 hci_inquiry_cache_flush(hdev);
3751 err = hci_req_run(&req, inquiry_complete);
3753 BT_ERR("Inquiry request failed: err %d", err);
3754 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3757 hci_dev_unlock(hdev);
/* Delayed work (hdev->le_scan_disable): submit the request that turns
 * LE scanning off, then continue in le_scan_disable_work_complete().
 */
3762 static void le_scan_disable_work(struct work_struct *work)
3764 struct hci_dev *hdev = container_of(work, struct hci_dev,
3765 le_scan_disable.work);
3766 struct hci_request req;
3769 BT_DBG("%s", hdev->name);
3771 hci_req_init(&req, hdev);
3773 hci_req_add_le_scan_disable(&req);
3775 err = hci_req_run(&req, le_scan_disable_work_complete);
3777 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command, unless advertising or an LE
 * connect attempt is in flight (in which case the update is deferred).
 */
3780 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3782 struct hci_dev *hdev = req->hdev;
3784 /* If we're advertising or initiating an LE connection we can't
3785 * go ahead and change the random address at this time. This is
3786 * because the eventual initiator address used for the
3787 * subsequently created connection will be undefined (some
3788 * controllers use the new address and others the one we had
3789 * when the operation started).
3791 * In this kind of scenario skip the update and let the random
3792 * address be updated at the next cycle.
3794 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3795 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3796 BT_DBG("Deferring random address update");
3800 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose the own-address type for an LE operation and queue any random
 * address update needed: RPA when privacy is on, an unresolvable private
 * address when privacy is required without an IRK, the static address
 * when forced or no public address exists, otherwise the public address.
 * NOTE(review): extract — returns and some braces are elided in this view.
 */
3803 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3806 struct hci_dev *hdev = req->hdev;
3809 /* If privacy is enabled use a resolvable private address. If
3810 * current RPA has expired or there is something else than
3811 * the current RPA in use, then generate a new one.
3813 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3816 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA while it is unexpired and still programmed */
3818 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3819 !bacmp(&hdev->random_addr, &hdev->rpa))
3822 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3824 BT_ERR("%s failed to generate new RPA", hdev->name);
3828 set_random_addr(req, &hdev->rpa);
/* Schedule the next RPA rotation */
3830 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3831 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3836 /* In case of required privacy without resolvable private address,
3837 * use an unresolvable private address. This is useful for active
3838 * scanning and non-connectable advertising.
3840 if (require_privacy) {
3843 get_random_bytes(&urpa, 6);
3844 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3846 *own_addr_type = ADDR_LE_DEV_RANDOM;
3847 set_random_addr(req, &urpa);
3851 /* If forcing static address is in use or there is no public
3852 * address use the static address as random address (but skip
3853 * the HCI command if the current random address is already the
3856 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3857 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3858 *own_addr_type = ADDR_LE_DEV_RANDOM;
3859 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3860 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3861 &hdev->static_addr);
3865 /* Neither privacy nor static address is being used so use a
3868 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3873 /* Copy the Identity Address of the controller.
3875 * If the controller has a public BD_ADDR, then by default use that one.
3876 * If this is a LE only controller without a public address, default to
3877 * the static random address.
3879 * For debugging purposes it is possible to force controllers with a
3880 * public address to use the static random address instead.
3882 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Static (random) identity: forced via debugfs or no public address set */
3885 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3886 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3887 bacpy(bdaddr, &hdev->static_addr);
3888 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3890 bacpy(bdaddr, &hdev->bdaddr);
3891 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3895 /* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with default parameters,
 * lists, work items and queues.  The caller later registers it with
 * hci_register_dev().
 * NOTE(review): extract — NULL-check/return lines are elided in this view.
 */
3896 struct hci_dev *hci_alloc_dev(void)
3898 struct hci_dev *hdev;
3900 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* Baseline BR/EDR packet types and link policy defaults */
3904 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3905 hdev->esco_type = (ESCO_HV1);
3906 hdev->link_mode = (HCI_LM_ACCEPT);
3907 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3908 hdev->io_capability = 0x03; /* No Input No Output */
3909 hdev->manufacturer = 0xffff; /* Default to internal use */
3910 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3911 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3913 hdev->sniff_max_interval = 800;
3914 hdev->sniff_min_interval = 80;
/* LE defaults (values in controller units; e.g. scan interval 0x0060) */
3916 hdev->le_adv_channel_map = 0x07;
3917 hdev->le_scan_interval = 0x0060;
3918 hdev->le_scan_window = 0x0030;
3919 hdev->le_conn_min_interval = 0x0028;
3920 hdev->le_conn_max_interval = 0x0038;
3921 hdev->le_conn_latency = 0x0000;
3922 hdev->le_supv_timeout = 0x002a;
3924 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3925 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3926 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3927 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3929 mutex_init(&hdev->lock);
3930 mutex_init(&hdev->req_lock);
3932 INIT_LIST_HEAD(&hdev->mgmt_pending);
3933 INIT_LIST_HEAD(&hdev->blacklist);
3934 INIT_LIST_HEAD(&hdev->whitelist);
3935 INIT_LIST_HEAD(&hdev->uuids);
3936 INIT_LIST_HEAD(&hdev->link_keys);
3937 INIT_LIST_HEAD(&hdev->long_term_keys);
3938 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3939 INIT_LIST_HEAD(&hdev->remote_oob_data);
3940 INIT_LIST_HEAD(&hdev->le_white_list);
3941 INIT_LIST_HEAD(&hdev->le_conn_params);
3942 INIT_LIST_HEAD(&hdev->pend_le_conns);
3943 INIT_LIST_HEAD(&hdev->pend_le_reports);
3944 INIT_LIST_HEAD(&hdev->conn_hash.list);
3946 INIT_WORK(&hdev->rx_work, hci_rx_work);
3947 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3948 INIT_WORK(&hdev->tx_work, hci_tx_work);
3949 INIT_WORK(&hdev->power_on, hci_power_on);
3951 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3952 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3953 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3955 skb_queue_head_init(&hdev->rx_q);
3956 skb_queue_head_init(&hdev->cmd_q);
3957 skb_queue_head_init(&hdev->raw_q);
3959 init_waitqueue_head(&hdev->req_wait_q);
3961 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3963 hci_init_sysfs(hdev);
3964 discovery_init(hdev);
3968 EXPORT_SYMBOL(hci_alloc_dev);
3970 /* Free HCI device */
/* Release the last reference; actual freeing happens in the device
 * release callback set up by the sysfs code.
 */
3971 void hci_free_dev(struct hci_dev *hdev)
3973 /* will free via device release */
3974 put_device(&hdev->dev);
3976 EXPORT_SYMBOL(hci_free_dev);
3978 /* Register HCI device */
/* Register an allocated hci_dev with the core: allocate an index,
 * create workqueues, debugfs, crypto context, sysfs device and rfkill,
 * add it to hci_dev_list and queue the initial power-on.
 * NOTE(review): extract — several error-path lines and labels are elided
 * in this view; the visible cleanup labels suggest goto-based unwinding.
 */
3979 int hci_register_dev(struct hci_dev *hdev)
3983 if (!hdev->open || !hdev->close || !hdev->send)
3986 /* Do not allow HCI_AMP devices to register at index 0,
3987 * so the index can be used as the AMP controller ID.
3989 switch (hdev->dev_type) {
3991 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3994 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4003 sprintf(hdev->name, "hci%d", id);
4006 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4008 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4009 WQ_MEM_RECLAIM, 1, hdev->name);
4010 if (!hdev->workqueue) {
4015 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4016 WQ_MEM_RECLAIM, 1, hdev->name);
4017 if (!hdev->req_workqueue) {
4018 destroy_workqueue(hdev->workqueue);
4023 if (!IS_ERR_OR_NULL(bt_debugfs))
4024 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4026 dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES-ECB cipher used for RPA generation/resolution */
4028 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4030 if (IS_ERR(hdev->tfm_aes)) {
4031 BT_ERR("Unable to create crypto context");
4032 error = PTR_ERR(hdev->tfm_aes);
4033 hdev->tfm_aes = NULL;
4037 error = device_add(&hdev->dev);
4041 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4042 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill is optional: on registration failure just drop it */
4045 if (rfkill_register(hdev->rfkill) < 0) {
4046 rfkill_destroy(hdev->rfkill);
4047 hdev->rfkill = NULL;
4051 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4052 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4054 set_bit(HCI_SETUP, &hdev->dev_flags);
4055 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4057 if (hdev->dev_type == HCI_BREDR) {
4058 /* Assume BR/EDR support until proven otherwise (such as
4059 * through reading supported features during init.
4061 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4064 write_lock(&hci_dev_list_lock);
4065 list_add(&hdev->list, &hci_dev_list);
4066 write_unlock(&hci_dev_list_lock);
4068 /* Devices that are marked for raw-only usage are unconfigured
4069 * and should not be included in normal operation.
4071 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4072 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4074 hci_notify(hdev, HCI_DEV_REG);
4077 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding: free crypto, workqueues and the allocated index */
4082 crypto_free_blkcipher(hdev->tfm_aes);
4084 destroy_workqueue(hdev->workqueue);
4085 destroy_workqueue(hdev->req_workqueue);
4087 ida_simple_remove(&hci_index_ida, hdev->id);
4091 EXPORT_SYMBOL(hci_register_dev);
4093 /* Unregister HCI device */
/* Tear down everything hci_register_dev() set up, in reverse: remove
 * from the device list, close, cancel work, notify mgmt, unregister
 * rfkill/sysfs/debugfs, destroy workqueues and free all stored state.
 * NOTE(review): extract — some lines (locking, conditionals) are elided
 * in this view.
 */
4094 void hci_unregister_dev(struct hci_dev *hdev)
4098 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4100 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4104 write_lock(&hci_dev_list_lock);
4105 list_del(&hdev->list);
4106 write_unlock(&hci_dev_list_lock);
4108 hci_dev_do_close(hdev);
4110 for (i = 0; i < NUM_REASSEMBLY; i++)
4111 kfree_skb(hdev->reassembly[i]);
4113 cancel_work_sync(&hdev->power_on);
/* Only announce removal via mgmt when the device finished setup */
4115 if (!test_bit(HCI_INIT, &hdev->flags) &&
4116 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4117 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4119 mgmt_index_removed(hdev);
4120 hci_dev_unlock(hdev);
4123 /* mgmt_index_removed should take care of emptying the
4125 BUG_ON(!list_empty(&hdev->mgmt_pending));
4127 hci_notify(hdev, HCI_DEV_UNREG);
4130 rfkill_unregister(hdev->rfkill);
4131 rfkill_destroy(hdev->rfkill);
4135 crypto_free_blkcipher(hdev->tfm_aes);
4137 device_del(&hdev->dev);
4139 debugfs_remove_recursive(hdev->debugfs);
4141 destroy_workqueue(hdev->workqueue);
4142 destroy_workqueue(hdev->req_workqueue);
/* Purge all persistent per-device state under hdev->lock */
4145 hci_bdaddr_list_clear(&hdev->blacklist);
4146 hci_bdaddr_list_clear(&hdev->whitelist);
4147 hci_uuids_clear(hdev);
4148 hci_link_keys_clear(hdev);
4149 hci_smp_ltks_clear(hdev);
4150 hci_smp_irks_clear(hdev);
4151 hci_remote_oob_data_clear(hdev);
4152 hci_bdaddr_list_clear(&hdev->le_white_list);
4153 hci_conn_params_clear_all(hdev);
4154 hci_dev_unlock(hdev);
4158 ida_simple_remove(&hci_index_ida, id);
4160 EXPORT_SYMBOL(hci_unregister_dev);
4162 /* Suspend HCI device */
/* Notify listeners that the transport is suspending */
4163 int hci_suspend_dev(struct hci_dev *hdev)
4165 hci_notify(hdev, HCI_DEV_SUSPEND);
4168 EXPORT_SYMBOL(hci_suspend_dev);
4170 /* Resume HCI device */
/* Notify listeners that the transport resumed */
4171 int hci_resume_dev(struct hci_dev *hdev)
4173 hci_notify(hdev, HCI_DEV_RESUME);
4176 EXPORT_SYMBOL(hci_resume_dev);
4178 /* Receive frame from HCI drivers */
/* Driver RX entry point: reject frames unless the device is up or in
 * init, timestamp the skb, queue it on rx_q and schedule rx_work.
 * NOTE(review): extract — kfree_skb/return lines are elided in this view.
 */
4179 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4181 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4182 && !test_bit(HCI_INIT, &hdev->flags))) {
4188 bt_cb(skb)->incoming = 1;
4191 __net_timestamp(skb);
4193 skb_queue_tail(&hdev->rx_q, skb);
4194 queue_work(hdev->workqueue, &hdev->rx_work);
4198 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a possibly-fragmented HCI packet of @type arriving from the
 * driver into hdev->reassembly[index].  Once the header is complete the
 * expected payload length is read from it; when the frame is complete it
 * is handed to hci_recv_frame().
 * NOTE(review): extract — the outer while loop, several breaks/returns
 * and error paths are elided in this view; treat control flow here as
 * indicative only.
 */
4200 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4201 int count, __u8 index)
4206 struct sk_buff *skb;
4207 struct bt_skb_cb *scb;
/* Validate packet type and reassembly slot index */
4209 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4210 index >= NUM_REASSEMBLY)
4213 skb = hdev->reassembly[index];
/* No partial skb yet: size a new one by packet type (max len + header) */
4217 case HCI_ACLDATA_PKT:
4218 len = HCI_MAX_FRAME_SIZE;
4219 hlen = HCI_ACL_HDR_SIZE;
4222 len = HCI_MAX_EVENT_SIZE;
4223 hlen = HCI_EVENT_HDR_SIZE;
4225 case HCI_SCODATA_PKT:
4226 len = HCI_MAX_SCO_SIZE;
4227 hlen = HCI_SCO_HDR_SIZE;
4231 skb = bt_skb_alloc(len, GFP_ATOMIC);
4235 scb = (void *) skb->cb;
4237 scb->pkt_type = type;
4239 hdev->reassembly[index] = skb;
/* Copy as much of the incoming data as this fragment expects */
4243 scb = (void *) skb->cb;
4244 len = min_t(uint, scb->expect, count);
4246 memcpy(skb_put(skb, len), data, len);
/* Header complete for an event: payload length is h->plen */
4255 if (skb->len == HCI_EVENT_HDR_SIZE) {
4256 struct hci_event_hdr *h = hci_event_hdr(skb);
4257 scb->expect = h->plen;
/* Discard oversized frames that would not fit the allocated skb */
4259 if (skb_tailroom(skb) < scb->expect) {
4261 hdev->reassembly[index] = NULL;
4267 case HCI_ACLDATA_PKT:
4268 if (skb->len == HCI_ACL_HDR_SIZE) {
4269 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4270 scb->expect = __le16_to_cpu(h->dlen);
4272 if (skb_tailroom(skb) < scb->expect) {
4274 hdev->reassembly[index] = NULL;
4280 case HCI_SCODATA_PKT:
4281 if (skb->len == HCI_SCO_HDR_SIZE) {
4282 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4283 scb->expect = h->dlen;
4285 if (skb_tailroom(skb) < scb->expect) {
4287 hdev->reassembly[index] = NULL;
4294 if (scb->expect == 0) {
4295 /* Complete frame */
4297 bt_cb(skb)->pkt_type = type;
4298 hci_recv_frame(hdev, skb);
4300 hdev->reassembly[index] = NULL;
/* Feed a driver fragment into the per-type reassembly slot (type - 1). */
4308 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4312 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4316 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the consumed part of the buffer */
4320 data += (count - rem);
4326 EXPORT_SYMBOL(hci_recv_fragment);
4328 #define STREAM_REASSEMBLY 0
/* Byte-stream variant of fragment reception (e.g. UART transports):
 * the packet type is the first byte of each frame, and all data shares
 * the single STREAM_REASSEMBLY slot.
 * NOTE(review): extract — the loop structure and type-extraction lines
 * are elided in this view.
 */
4330 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4336 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4339 struct { char type; } *pkt;
4341 /* Start of the frame */
/* Continuing a frame: reuse the type recorded in the partial skb */
4348 type = bt_cb(skb)->pkt_type;
4350 rem = hci_reassembly(hdev, type, data, count,
4355 data += (count - rem);
4361 EXPORT_SYMBOL(hci_recv_stream_fragment);
4363 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback block (e.g. L2CAP/SCO) on the
 * global hci_cb_list under its rwlock.
 */
4365 int hci_register_cb(struct hci_cb *cb)
4367 BT_DBG("%p name %s", cb, cb->name);
4369 write_lock(&hci_cb_list_lock);
4370 list_add(&cb->list, &hci_cb_list);
4371 write_unlock(&hci_cb_list_lock);
4375 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered callback block */
4377 int hci_unregister_cb(struct hci_cb *cb)
4379 BT_DBG("%p name %s", cb, cb->name);
4381 write_lock(&hci_cb_list_lock);
4382 list_del(&cb->list);
4383 write_unlock(&hci_cb_list_lock);
4387 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing skb to the driver: timestamp it, mirror it to the
 * monitor (and to raw sockets when in promiscuous mode), then call
 * hdev->send().
 * NOTE(review): extract — the skb_orphan()/kfree_skb lines implied by
 * the surrounding comments are elided in this view.
 */
4389 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4393 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4396 __net_timestamp(skb);
4398 /* Send copy to monitor */
4399 hci_send_to_monitor(hdev, skb);
4401 if (atomic_read(&hdev->promisc)) {
4402 /* Send copy to the sockets */
4403 hci_send_to_sock(hdev, skb);
4406 /* Get rid of skb owner, prior to sending to the driver. */
4409 err = hdev->send(hdev, skb);
4411 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4416 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4418 skb_queue_head_init(&req->cmd_q);
/* Submit a built HCI request for execution.
 * If request building failed, the queued commands are purged; empty requests
 * are rejected. Otherwise the @complete callback is attached to the last
 * command, the request's commands are spliced onto hdev->cmd_q under the
 * queue lock, and the command worker is kicked.
 * NOTE(review): elided excerpt — the error checks and return values between
 * the visible statements are not shown.
 */
4423 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4425 struct hci_dev *hdev = req->hdev;
4426 struct sk_buff *skb;
4427 unsigned long flags;
4429 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4431 /* If an error occured during request building, remove all HCI
4432 * commands queued on the HCI request queue.
4435 skb_queue_purge(&req->cmd_q);
4439 /* Do not allow empty requests */
4440 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the final command's control block. */
4443 skb = skb_peek_tail(&req->cmd_q);
4444 bt_cb(skb)->req.complete = complete;
4446 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4447 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4448 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4450 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Return true while a synchronous HCI request is awaiting completion
 * (req_status == HCI_REQ_PEND, see the request-state defines at file top).
 */
4455 bool hci_req_pending(struct hci_dev *hdev)
4457 return (hdev->req_status == HCI_REQ_PEND);
/* Allocate and fill an skb holding one HCI command packet: command header
 * (little-endian opcode + parameter length) followed by @plen bytes of
 * @param. The skb's pkt_type is tagged HCI_COMMAND_PKT.
 * NOTE(review): elided excerpt — the allocation-failure check, the plen
 * header field assignment and the return are not shown.
 */
4460 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4461 u32 plen, const void *param)
4463 int len = HCI_COMMAND_HDR_SIZE + plen;
4464 struct hci_command_hdr *hdr;
4465 struct sk_buff *skb;
/* GFP_ATOMIC: may be called from non-sleepable context. */
4467 skb = bt_skb_alloc(len, GFP_ATOMIC);
4471 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4472 hdr->opcode = cpu_to_le16(opcode);
4476 memcpy(skb_put(skb, plen), param, plen);
4478 BT_DBG("skb len %d", skb->len);
4480 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4485 /* Send HCI command */
/* Build a stand-alone HCI command via hci_prepare_cmd(), mark it as the
 * start of a single-command request, queue it on hdev->cmd_q and schedule
 * the command worker.
 * NOTE(review): elided excerpt — the allocation-failure return and final
 * return value are not shown.
 */
4486 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4489 struct sk_buff *skb;
4491 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4493 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4495 BT_ERR("%s no memory for command", hdev->name);
4499 /* Stand-alone HCI commands must be flaged as
4500 * single-command requests.
4502 bt_cb(skb)->req.start = true;
4504 skb_queue_tail(&hdev->cmd_q, skb);
4505 queue_work(hdev->workqueue, &hdev->cmd_work);
4510 /* Queue a command to an asynchronous HCI request */
/* Append one command to a request under construction. The first command
 * queued gets req.start = true so completion handling can find request
 * boundaries; @event optionally names the event that completes this command
 * instead of Command Complete/Status.
 * NOTE(review): elided excerpt — the req->err check and error assignment on
 * allocation failure are not shown.
 */
4511 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4512 const void *param, u8 event)
4514 struct hci_dev *hdev = req->hdev;
4515 struct sk_buff *skb;
4517 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4519 /* If an error occured during request building, there is no point in
4520 * queueing the HCI command. We can simply return.
4525 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4527 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4528 hdev->name, opcode);
/* Only the first command of a request is flagged as the start. */
4533 if (skb_queue_empty(&req->cmd_q))
4534 bt_cb(skb)->req.start = true;
4536 bt_cb(skb)->req.event = event;
4538 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper around hci_req_add_ev() for commands completed by the
 * ordinary Command Complete/Status events (event == 0).
 */
4541 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4544 hci_req_add_ev(req, opcode, plen, param, 0);
4547 /* Get data from the previously sent command */
/* If the last sent command (hdev->sent_cmd) matches @opcode, return a
 * pointer to its parameter bytes (just past the command header); otherwise
 * NULL. NOTE(review): elided excerpt — the NULL returns are not shown.
 */
4548 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4550 struct hci_command_hdr *hdr;
4552 if (!hdev->sent_cmd)
4555 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire byte order rather than converting the stored opcode. */
4557 if (hdr->opcode != cpu_to_le16(opcode))
4560 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4562 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to @skb: handle+flags packed into a 16-bit
 * little-endian field plus the little-endian data length.
 * NOTE(review): elided excerpt — the local `len` capture before the push is
 * not visible here.
 */
4566 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4568 struct hci_acl_hdr *hdr;
4571 skb_push(skb, HCI_ACL_HDR_SIZE);
4572 skb_reset_transport_header(skb);
4573 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4574 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4575 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL frame (and any frag_list fragments) onto @queue.
 * The header handle depends on dev_type (connection handle vs. channel
 * handle for AMP). A non-fragmented skb is queued directly; a fragmented one
 * has its fragments detached, each given its own ACL header with ACL_START
 * cleared after the first, and all queued atomically under the queue lock.
 * NOTE(review): elided excerpt — switch case labels, loop header and the
 * `return` between branches are not shown.
 */
4578 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4579 struct sk_buff *skb, __u16 flags)
4581 struct hci_conn *conn = chan->conn;
4582 struct hci_dev *hdev = conn->hdev;
4583 struct sk_buff *list;
4585 skb->len = skb_headlen(skb);
4588 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4590 switch (hdev->dev_type) {
4592 hci_add_acl_hdr(skb, conn->handle, flags);
4595 hci_add_acl_hdr(skb, chan->handle, flags);
4598 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4602 list = skb_shinfo(skb)->frag_list;
4604 /* Non fragmented */
4605 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4607 skb_queue_tail(queue, skb);
4610 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach fragments so each can be queued as its own ACL frame. */
4612 skb_shinfo(skb)->frag_list = NULL;
4614 /* Queue all fragments atomically */
4615 spin_lock(&queue->lock);
4617 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
4619 flags &= ~ACL_START;
4622 skb = list; list = list->next;
4624 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4625 hci_add_acl_hdr(skb, conn->handle, flags);
4627 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4629 __skb_queue_tail(queue, skb);
4632 spin_unlock(&queue->lock);
/* Public entry point for sending ACL data on a channel: queue the frame on
 * the channel's data_q via hci_queue_acl() and schedule the TX worker.
 */
4636 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4638 struct hci_dev *hdev = chan->conn->hdev;
4640 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4642 hci_queue_acl(chan, &chan->data_q, skb, flags);
4644 queue_work(hdev->workqueue, &hdev->tx_work);
/* Send SCO audio data on @conn: build the SCO header (little-endian handle,
 * single-byte length), prepend it to the skb, tag it HCI_SCODATA_PKT, queue
 * it on the connection's data_q and schedule the TX worker.
 */
4648 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4650 struct hci_dev *hdev = conn->hdev;
4651 struct hci_sco_hdr hdr;
4653 BT_DBG("%s len %d", hdev->name, skb->len);
4655 hdr.handle = cpu_to_le16(conn->handle);
/* SCO dlen is a single byte; no endian conversion needed. */
4656 hdr.dlen = skb->len;
4658 skb_push(skb, HCI_SCO_HDR_SIZE);
4659 skb_reset_transport_header(skb);
4660 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4662 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4664 skb_queue_tail(&conn->data_q, skb);
4665 queue_work(hdev->workqueue, &hdev->tx_work);
4668 /* ---- HCI TX task (outgoing data) ---- */
4670 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest in-flight
 * packets (fair round-robin by `sent` count), and compute its TX quota from
 * the controller's free buffer count for that link type.
 * NOTE(review): elided excerpt — the RCU read lock, `continue` statements,
 * quote computation and return are not shown.
 */
4671 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4674 struct hci_conn_hash *h = &hdev->conn_hash;
4675 struct hci_conn *conn = NULL, *c;
4676 unsigned int num = 0, min = ~0;
4678 /* We don't have to lock device here. Connections are always
4679 * added and removed with TX task disabled. */
4683 list_for_each_entry_rcu(c, &h->list, list) {
4684 if (c->type != type || skb_queue_empty(&c->data_q))
4687 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4692 if (c->sent < min) {
/* Early exit once every connection of this type has been inspected. */
4697 if (hci_conn_num(hdev, type) == num)
4706 switch (conn->type) {
4708 cnt = hdev->acl_cnt;
4712 cnt = hdev->sco_cnt;
/* LE may share the ACL buffer pool when no dedicated LE buffers exist. */
4715 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4719 BT_ERR("Unknown link type");
4727 BT_DBG("conn %p quote %d", conn, *quote);
/* Handle a link TX timeout: disconnect every connection of @type that still
 * has unacknowledged packets (c->sent != 0), on the assumption the
 * controller has stalled.
 * NOTE(review): elided excerpt — the RCU read lock/unlock around the list
 * walk is not shown.
 */
4731 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4733 struct hci_conn_hash *h = &hdev->conn_hash;
4736 BT_ERR("%s link tx timeout", hdev->name);
4740 /* Kill stalled connections */
4741 list_for_each_entry_rcu(c, &h->list, list) {
4742 if (c->type == type && c->sent) {
4743 BT_ERR("%s killing stalled connection %pMR",
4744 hdev->name, &c->dst);
4745 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: across all connections of @type, pick the channel
 * whose head skb has the highest priority; ties broken by the connection
 * with the fewest in-flight packets. The quota is derived from the free
 * buffer count for the link type, weighted by priority.
 * NOTE(review): elided excerpt — RCU locking, `continue`s, the priority
 * weighting math and the return are not shown.
 */
4752 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4755 struct hci_conn_hash *h = &hdev->conn_hash;
4756 struct hci_chan *chan = NULL;
4757 unsigned int num = 0, min = ~0, cur_prio = 0;
4758 struct hci_conn *conn;
4759 int cnt, q, conn_num = 0;
4761 BT_DBG("%s", hdev->name);
4765 list_for_each_entry_rcu(conn, &h->list, list) {
4766 struct hci_chan *tmp;
4768 if (conn->type != type)
4771 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4776 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4777 struct sk_buff *skb;
4779 if (skb_queue_empty(&tmp->data_q))
/* Only the queue head's priority matters for scheduling. */
4782 skb = skb_peek(&tmp->data_q);
4783 if (skb->priority < cur_prio)
4786 if (skb->priority > cur_prio) {
4789 cur_prio = skb->priority;
4794 if (conn->sent < min) {
4800 if (hci_conn_num(hdev, type) == conn_num)
4809 switch (chan->conn->type) {
4811 cnt = hdev->acl_cnt;
4814 cnt = hdev->block_cnt;
4818 cnt = hdev->sco_cnt;
4821 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4825 BT_ERR("Unknown link type");
4830 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after packets were sent, promote the head skb of
 * waiting channels toward HCI_PRIO_MAX - 1 so low-priority traffic is not
 * starved by a persistent high-priority stream.
 * NOTE(review): elided excerpt — RCU locking, `continue`s and the promotion
 * arithmetic between the visible lines are not shown.
 */
4834 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4836 struct hci_conn_hash *h = &hdev->conn_hash;
4837 struct hci_conn *conn;
4840 BT_DBG("%s", hdev->name);
4844 list_for_each_entry_rcu(conn, &h->list, list) {
4845 struct hci_chan *chan;
4847 if (conn->type != type)
4850 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4855 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4856 struct sk_buff *skb;
4863 if (skb_queue_empty(&chan->data_q))
4866 skb = skb_peek(&chan->data_q);
/* Cap promotion one below the maximum priority. */
4867 if (skb->priority >= HCI_PRIO_MAX - 1)
4870 skb->priority = HCI_PRIO_MAX - 1;
4872 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4876 if (hci_conn_num(hdev, type) == num)
/* For block-based flow control: number of controller data blocks consumed by
 * this packet's payload (header excluded), rounded up.
 */
4884 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4886 /* Calculate count of blocks used by this packet */
4887 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If no TX credits remain and the last ACL transmit is older than
 * HCI_ACL_TX_TIMEOUT, treat the link as stalled and tear down connections
 * via hci_link_tx_to(). Skipped for unconfigured controllers.
 */
4890 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4892 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4893 /* ACL tx timeout must be longer than maximum
4894 * link supervision timeout (40.9 seconds) */
4895 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4896 HCI_ACL_TX_TIMEOUT))
4897 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while TX credits (acl_cnt) remain, pick the
 * best channel via hci_chan_sent() and drain up to `quote` packets of the
 * current head priority, sending each via hci_send_frame(). If anything was
 * sent, run the priority recalculation pass.
 * NOTE(review): elided excerpt — the counter decrements (acl_cnt, sent
 * bookkeeping) between the visible lines are not shown.
 */
4901 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4903 unsigned int cnt = hdev->acl_cnt;
4904 struct hci_chan *chan;
4905 struct sk_buff *skb;
4908 __check_timeout(hdev, cnt);
4910 while (hdev->acl_cnt &&
4911 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4912 u32 priority = (skb_peek(&chan->data_q))->priority;
4913 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4914 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4915 skb->len, skb->priority);
4917 /* Stop if priority has changed */
4918 if (skb->priority < priority)
4921 skb = skb_dequeue(&chan->data_q);
4923 hci_conn_enter_active_mode(chan->conn,
4924 bt_cb(skb)->force_active);
4926 hci_send_frame(hdev, skb);
4927 hdev->acl_last_tx = jiffies;
/* Anything sent changes queue heads; rebalance priorities. */
4935 if (cnt != hdev->acl_cnt)
4936 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (AMP-capable controllers): like
 * hci_sched_acl_pkt() but credits are data blocks, computed per packet via
 * __get_blocks(); a packet needing more blocks than remain is left queued.
 * NOTE(review): elided excerpt — the `type` selection for HCI_AMP, loop
 * bookkeeping and quote adjustment are not shown.
 */
4939 static void hci_sched_acl_blk(struct hci_dev *hdev)
4941 unsigned int cnt = hdev->block_cnt;
4942 struct hci_chan *chan;
4943 struct sk_buff *skb;
4947 __check_timeout(hdev, cnt);
4949 BT_DBG("%s", hdev->name);
4951 if (hdev->dev_type == HCI_AMP)
4956 while (hdev->block_cnt > 0 &&
4957 (chan = hci_chan_sent(hdev, type, &quote))) {
4958 u32 priority = (skb_peek(&chan->data_q))->priority;
4959 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4962 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4963 skb->len, skb->priority);
4965 /* Stop if priority has changed */
4966 if (skb->priority < priority)
4969 skb = skb_dequeue(&chan->data_q);
4971 blocks = __get_blocks(hdev, skb);
/* Never send a packet larger than the remaining block budget. */
4972 if (blocks > hdev->block_cnt)
4975 hci_conn_enter_active_mode(chan->conn,
4976 bt_cb(skb)->force_active);
4978 hci_send_frame(hdev, skb);
4979 hdev->acl_last_tx = jiffies;
4981 hdev->block_cnt -= blocks;
4984 chan->sent += blocks;
4985 chan->conn->sent += blocks;
4989 if (cnt != hdev->block_cnt)
4990 hci_prio_recalculate(hdev, type);
/* ACL scheduling dispatcher: skip when there is nothing to do for this
 * controller type, then route to the packet- or block-based scheduler
 * according to hdev->flow_ctl_mode.
 * NOTE(review): elided excerpt — `return` and `break` statements between the
 * visible lines are not shown.
 */
4993 static void hci_sched_acl(struct hci_dev *hdev)
4995 BT_DBG("%s", hdev->name);
4997 /* No ACL link over BR/EDR controller */
4998 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5001 /* No AMP link over AMP controller */
5002 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5005 switch (hdev->flow_ctl_mode) {
5006 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5007 hci_sched_acl_pkt(hdev);
5010 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5011 hci_sched_acl_blk(hdev);
/* SCO scheduler: while SCO credits remain, pick the least-loaded SCO
 * connection via hci_low_sent() and drain up to `quote` frames. The ~0
 * check resets a wrapped `sent` counter.
 * NOTE(review): elided excerpt — sent-counter increment/reset bodies are not
 * shown.
 */
5017 static void hci_sched_sco(struct hci_dev *hdev)
5019 struct hci_conn *conn;
5020 struct sk_buff *skb;
5023 BT_DBG("%s", hdev->name);
5025 if (!hci_conn_num(hdev, SCO_LINK))
5028 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5029 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5030 BT_DBG("skb %p len %d", skb, skb->len);
5031 hci_send_frame(hdev, skb);
5034 if (conn->sent == ~0)
/* eSCO scheduler — identical strategy to hci_sched_sco() but for ESCO_LINK
 * connections; eSCO shares the SCO credit pool (sco_cnt).
 * NOTE(review): elided excerpt — sent-counter increment/reset bodies are not
 * shown.
 */
5040 static void hci_sched_esco(struct hci_dev *hdev)
5042 struct hci_conn *conn;
5043 struct sk_buff *skb;
5046 BT_DBG("%s", hdev->name);
5048 if (!hci_conn_num(hdev, ESCO_LINK))
5051 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5053 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5054 BT_DBG("skb %p len %d", skb, skb->len);
5055 hci_send_frame(hdev, skb);
5058 if (conn->sent == ~0)
/* LE scheduler: applies its own 45-second TX timeout check (only when the
 * controller has dedicated LE buffers), then drains channels like the ACL
 * path. `cnt` is the LE credit pool, falling back to the shared ACL pool
 * when the controller reports no separate LE buffers; the leftover count is
 * written back to the matching counter afterwards.
 * NOTE(review): elided excerpt — sent-counter updates and the le_cnt
 * write-back branch are not shown.
 */
5064 static void hci_sched_le(struct hci_dev *hdev)
5066 struct hci_chan *chan;
5067 struct sk_buff *skb;
5068 int quote, cnt, tmp;
5070 BT_DBG("%s", hdev->name);
5072 if (!hci_conn_num(hdev, LE_LINK))
5075 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5076 /* LE tx timeout must be longer than maximum
5077 * link supervision timeout (40.9 seconds) */
5078 if (!hdev->le_cnt && hdev->le_pkts &&
5079 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5080 hci_link_tx_to(hdev, LE_LINK);
/* Fall back to the ACL buffer pool when there are no LE buffers. */
5083 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5085 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5086 u32 priority = (skb_peek(&chan->data_q))->priority;
5087 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5088 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5089 skb->len, skb->priority);
5091 /* Stop if priority has changed */
5092 if (skb->priority < priority)
5095 skb = skb_dequeue(&chan->data_q);
5097 hci_send_frame(hdev, skb);
5098 hdev->le_last_tx = jiffies;
5109 hdev->acl_cnt = cnt;
5112 hci_prio_recalculate(hdev, LE_LINK);
5115 static void hci_tx_work(struct work_struct *work)
5117 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5118 struct sk_buff *skb;
5120 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5121 hdev->sco_cnt, hdev->le_cnt);
5123 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5124 /* Schedule queues and send stuff to HCI driver */
5125 hci_sched_acl(hdev);
5126 hci_sched_sco(hdev);
5127 hci_sched_esco(hdev);
5131 /* Send next queued raw (unknown type) packet */
5132 while ((skb = skb_dequeue(&hdev->raw_q)))
5133 hci_send_frame(hdev, skb);
5136 /* ----- HCI RX task (incoming data processing) ----- */
5138 /* ACL data packet */
/* RX path for an ACL data packet: strip the ACL header, split the 16-bit
 * handle field into handle + flags, look up the connection (under hdev
 * lock), refresh its active mode and hand the payload to L2CAP. Unknown
 * handles are logged and the skb dropped.
 * NOTE(review): elided excerpt — hci_dev_lock(), the if/else around the
 * lookup result and the kfree_skb() are not shown.
 */
5139 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5141 struct hci_acl_hdr *hdr = (void *) skb->data;
5142 struct hci_conn *conn;
5143 __u16 handle, flags;
5145 skb_pull(skb, HCI_ACL_HDR_SIZE);
5147 handle = __le16_to_cpu(hdr->handle);
5148 flags = hci_flags(handle);
5149 handle = hci_handle(handle);
5151 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5154 hdev->stat.acl_rx++;
5157 conn = hci_conn_hash_lookup_handle(hdev, handle);
5158 hci_dev_unlock(hdev);
5161 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5163 /* Send to upper protocol */
5164 l2cap_recv_acldata(conn, skb, flags);
5167 BT_ERR("%s ACL packet for unknown connection handle %d",
5168 hdev->name, handle);
5174 /* SCO data packet */
/* RX path for a SCO data packet: strip the SCO header, resolve the handle
 * to a connection (under hdev lock) and deliver to the SCO layer; unknown
 * handles are logged and the skb dropped.
 * NOTE(review): elided excerpt — hci_dev_lock(), the if/else around the
 * lookup result and the kfree_skb() are not shown.
 */
5175 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5177 struct hci_sco_hdr *hdr = (void *) skb->data;
5178 struct hci_conn *conn;
5181 skb_pull(skb, HCI_SCO_HDR_SIZE);
5183 handle = __le16_to_cpu(hdr->handle);
5185 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5187 hdev->stat.sco_rx++;
5190 conn = hci_conn_hash_lookup_handle(hdev, handle);
5191 hci_dev_unlock(hdev);
5194 /* Send to upper protocol */
5195 sco_recv_scodata(conn, skb);
5198 BT_ERR("%s SCO packet for unknown connection handle %d",
5199 hdev->name, handle);
/* A request is complete when the next queued command starts a new request
 * (req.start set on the head of cmd_q).
 * NOTE(review): elided excerpt — the empty-queue early return is not shown.
 */
5205 static bool hci_req_is_complete(struct hci_dev *hdev)
5207 struct sk_buff *skb;
5209 skb = skb_peek(&hdev->cmd_q);
5213 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q — used
 * when a spontaneous reset event clobbered the pending command. A pending
 * HCI_OP_RESET itself is never resent.
 * NOTE(review): elided excerpt — the early returns and the HCI_OP_RESET
 * bail-out body are not shown.
 */
5216 static void hci_resend_last(struct hci_dev *hdev)
5218 struct hci_command_hdr *sent;
5219 struct sk_buff *skb;
5222 if (!hdev->sent_cmd)
5225 sent = (void *) hdev->sent_cmd->data;
5226 opcode = __le16_to_cpu(sent->opcode);
5227 if (opcode == HCI_OP_RESET)
5230 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of the queue so it runs before anything queued meanwhile. */
5234 skb_queue_head(&hdev->cmd_q, skb);
5235 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the whole request it
 * belonged to is finished, find the request's completion callback (either on
 * hdev->sent_cmd or on a queued command), drop the remaining commands of a
 * failed request from cmd_q, and invoke the callback once. Handles the
 * CSR spontaneous-reset quirk by resending the last command during init.
 * NOTE(review): elided excerpt — several returns/`goto`s and the final
 * callback-invocation guard are not shown.
 */
5238 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5240 hci_req_complete_t req_complete = NULL;
5241 struct sk_buff *skb;
5242 unsigned long flags;
5244 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5246 /* If the completed command doesn't match the last one that was
5247 * sent we need to do special handling of it.
5249 if (!hci_sent_cmd_data(hdev, opcode)) {
5250 /* Some CSR based controllers generate a spontaneous
5251 * reset complete event during init and any pending
5252 * command will never be completed. In such a case we
5253 * need to resend whatever was the last sent
5256 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5257 hci_resend_last(hdev);
5262 /* If the command succeeded and there's still more commands in
5263 * this request the request is not yet complete.
5265 if (!status && !hci_req_is_complete(hdev))
5268 /* If this was the last command in a request the complete
5269 * callback would be found in hdev->sent_cmd instead of the
5270 * command queue (hdev->cmd_q).
5272 if (hdev->sent_cmd) {
5273 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5276 /* We must set the complete callback to NULL to
5277 * avoid calling the callback more than once if
5278 * this function gets called again.
5280 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5286 /* Remove all pending commands belonging to this request */
5287 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5288 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the start of the next request; put that command back. */
5289 if (bt_cb(skb)->req.start) {
5290 __skb_queue_head(&hdev->cmd_q, skb);
5294 req_complete = bt_cb(skb)->req.complete;
5297 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5301 req_complete(hdev, status);
/* RX worker (hdev->rx_work): drain rx_q, mirroring each skb to the monitor
 * and (in promiscuous mode) to raw sockets. Skbs are dropped while the
 * device is owned by a user channel; data packets are dropped during init.
 * Remaining packets dispatch by type to the event/ACL/SCO handlers.
 * NOTE(review): elided excerpt — kfree_skb()/continue statements, case
 * labels (e.g. HCI_EVENT_PKT) and `break`s are not shown.
 */
5304 static void hci_rx_work(struct work_struct *work)
5306 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5307 struct sk_buff *skb;
5309 BT_DBG("%s", hdev->name);
5311 while ((skb = skb_dequeue(&hdev->rx_q))) {
5312 /* Send copy to monitor */
5313 hci_send_to_monitor(hdev, skb);
5315 if (atomic_read(&hdev->promisc)) {
5316 /* Send copy to the sockets */
5317 hci_send_to_sock(hdev, skb);
5320 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5325 if (test_bit(HCI_INIT, &hdev->flags)) {
5326 /* Don't process data packets in this states. */
5327 switch (bt_cb(skb)->pkt_type) {
5328 case HCI_ACLDATA_PKT:
5329 case HCI_SCODATA_PKT:
5336 switch (bt_cb(skb)->pkt_type) {
5338 BT_DBG("%s Event packet", hdev->name);
5339 hci_event_packet(hdev, skb);
5342 case HCI_ACLDATA_PKT:
5343 BT_DBG("%s ACL data packet", hdev->name);
5344 hci_acldata_packet(hdev, skb);
5347 case HCI_SCODATA_PKT:
5348 BT_DBG("%s SCO data packet", hdev->name);
5349 hci_scodata_packet(hdev, skb);
/* Command worker (hdev->cmd_work): if the controller has command credits
 * (cmd_cnt), dequeue the next command, clone it into hdev->sent_cmd for
 * later matching/resend, send it, and (re)arm the command timeout — unless
 * a reset is in flight, in which case the timer is cancelled. On clone
 * failure the command is put back and the worker rescheduled.
 * NOTE(review): elided excerpt — the empty-queue return, the `else` between
 * the cancel/schedule branches and the timeout constant are not shown.
 */
5359 static void hci_cmd_work(struct work_struct *work)
5361 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5362 struct sk_buff *skb;
5364 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5365 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5367 /* Send queued commands */
5368 if (atomic_read(&hdev->cmd_cnt)) {
5369 skb = skb_dequeue(&hdev->cmd_q);
/* Replace any previous sent_cmd with the new command's clone. */
5373 kfree_skb(hdev->sent_cmd);
5375 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5376 if (hdev->sent_cmd) {
5377 atomic_dec(&hdev->cmd_cnt);
5378 hci_send_frame(hdev, skb);
5379 if (test_bit(HCI_RESET, &hdev->flags))
5380 cancel_delayed_work(&hdev->cmd_timer);
5382 schedule_delayed_work(&hdev->cmd_timer,
5385 skb_queue_head(&hdev->cmd_q, skb);
5386 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable command with enable = LE_SCAN_DISABLE to the
 * request, turning LE scanning off.
 */
5391 void hci_req_add_le_scan_disable(struct hci_request *req)
5393 struct hci_cp_le_set_scan_enable cp;
5395 memset(&cp, 0, sizeof(cp));
5396 cp.enable = LE_SCAN_DISABLE;
5397 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5400 void hci_req_add_le_passive_scan(struct hci_request *req)
5402 struct hci_cp_le_set_scan_param param_cp;
5403 struct hci_cp_le_set_scan_enable enable_cp;
5404 struct hci_dev *hdev = req->hdev;
5407 /* Set require_privacy to false since no SCAN_REQ are send
5408 * during passive scanning. Not using an unresolvable address
5409 * here is important so that peer devices using direct
5410 * advertising with our address will be correctly reported
5411 * by the controller.
5413 if (hci_update_random_address(req, false, &own_addr_type))
5416 memset(¶m_cp, 0, sizeof(param_cp));
5417 param_cp.type = LE_SCAN_PASSIVE;
5418 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5419 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5420 param_cp.own_address_type = own_addr_type;
5421 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5424 memset(&enable_cp, 0, sizeof(enable_cp));
5425 enable_cp.enable = LE_SCAN_ENABLE;
5426 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5427 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: a non-zero status is
 * only logged; no recovery action is taken here.
 * NOTE(review): elided excerpt — the `if (status)` guard line is not shown.
 */
5431 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5434 BT_DBG("HCI request failed to update background scanning: "
5435 "status 0x%2.2x", status);
5438 /* This function controls the background scanning based on hdev->pend_le_conns
5439 * list. If there are pending LE connection we start the background scanning,
5440 * otherwise we stop it.
5442 * This function requires the caller holds hdev->lock.
5444 void hci_update_background_scan(struct hci_dev *hdev)
5446 struct hci_request req;
5447 struct hci_conn *conn;
5450 if (!test_bit(HCI_UP, &hdev->flags) ||
5451 test_bit(HCI_INIT, &hdev->flags) ||
5452 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5453 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5454 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5455 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5458 /* No point in doing scanning if LE support hasn't been enabled */
5459 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5462 /* If discovery is active don't interfere with it */
5463 if (hdev->discovery.state != DISCOVERY_STOPPED)
5466 hci_req_init(&req, hdev);
5468 if (list_empty(&hdev->pend_le_conns) &&
5469 list_empty(&hdev->pend_le_reports)) {
5470 /* If there is no pending LE connections or devices
5471 * to be scanned for, we should stop the background
5475 /* If controller is not scanning we are done. */
5476 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5479 hci_req_add_le_scan_disable(&req);
5481 BT_DBG("%s stopping background scanning", hdev->name);
5483 /* If there is at least one pending LE connection, we should
5484 * keep the background scan running.
5487 /* If controller is connecting, we should not start scanning
5488 * since some controllers are not able to scan and connect at
5491 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5495 /* If controller is currently scanning, we stop it to ensure we
5496 * don't miss any advertising (due to duplicates filter).
5498 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5499 hci_req_add_le_scan_disable(&req);
5501 hci_req_add_le_passive_scan(&req);
5503 BT_DBG("%s starting background scanning", hdev->name);
5506 err = hci_req_run(&req, update_background_scan_complete);
5508 BT_ERR("Failed to run HCI request: err %d", err);