/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
/* Work handlers for the RX, command and TX paths (defined below). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
/* Notify the HCI socket layer of a device event (register, up, down, ...). */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
194 static int uuids_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
200 list_for_each_entry(uuid, &hdev->uuids, list) {
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
210 seq_printf(f, "%pUb\n", val);
212 hci_dev_unlock(hdev);
217 static int uuids_open(struct inode *inode, struct file *file)
219 return single_open(file, uuids_show, inode->i_private);
222 static const struct file_operations uuids_fops = {
226 .release = single_release,
229 static int inquiry_cache_show(struct seq_file *f, void *p)
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
248 hci_dev_unlock(hdev);
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
255 return single_open(file, inquiry_cache_show, inode->i_private);
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
262 .release = single_release,
265 static int link_keys_show(struct seq_file *f, void *ptr)
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
276 hci_dev_unlock(hdev);
281 static int link_keys_open(struct inode *inode, struct file *file)
283 return single_open(file, link_keys_show, inode->i_private);
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
290 .release = single_release,
293 static int dev_class_show(struct seq_file *f, void *ptr)
295 struct hci_dev *hdev = f->private;
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
305 static int dev_class_open(struct inode *inode, struct file *file)
307 return single_open(file, dev_class_show, inode->i_private);
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
314 .release = single_release,
317 static int voice_setting_get(void *data, u64 *val)
319 struct hci_dev *hdev = data;
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
331 static int auto_accept_delay_set(void *data, u64 val)
333 struct hci_dev *hdev = data;
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
342 static int auto_accept_delay_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
359 struct hci_dev *hdev = file->private_data;
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
372 struct hci_dev *hdev = file->private_data;
374 size_t buf_size = min(count, (sizeof(buf)-1));
377 if (test_bit(HCI_UP, &hdev->flags))
380 if (copy_from_user(buf, user_buf, buf_size))
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
395 static const struct file_operations force_sc_support_fops = {
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
405 struct hci_dev *hdev = file->private_data;
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 static const struct file_operations sc_only_mode_fops = {
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
420 static int idle_timeout_set(void *data, u64 val)
422 struct hci_dev *hdev = data;
424 if (val != 0 && (val < 500 || val > 3600000))
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
434 static int idle_timeout_get(void *data, u64 *val)
436 struct hci_dev *hdev = data;
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
448 static int rpa_timeout_set(void *data, u64 val)
450 struct hci_dev *hdev = data;
452 /* Require the RPA timeout to be at least 30 seconds and at most
455 if (val < 30 || val > (60 * 60 * 24))
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
465 static int rpa_timeout_get(void *data, u64 *val)
467 struct hci_dev *hdev = data;
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
479 static int sniff_min_interval_set(void *data, u64 val)
481 struct hci_dev *hdev = data;
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
493 static int sniff_min_interval_get(void *data, u64 *val)
495 struct hci_dev *hdev = data;
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
507 static int sniff_max_interval_set(void *data, u64 val)
509 struct hci_dev *hdev = data;
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
521 static int sniff_max_interval_get(void *data, u64 *val)
523 struct hci_dev *hdev = data;
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
535 static int conn_info_min_age_set(void *data, u64 val)
537 struct hci_dev *hdev = data;
539 if (val == 0 || val > hdev->conn_info_max_age)
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
549 static int conn_info_min_age_get(void *data, u64 *val)
551 struct hci_dev *hdev = data;
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
563 static int conn_info_max_age_set(void *data, u64 val)
565 struct hci_dev *hdev = data;
567 if (val == 0 || val < hdev->conn_info_min_age)
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
577 static int conn_info_max_age_get(void *data, u64 *val)
579 struct hci_dev *hdev = data;
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
591 static int identity_show(struct seq_file *f, void *p)
593 struct hci_dev *hdev = f->private;
599 hci_copy_identity_address(hdev, &addr, &addr_type);
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
604 hci_dev_unlock(hdev);
609 static int identity_open(struct inode *inode, struct file *file)
611 return single_open(file, identity_show, inode->i_private);
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
618 .release = single_release,
621 static int random_address_show(struct seq_file *f, void *p)
623 struct hci_dev *hdev = f->private;
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
632 static int random_address_open(struct inode *inode, struct file *file)
634 return single_open(file, random_address_show, inode->i_private);
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
641 .release = single_release,
644 static int static_address_show(struct seq_file *f, void *p)
646 struct hci_dev *hdev = f->private;
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
655 static int static_address_open(struct inode *inode, struct file *file)
657 return single_open(file, static_address_show, inode->i_private);
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
664 .release = single_release,
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
671 struct hci_dev *hdev = file->private_data;
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
684 struct hci_dev *hdev = file->private_data;
686 size_t buf_size = min(count, (sizeof(buf)-1));
689 if (test_bit(HCI_UP, &hdev->flags))
692 if (copy_from_user(buf, user_buf, buf_size))
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
707 static const struct file_operations force_static_address_fops = {
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
714 static int white_list_show(struct seq_file *f, void *ptr)
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
727 static int white_list_open(struct inode *inode, struct file *file)
729 return single_open(file, white_list_show, inode->i_private);
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
736 .release = single_release,
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
751 hci_dev_unlock(hdev);
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
758 return single_open(file, identity_resolving_keys_show,
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
766 .release = single_release,
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
782 hci_dev_unlock(hdev);
787 static int long_term_keys_open(struct inode *inode, struct file *file)
789 return single_open(file, long_term_keys_show, inode->i_private);
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
796 .release = single_release,
799 static int conn_min_interval_set(void *data, u64 val)
801 struct hci_dev *hdev = data;
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
813 static int conn_min_interval_get(void *data, u64 *val)
815 struct hci_dev *hdev = data;
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
827 static int conn_max_interval_set(void *data, u64 val)
829 struct hci_dev *hdev = data;
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
841 static int conn_max_interval_get(void *data, u64 *val)
843 struct hci_dev *hdev = data;
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
855 static int conn_latency_set(void *data, u64 val)
857 struct hci_dev *hdev = data;
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
869 static int conn_latency_get(void *data, u64 *val)
871 struct hci_dev *hdev = data;
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
883 static int supervision_timeout_set(void *data, u64 val)
885 struct hci_dev *hdev = data;
887 if (val < 0x000a || val > 0x0c80)
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
897 static int supervision_timeout_get(void *data, u64 *val)
899 struct hci_dev *hdev = data;
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
911 static int adv_channel_map_set(void *data, u64 val)
913 struct hci_dev *hdev = data;
915 if (val < 0x01 || val > 0x07)
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
925 static int adv_channel_map_get(void *data, u64 *val)
927 struct hci_dev *hdev = data;
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
939 static int device_list_show(struct seq_file *f, void *ptr)
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
949 hci_dev_unlock(hdev);
954 static int device_list_open(struct inode *inode, struct file *file)
956 return single_open(file, device_list_show, inode->i_private);
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
963 .release = single_release,
966 /* ---- HCI requests ---- */
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1002 hci_dev_unlock(hdev);
1005 return ERR_PTR(-ENODATA);
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1016 if (hdr->evt != event)
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1034 if (opcode == __le16_to_cpu(ev->opcode))
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1042 return ERR_PTR(-ENODATA);
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1052 BT_DBG("%s", hdev->name);
1054 hci_req_init(&req, hdev);
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1058 hdev->req_status = HCI_REQ_PEND;
1060 err = hci_req_run(&req, hci_req_sync_complete);
1062 return ERR_PTR(err);
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1067 schedule_timeout(timeout);
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1074 switch (hdev->req_status) {
1076 err = -bt_to_errno(hdev->req_result);
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1088 hdev->req_status = hdev->req_result = 0;
1090 BT_DBG("%s end: err %d", hdev->name, err);
1093 return ERR_PTR(err);
1095 return hci_get_cmd_complete(hdev, opcode, event);
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1104 EXPORT_SYMBOL(__hci_cmd_sync);
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1110 unsigned long opt, __u32 timeout)
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1116 BT_DBG("%s start", hdev->name);
1118 hci_req_init(&req, hdev);
1120 hdev->req_status = HCI_REQ_PEND;
1124 err = hci_req_run(&req, hci_req_sync_complete);
1126 hdev->req_status = 0;
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1133 if (err == -ENODATA)
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1142 schedule_timeout(timeout);
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1146 if (signal_pending(current))
1149 switch (hdev->req_status) {
1151 err = -bt_to_errno(hdev->req_result);
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1163 hdev->req_status = hdev->req_result = 0;
1165 BT_DBG("%s end: err %d", hdev->name, err);
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1173 unsigned long opt, __u32 timeout)
1177 if (!test_bit(HCI_UP, &hdev->flags))
1180 /* Serialize all requests */
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1197 static void bredr_init(struct hci_request *req)
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1211 static void amp_init(struct hci_request *req)
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1239 struct hci_dev *hdev = req->hdev;
1241 BT_DBG("%s %ld", hdev->name, opt);
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1247 switch (hdev->dev_type) {
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1262 static void bredr_setup(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1304 static void le_setup(struct hci_request *req)
1306 struct hci_dev *hdev = req->hdev;
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1333 if (lmp_ext_inq_capable(hdev))
1336 if (lmp_inq_rssi_capable(hdev))
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1363 mode = hci_get_inquiry_mode(req->hdev);
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1368 static void hci_setup_event_mask(struct hci_request *req)
1370 struct hci_dev *hdev = req->hdev;
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* hci_init2_req - stage-2 controller initialization request builder.
 * Queues (via hci_req_add) the event mask setup, Read Local Supported
 * Commands, SSP/EIR configuration, inquiry mode/TX-power reads, extended
 * feature reads and link-security enable, each gated on the controller's
 * LMP capability bits.
 * NOTE(review): this extract is missing interior lines (braces, else
 * branches, some declarations such as `mode`/`enable`) — verify against
 * the complete file before editing.
 */
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1444 struct hci_dev *hdev = req->hdev;
1446 if (lmp_bredr_capable(hdev))
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1451 if (lmp_le_capable(hdev))
1454 hci_setup_event_mask(req);
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1469 hdev->max_page = 0x01;
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
/* `mode` is declared on a line not present in this extract. */
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
1476 struct hci_cp_write_eir cp;
/* SSP disabled: clear the cached EIR and send an all-zero Write EIR. */
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1490 
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* hci_setup_link_policy - build the default link policy from the
 * controller's LMP capabilities (role switch, hold, sniff, park) and
 * queue a Write Default Link Policy command carrying it.
 * NOTE(review): braces/blank lines are dropped in this extract; the
 * statements themselves appear complete.
 */
1506 static void hci_setup_link_policy(struct hci_request *req)
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
/* Policy bits go on the wire little-endian. */
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* hci_set_le_support - queue Write LE Host Supported when the host's
 * desired LE state (HCI_LE_ENABLED flag) differs from what the
 * controller currently reports.
 * NOTE(review): lines setting cp.le inside the if-branch and the early
 * return for LE-only devices are missing from this extract — confirm
 * against the full file.
 */
1525 static void hci_set_le_support(struct hci_request *req)
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1534 memset(&cp, 0, sizeof(cp));
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1538 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command if it would actually change the host setting. */
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* hci_set_event_mask_page_2 - enable page-2 HCI events (Connectionless
 * Slave Broadcast master/slave events, Authenticated Payload Timeout)
 * based on LMP capabilities, then queue Set Event Mask Page 2.
 * NOTE(review): the events[2] |= 0x80 line for the ping event appears to
 * be missing from this extract.
 */
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* hci_init3_req - stage-3 controller initialization: delete stored link
 * keys (when genuinely supported), set default link policy, configure
 * the LE event mask and LE host support, and read feature pages >= 2.
 * NOTE(review): declarations for the LE `events` array and loop index
 * `p`, plus several closing braces, are missing from this extract.
 */
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1580 struct hci_dev *hdev = req->hdev;
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1587 * Check the supported commands and only if the the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1592 * Some controllers indicate that they support handling deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
/* commands[6] bit 0x80 == Delete Stored Link Key supported. */
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 == Write Default Link Policy supported. */
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1609 if (lmp_le_capable(hdev)) {
1612 memset(events, 0, sizeof(events));
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1626 hci_set_le_support(req);
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* hci_init4_req - stage-4 controller initialization: event mask page 2,
 * synchronization train parameters, and Secure Connections support.
 * NOTE(review): the `support` variable declaration is on a line missing
 * from this extract.
 */
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1641 struct hci_dev *hdev = req->hdev;
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
/* __hci_init - run the four synchronous init request stages and, during
 * the initial HCI_SETUP phase only, create the per-device debugfs
 * entries appropriate to the controller's capabilities (BR/EDR, SSP,
 * sniff, LE).
 * Returns 0 on success or the negative error from the failing stage.
 * NOTE(review): the `int err` declaration, several error-return lines
 * and closing braces are missing from this extract.
 */
1661 static int __hci_init(struct hci_dev *hdev)
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1677 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1681 if (hdev->dev_type != HCI_BREDR)
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* --- unconditional (capability-independent) debugfs entries --- */
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
/* --- BR/EDR-only entries --- */
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
/* --- SSP-only entries --- */
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
/* --- sniff-mode entries --- */
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
/* --- LE-only entries --- */
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1788 &hdev->discov_interleaved_timeout);
/* Small synchronous request builders. Each is passed to
 * hci_req_sync()/__hci_req_sync() with an opaque `opt` argument.
 * NOTE(review): several local declarations (scan/auth/encrypt derived
 * from `opt`) and closing braces are missing from this extract.
 */

/* hci_init0_req - minimal init for unconfigured controllers:
 * optional reset, Read Local Version, and (when the driver can set the
 * address) Read BD Address. */
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1796 struct hci_dev *hdev = req->hdev;
1798 BT_DBG("%s %ld", hdev->name, opt);
1801 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802 hci_reset_req(req, 0);
1804 /* Read Local Version */
1805 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1807 /* Read BD Address */
1808 if (hdev->set_bdaddr)
1809 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

/* __hci_unconf_init - run hci_init0_req synchronously for an
 * unconfigured controller. */
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1816 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);

/* hci_scan_req - enable inquiry/page scan with the mode in `opt`. */
1823 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1827 BT_DBG("%s %x", req->hdev->name, scan);
1829 /* Inquiry and Page scans */
1830 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

/* hci_auth_req - write the authentication-enable setting from `opt`. */
1833 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1837 BT_DBG("%s %x", req->hdev->name, auth);
1839 /* Authentication */
1840 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

/* hci_encrypt_req - write the encryption-mode setting from `opt`. */
1843 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1847 BT_DBG("%s %x", req->hdev->name, encrypt);
1850 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);

/* hci_linkpol_req - write the default link policy from `opt`
 * (converted to little-endian for the wire). */
1853 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1855 __le16 policy = cpu_to_le16(opt);
1857 BT_DBG("%s %x", req->hdev->name, policy);
1859 /* Default link policy */
1860 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1863 /* Get HCI device by index.
1864 * Device is held on return. */
/* Walks hci_dev_list under the read lock; the matching device's
 * refcount is taken via hci_dev_hold() before the lock is dropped,
 * so the caller must balance with hci_dev_put().
 * NOTE(review): the `break` and `return hdev` lines are missing from
 * this extract. */
1865 struct hci_dev *hci_dev_get(int index)
1867 struct hci_dev *hdev = NULL, *d;
1869 BT_DBG("%d", index);
1874 read_lock(&hci_dev_list_lock);
1875 list_for_each_entry(d, &hci_dev_list, list) {
1876 if (d->id == index) {
1877 hdev = hci_dev_hold(d);
1881 read_unlock(&hci_dev_list_lock);
1885 /* ---- Inquiry support ---- */

/* hci_discovery_active - true while discovery is in the FINDING or
 * RESOLVING state.
 * NOTE(review): the `return true` / `return false` lines are missing
 * from this extract. */
1887 bool hci_discovery_active(struct hci_dev *hdev)
1889 struct discovery_state *discov = &hdev->discovery;
1891 switch (discov->state) {
1892 case DISCOVERY_FINDING:
1893 case DISCOVERY_RESOLVING:

/* hci_discovery_set_state - transition the discovery state machine and
 * emit mgmt "discovering" events on the STOPPED/FINDING edges; also
 * refreshes background scanning when discovery stops.
 * NOTE(review): `switch (state)` and `break` lines are missing from
 * this extract. */
1901 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1903 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1905 if (hdev->discovery.state == state)
1909 case DISCOVERY_STOPPED:
1910 hci_update_background_scan(hdev);
/* Suppress the mgmt event when stopping out of STARTING (discovery
 * never actually began). */
1912 if (hdev->discovery.state != DISCOVERY_STARTING)
1913 mgmt_discovering(hdev, 0);
1915 case DISCOVERY_STARTING:
1917 case DISCOVERY_FINDING:
1918 mgmt_discovering(hdev, 1);
1920 case DISCOVERY_RESOLVING:
1922 case DISCOVERY_STOPPING:
1926 hdev->discovery.state = state;
/* Inquiry cache helpers. The discovery_state keeps three lists:
 * `all` (every cached entry), `unknown` (name not yet known) and
 * `resolve` (pending name resolution ordered by RSSI).
 * NOTE(review): list_del/kfree lines inside the flush loop, `return`
 * statements and closing braces are missing from this extract.
 */

/* hci_inquiry_cache_flush - free every cached entry and reset the
 * unknown/resolve lists. */
1929 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1931 struct discovery_state *cache = &hdev->discovery;
1932 struct inquiry_entry *p, *n;
1934 list_for_each_entry_safe(p, n, &cache->all, all) {
1939 INIT_LIST_HEAD(&cache->unknown);
1940 INIT_LIST_HEAD(&cache->resolve);

/* hci_inquiry_cache_lookup - find a cached entry by bdaddr in `all`. */
1943 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1946 struct discovery_state *cache = &hdev->discovery;
1947 struct inquiry_entry *e;
1949 BT_DBG("cache %p, %pMR", cache, bdaddr);
1951 list_for_each_entry(e, &cache->all, all) {
1952 if (!bacmp(&e->data.bdaddr, bdaddr))

/* hci_inquiry_cache_lookup_unknown - find a bdaddr in the `unknown`
 * (name-not-known) list. */
1959 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1962 struct discovery_state *cache = &hdev->discovery;
1963 struct inquiry_entry *e;
1965 BT_DBG("cache %p, %pMR", cache, bdaddr);
1967 list_for_each_entry(e, &cache->unknown, list) {
1968 if (!bacmp(&e->data.bdaddr, bdaddr))

/* hci_inquiry_cache_lookup_resolve - find an entry in the `resolve`
 * list; BDADDR_ANY matches by name_state instead of address. */
1975 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1979 struct discovery_state *cache = &hdev->discovery;
1980 struct inquiry_entry *e;
1982 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1984 list_for_each_entry(e, &cache->resolve, list) {
1985 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1987 if (!bacmp(&e->data.bdaddr, bdaddr))

/* hci_inquiry_cache_update_resolve - re-insert `ie` into the resolve
 * list keeping it sorted by |RSSI| (strongest first), skipping entries
 * whose name resolution is already pending. */
1994 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1995 struct inquiry_entry *ie)
1997 struct discovery_state *cache = &hdev->discovery;
1998 struct list_head *pos = &cache->resolve;
1999 struct inquiry_entry *p;
2001 list_del(&ie->list);
2003 list_for_each_entry(p, &cache->resolve, list) {
2004 if (p->name_state != NAME_PENDING &&
2005 abs(p->data.rssi) >= abs(ie->data.rssi))
2010 list_add(&ie->list, pos);
/* hci_inquiry_cache_update - insert or refresh an inquiry result in the
 * cache. Returns MGMT_DEV_FOUND_* flags describing the entry (legacy
 * pairing, confirm-name needed).
 * NOTE(review): the `goto update` / `goto done` control-flow lines,
 * `flags` declaration and several braces are missing from this extract,
 * so the exact branch structure cannot be verified here.
 */
2013 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2016 struct discovery_state *cache = &hdev->discovery;
2017 struct inquiry_entry *ie;
2020 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data. */
2022 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2024 if (!data->ssp_mode)
2025 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2027 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2029 if (!ie->data.ssp_mode)
2030 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is still needed: re-sort the
 * resolve list so stronger devices are resolved first. */
2032 if (ie->name_state == NAME_NEEDED &&
2033 data->rssi != ie->data.rssi) {
2034 ie->data.rssi = data->rssi;
2035 hci_inquiry_cache_update_resolve(hdev, ie);
2041 /* Entry not in the cache. Add new one. */
2042 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2044 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2048 list_add(&ie->all, &cache->all);
2051 ie->name_state = NAME_KNOWN;
2053 ie->name_state = NAME_NOT_KNOWN;
2054 list_add(&ie->list, &cache->unknown);
2058 if (name_known && ie->name_state != NAME_KNOWN &&
2059 ie->name_state != NAME_PENDING) {
2060 ie->name_state = NAME_KNOWN;
2061 list_del(&ie->list);
2064 memcpy(&ie->data, data, sizeof(*data));
2065 ie->timestamp = jiffies;
2066 cache->timestamp = jiffies;
2068 if (ie->name_state == NAME_NOT_KNOWN)
2069 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

/* inquiry_cache_dump - copy up to `num` cached entries into `buf` as
 * struct inquiry_info records; returns the count copied.
 * NOTE(review): the `copied` counter declaration/increment and the
 * num-limit check are missing from this extract. */
2075 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2077 struct discovery_state *cache = &hdev->discovery;
2078 struct inquiry_info *info = (struct inquiry_info *) buf;
2079 struct inquiry_entry *e;
2082 list_for_each_entry(e, &cache->all, all) {
2083 struct inquiry_data *data = &e->data;
2088 bacpy(&info->bdaddr, &data->bdaddr);
2089 info->pscan_rep_mode = data->pscan_rep_mode;
2090 info->pscan_period_mode = data->pscan_period_mode;
2091 info->pscan_mode = data->pscan_mode;
2092 memcpy(info->dev_class, data->dev_class, 3);
2093 info->clock_offset = data->clock_offset;
2099 BT_DBG("cache %p, copied %d", cache, copied);
/* hci_inq_req - queue an HCI Inquiry command built from the
 * hci_inquiry_req passed through `opt`, unless an inquiry is already
 * in progress. */
2103 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2105 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2106 struct hci_dev *hdev = req->hdev;
2107 struct hci_cp_inquiry cp;
2109 BT_DBG("%s", hdev->name);
2111 if (test_bit(HCI_INQUIRY, &hdev->flags))
2115 memcpy(&cp.lap, &ir->lap, 3);
2116 cp.length = ir->length;
2117 cp.num_rsp = ir->num_rsp;
2118 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

/* wait_inquiry - wait_on_bit callback; returning non-zero (a pending
 * signal) aborts the wait with -EINTR.
 * NOTE(review): a schedule() call appears to be missing from this
 * extract. */
2121 static int wait_inquiry(void *word)
2124 return signal_pending(current);

/* hci_inquiry - HCIINQUIRY ioctl handler. Validates the device state
 * (no user channel, configured, BR/EDR and enabled), optionally flushes
 * a stale cache and runs a new inquiry, then copies the cached results
 * to user space.
 * NOTE(review): error-path labels, `goto done`, the `timeo`/`buf`
 * declarations and hci_dev_lock() calls are on lines missing from this
 * extract.
 */
2127 int hci_inquiry(void __user *arg)
2129 __u8 __user *ptr = arg;
2130 struct hci_inquiry_req ir;
2131 struct hci_dev *hdev;
2132 int err = 0, do_inquiry = 0, max_rsp;
2136 if (copy_from_user(&ir, ptr, sizeof(ir)))
2139 hdev = hci_dev_get(ir.dev_id);
2143 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2148 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2153 if (hdev->dev_type != HCI_BREDR) {
2158 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run the inquiry when the cache is stale, empty, or the caller
 * explicitly asked for a flush. */
2164 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2165 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2166 hci_inquiry_cache_flush(hdev);
2169 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28 s; 2000 ms over-approximates that. */
2171 timeo = ir.length * msecs_to_jiffies(2000);
2174 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2179 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2180 * cleared). If it is interrupted by a signal, return -EINTR.
2182 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2183 TASK_INTERRUPTIBLE))
2187 /* for unlimited number of responses we will use buffer with
2190 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2192 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2193 * copy it to the user space.
2195 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2202 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2203 hci_dev_unlock(hdev);
2205 BT_DBG("num_rsp %d", ir.num_rsp);
2207 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2209 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* hci_dev_do_open - power on and initialize a controller: checks
 * rfkill/address preconditions, calls the driver's open() and setup()
 * callbacks, runs the appropriate init sequence (full or unconfigured),
 * and on failure tears everything back down.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the `ret` declaration, hci_req_lock(), goto labels and
 * many `goto done` lines are missing from this extract — the visible
 * text is NOT the complete control flow.
 */
2222 static int hci_dev_do_open(struct hci_dev *hdev)
2226 BT_DBG("%s %p", hdev->name, hdev)
2230 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2235 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2236 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2237 /* Check for rfkill but allow the HCI setup stage to
2238 * proceed (which in itself doesn't cause any RF activity).
2240 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2245 /* Check for valid public address or a configured static
2246 * random adddress, but let the HCI setup proceed to
2247 * be able to determine if there is a public address
2250 * In case of user channel usage, it is not important
2251 * if a public address or static random address is
2254 * This check is only valid for BR/EDR controllers
2255 * since AMP controllers do not have an address.
2257 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258 hdev->dev_type == HCI_BREDR &&
2259 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261 ret = -EADDRNOTAVAIL;
2266 if (test_bit(HCI_UP, &hdev->flags)) {
2271 if (hdev->open(hdev)) {
2276 atomic_set(&hdev->cmd_cnt, 1);
2277 set_bit(HCI_INIT, &hdev->flags);
2279 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2281 ret = hdev->setup(hdev);
2283 /* The transport driver can set these quirks before
2284 * creating the HCI device or in its setup callback.
2286 * In case any of them is set, the controller has to
2287 * start up as unconfigured.
2289 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2290 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2291 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2293 /* For an unconfigured controller it is required to
2294 * read at least the version information provided by
2295 * the Read Local Version Information command.
2297 * If the set_bdaddr driver callback is provided, then
2298 * also the original Bluetooth public device address
2299 * will be read using the Read BD Address command.
2301 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2302 ret = __hci_unconf_init(hdev);
2305 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2306 /* If public address change is configured, ensure that
2307 * the address gets programmed. If the driver does not
2308 * support changing the public address, fail the power
2311 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2313 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2315 ret = -EADDRNOTAVAIL;
2319 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2320 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2321 ret = __hci_init(hdev);
2324 clear_bit(HCI_INIT, &hdev->flags);
/* --- success path --- */
2328 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2329 set_bit(HCI_UP, &hdev->flags);
2330 hci_notify(hdev, HCI_DEV_UP);
2331 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2332 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2333 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2334 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2335 hdev->dev_type == HCI_BREDR) {
2337 mgmt_powered(hdev, 1);
2338 hci_dev_unlock(hdev);
2341 /* Init failed, cleanup */
2342 flush_work(&hdev->tx_work);
2343 flush_work(&hdev->cmd_work);
2344 flush_work(&hdev->rx_work);
2346 skb_queue_purge(&hdev->cmd_q);
2347 skb_queue_purge(&hdev->rx_q);
2352 if (hdev->sent_cmd) {
2353 kfree_skb(hdev->sent_cmd);
2354 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW across the failed open. */
2358 hdev->flags &= BIT(HCI_RAW);
2362 hci_req_unlock(hdev);
2366 /* ---- HCI ioctl helpers ---- */

/* hci_dev_open - HCIDEVUP ioctl entry point. Refuses to bring up an
 * unconfigured device outside the user channel, cancels a pending
 * auto-power-off, waits for setup to finish, then calls
 * hci_dev_do_open().
 * NOTE(review): error labels, `goto done`, and the final hci_dev_put()/
 * return lines are missing from this extract. */
2368 int hci_dev_open(__u16 dev)
2370 struct hci_dev *hdev;
2373 hdev = hci_dev_get(dev);
2377 /* Devices that are marked as unconfigured can only be powered
2378 * up as user channel. Trying to bring them up as normal devices
2379 * will result into a failure. Only user channel operation is
2382 * When this function is called for a user channel, the flag
2383 * HCI_USER_CHANNEL will be set first before attempting to
2386 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2387 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2392 /* We need to ensure that no other power on/off work is pending
2393 * before proceeding to call hci_dev_do_open. This is
2394 * particularly important if the setup procedure has not yet
2397 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2398 cancel_delayed_work(&hdev->power_off);
2400 /* After this call it is guaranteed that the setup procedure
2401 * has finished. This means that error conditions like RFKILL
2402 * or no valid public or static random address apply.
2404 flush_workqueue(hdev->req_workqueue);
2406 err = hci_dev_do_open(hdev);

2413 /* This function requires the caller holds hdev->lock */
/* hci_pend_le_actions_clear - drop every LE connection parameter entry
 * from its pending-action list (entries themselves are not freed). */
2414 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2416 struct hci_conn_params *p;
2418 list_for_each_entry(p, &hdev->le_conn_params, list)
2419 list_del_init(&p->action);
2421 BT_DBG("All LE pending actions cleared");
/* hci_dev_do_close - power the controller down: cancel timers, flush
 * work, clear discovery/connection state, optionally issue a final HCI
 * Reset, drop queued frames, notify mgmt and reset per-device state.
 * NOTE(review): hci_req_lock(), hci_dev_lock(), the driver close()
 * call and `return 0` lines are missing from this extract.
 */
2424 static int hci_dev_do_close(struct hci_dev *hdev)
2426 BT_DBG("%s %p", hdev->name, hdev);
2428 cancel_delayed_work(&hdev->power_off);
2430 hci_req_cancel(hdev, ENODEV);
/* Already down: just make sure the command timer is stopped. */
2433 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2434 cancel_delayed_work_sync(&hdev->cmd_timer);
2435 hci_req_unlock(hdev);
2439 /* Flush RX and TX works */
2440 flush_work(&hdev->tx_work);
2441 flush_work(&hdev->rx_work);
2443 if (hdev->discov_timeout > 0) {
2444 cancel_delayed_work(&hdev->discov_off);
2445 hdev->discov_timeout = 0;
2446 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2447 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2450 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2451 cancel_delayed_work(&hdev->service_cache);
2453 cancel_delayed_work_sync(&hdev->le_scan_disable);
2455 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2456 cancel_delayed_work_sync(&hdev->rpa_expired);
2459 hci_inquiry_cache_flush(hdev);
2460 hci_conn_hash_flush(hdev);
2461 hci_pend_le_actions_clear(hdev);
2462 hci_dev_unlock(hdev);
2464 hci_notify(hdev, HCI_DEV_DOWN);
2470 skb_queue_purge(&hdev->cmd_q);
2471 atomic_set(&hdev->cmd_cnt, 1);
/* Issue a final Reset only when the quirk requests it and the device
 * isn't auto-off or unconfigured. */
2472 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2473 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2474 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2475 set_bit(HCI_INIT, &hdev->flags);
2476 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2477 clear_bit(HCI_INIT, &hdev->flags);
2480 /* flush cmd work */
2481 flush_work(&hdev->cmd_work);
2484 skb_queue_purge(&hdev->rx_q);
2485 skb_queue_purge(&hdev->cmd_q);
2486 skb_queue_purge(&hdev->raw_q);
2488 /* Drop last sent command */
2489 if (hdev->sent_cmd) {
2490 cancel_delayed_work_sync(&hdev->cmd_timer);
2491 kfree_skb(hdev->sent_cmd);
2492 hdev->sent_cmd = NULL;
2495 kfree_skb(hdev->recv_evt);
2496 hdev->recv_evt = NULL;
2498 /* After this point our queues are empty
2499 * and no tasks are scheduled. */
/* Keep only HCI_RAW; clear all non-persistent dev_flags. */
2503 hdev->flags &= BIT(HCI_RAW);
2504 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2507 if (hdev->dev_type == HCI_BREDR) {
2509 mgmt_powered(hdev, 0);
2510 hci_dev_unlock(hdev);
2514 /* Controller radio is available but is currently powered down */
2515 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2517 memset(hdev->eir, 0, sizeof(hdev->eir));
2518 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2519 bacpy(&hdev->random_addr, BDADDR_ANY);
2521 hci_req_unlock(hdev);

/* hci_dev_close - HCIDEVDOWN ioctl entry point: rejects user-channel
 * devices, cancels pending auto-power-off, then closes the device.
 * NOTE(review): the `err` declaration, goto label and hci_dev_put()
 * lines are missing from this extract. */
2527 int hci_dev_close(__u16 dev)
2529 struct hci_dev *hdev;
2532 hdev = hci_dev_get(dev);
2536 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2541 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2542 cancel_delayed_work(&hdev->power_off);
2544 err = hci_dev_do_close(hdev);
/* hci_dev_reset - HCIDEVRESET ioctl: drop queued frames, flush caches
 * and connections, reset counters and issue a synchronous HCI Reset.
 * NOTE(review): the `ret` declaration, hci_req_lock/hci_dev_lock and
 * goto/return lines are missing from this extract. */
2551 int hci_dev_reset(__u16 dev)
2553 struct hci_dev *hdev;
2556 hdev = hci_dev_get(dev);
2562 if (!test_bit(HCI_UP, &hdev->flags)) {
2567 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2572 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2578 skb_queue_purge(&hdev->rx_q);
2579 skb_queue_purge(&hdev->cmd_q);
2582 hci_inquiry_cache_flush(hdev);
2583 hci_conn_hash_flush(hdev);
2584 hci_dev_unlock(hdev);
2589 atomic_set(&hdev->cmd_cnt, 1);
2590 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2592 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2595 hci_req_unlock(hdev);

/* hci_dev_reset_stat - HCIDEVRESTAT ioctl: zero the device statistics
 * after the usual user-channel/unconfigured checks. */
2600 int hci_dev_reset_stat(__u16 dev)
2602 struct hci_dev *hdev;
2605 hdev = hci_dev_get(dev);
2609 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2614 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2619 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

/* hci_dev_cmd - dispatcher for the HCISET* ioctls (auth, encrypt,
 * scan, link policy/mode, packet type, ACL/SCO MTU).
 * NOTE(review): the `case HCISET...:` labels for several branches and
 * all `break` lines are missing from this extract — the visible
 * hci_req_sync() calls belong to the auth/encrypt/scan/linkpol cases. */
2626 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2628 struct hci_dev *hdev;
2629 struct hci_dev_req dr;
2632 if (copy_from_user(&dr, arg, sizeof(dr)))
2635 hdev = hci_dev_get(dr.dev_id);
2639 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2644 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2649 if (hdev->dev_type != HCI_BREDR) {
2654 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2661 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2666 if (!lmp_encrypt_capable(hdev)) {
2671 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2672 /* Auth must be enabled first */
2673 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2679 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2684 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2689 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2693 case HCISETLINKMODE:
2694 hdev->link_mode = ((__u16) dr.dev_opt) &
2695 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2699 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low. */
2703 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2704 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2708 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2709 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* hci_get_dev_list - HCIGETDEVLIST ioctl: copy (id, flags) pairs for
 * up to dev_num registered devices to user space. Also cancels pending
 * auto-power-off and marks non-mgmt devices pairable while walking the
 * list.
 * NOTE(review): the `dev_num` declaration, list-full break, dl->dev_num
 * assignment and kfree(dl) lines are missing from this extract. */
2722 int hci_get_dev_list(void __user *arg)
2724 struct hci_dev *hdev;
2725 struct hci_dev_list_req *dl;
2726 struct hci_dev_req *dr;
2727 int n = 0, size, err;
2730 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages. */
2733 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2736 size = sizeof(*dl) + dev_num * sizeof(*dr);
2738 dl = kzalloc(size, GFP_KERNEL);
2744 read_lock(&hci_dev_list_lock);
2745 list_for_each_entry(hdev, &hci_dev_list, list) {
2746 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2747 cancel_delayed_work(&hdev->power_off);
2749 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2750 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2752 (dr + n)->dev_id = hdev->id;
2753 (dr + n)->dev_opt = hdev->flags;
2758 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
2761 size = sizeof(*dl) + n * sizeof(*dr);
2763 err = copy_to_user(arg, dl, size);
2766 return err ? -EFAULT : 0;

/* hci_get_dev_info - HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot
 * (address, type, flags, MTUs — LE values when not BR/EDR-capable,
 * link policy/mode, stats, features) and copy it to user space.
 * NOTE(review): the `err` declaration, `err = -EFAULT`, hci_dev_put()
 * and return lines are missing from this extract. */
2769 int hci_get_dev_info(void __user *arg)
2771 struct hci_dev *hdev;
2772 struct hci_dev_info di;
2775 if (copy_from_user(&di, arg, sizeof(di)))
2778 hdev = hci_dev_get(di.dev_id);
2782 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2783 cancel_delayed_work_sync(&hdev->power_off);
2785 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2786 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2788 strcpy(di.name, hdev->name);
2789 di.bdaddr = hdev->bdaddr;
2790 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2791 di.flags = hdev->flags;
2792 di.pkt_type = hdev->pkt_type;
2793 if (lmp_bredr_capable(hdev)) {
2794 di.acl_mtu = hdev->acl_mtu;
2795 di.acl_pkts = hdev->acl_pkts;
2796 di.sco_mtu = hdev->sco_mtu;
2797 di.sco_pkts = hdev->sco_pkts;
/* LE-only controller: report LE buffer parameters in the ACL fields. */
2799 di.acl_mtu = hdev->le_mtu;
2800 di.acl_pkts = hdev->le_pkts;
2804 di.link_policy = hdev->link_policy;
2805 di.link_mode = hdev->link_mode;
2807 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2808 memcpy(&di.features, &hdev->features, sizeof(di.features));
2810 if (copy_to_user(arg, &di, sizeof(di)))
2818 /* ---- Interface to HCI drivers ---- */

/* hci_rfkill_set_block - rfkill callback: mirror the block state into
 * HCI_RFKILLED and close the device when it is blocked outside the
 * setup/config phases. User-channel devices are left alone.
 * NOTE(review): the `if (blocked)`/`else` and `return` lines are
 * missing from this extract. */
2820 static int hci_rfkill_set_block(void *data, bool blocked)
2822 struct hci_dev *hdev = data;
2824 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2826 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2830 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2831 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2832 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2833 hci_dev_do_close(hdev);
2835 clear_bit(HCI_RFKILLED, &hdev->dev_flags);

/* rfkill operations table registered for each hdev. */
2841 static const struct rfkill_ops hci_rfkill_ops = {
2842 .set_block = hci_rfkill_set_block,

/* hci_power_on - power_on work handler: opens the device, then either
 * backs out (rfkill/unconfigured/no valid address), schedules the
 * auto-off timer, or announces the device via mgmt Index Added.
 * NOTE(review): the `int err` declaration, `return` after
 * mgmt_set_powered_failed and several braces are missing from this
 * extract. */
2845 static void hci_power_on(struct work_struct *work)
2847 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2850 BT_DBG("%s", hdev->name);
2852 err = hci_dev_do_open(hdev);
2854 mgmt_set_powered_failed(hdev, err);
2858 /* During the HCI setup phase, a few error conditions are
2859 * ignored and they need to be checked now. If they are still
2860 * valid, it is important to turn the device back off.
2862 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2863 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2864 (hdev->dev_type == HCI_BREDR &&
2865 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2866 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2867 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2868 hci_dev_do_close(hdev);
2869 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2870 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2871 HCI_AUTO_OFF_TIMEOUT);
2874 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2875 /* For unconfigured devices, set the HCI_RAW flag
2876 * so that userspace can easily identify them.
2878 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2879 set_bit(HCI_RAW, &hdev->flags);
2881 /* For fully configured devices, this will send
2882 * the Index Added event. For unconfigured devices,
2883 * it will send Unconfigued Index Added event.
2885 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2886 * and no event will be send.
2888 mgmt_index_added(hdev);
2889 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2890 /* Powering on the controller with HCI_CONFIG set only
2891 * happens with the transition from unconfigured to
2892 * configured. This will send the Index Added event.
2894 mgmt_index_added(hdev);

/* hci_power_off - power_off work handler: just closes the device.
 * NOTE(review): the continuation of the container_of() call is on a
 * line missing from this extract. */
2898 static void hci_power_off(struct work_struct *work)
2900 struct hci_dev *hdev = container_of(work, struct hci_dev,
2903 BT_DBG("%s", hdev->name);
2905 hci_dev_do_close(hdev);

/* hci_discov_off - discov_off delayed-work handler: lets mgmt time out
 * the discoverable mode. */
2908 static void hci_discov_off(struct work_struct *work)
2910 struct hci_dev *hdev;
2912 hdev = container_of(work, struct hci_dev, discov_off.work);
2914 BT_DBG("%s", hdev->name);
2916 mgmt_discoverable_timeout(hdev);
/* Key/UUID store helpers. The clear functions walk their list with the
 * _safe iterator and free each entry; the find functions do a linear
 * scan.
 * NOTE(review): kfree() calls inside the clear loops, `return` lines
 * and closing braces are missing from this extract.
 */

/* hci_uuids_clear - free every registered service UUID. */
2919 void hci_uuids_clear(struct hci_dev *hdev)
2921 struct bt_uuid *uuid, *tmp;
2923 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2924 list_del(&uuid->list);

/* hci_link_keys_clear - free every stored BR/EDR link key. */
2929 void hci_link_keys_clear(struct hci_dev *hdev)
2931 struct list_head *p, *n;
2933 list_for_each_safe(p, n, &hdev->link_keys) {
2934 struct link_key *key;
2936 key = list_entry(p, struct link_key, list);

/* hci_smp_ltks_clear - free every stored SMP long-term key. */
2943 void hci_smp_ltks_clear(struct hci_dev *hdev)
2945 struct smp_ltk *k, *tmp;
2947 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {

/* hci_smp_irks_clear - free every stored identity resolving key. */
2953 void hci_smp_irks_clear(struct hci_dev *hdev)
2955 struct smp_irk *k, *tmp;
2957 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {

/* hci_find_link_key - look up a BR/EDR link key by bdaddr. */
2963 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2967 list_for_each_entry(k, &hdev->link_keys, list)
2968 if (bacmp(bdaddr, &k->bdaddr) == 0)

/* hci_persistent_key - decide whether a link key should be stored
 * persistently, based on key type and both sides' authentication
 * requirements.
 * NOTE(review): the `return` keywords on the decision lines are
 * missing from this extract — each condition shown decides the result. */
2974 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2975 u8 key_type, u8 old_key_type)
2978 if (key_type < 0x03)
2981 /* Debug keys are insecure so don't store them persistently */
2982 if (key_type == HCI_LK_DEBUG_COMBINATION)
2985 /* Changed combination key and there's no previous one */
2986 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2989 /* Security mode 3 case */
2993 /* Neither local nor remote side had no-bonding as requirement */
2994 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2997 /* Local side had dedicated bonding as requirement */
2998 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3001 /* Remote side had dedicated bonding as requirement */
3002 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3005 /* If none of the above criteria match, then don't store the key
/* ltk_type_master - true when the LTK type denotes the master key. */
3010 static bool ltk_type_master(u8 type)
3012 return (type == SMP_LTK);

/* hci_find_ltk - look up a long-term key by EDiv/Rand and role. */
3015 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3020 list_for_each_entry(k, &hdev->long_term_keys, list) {
3021 if (k->ediv != ediv || k->rand != rand)
3024 if (ltk_type_master(k->type) != master)

/* hci_find_ltk_by_addr - look up a long-term key by peer address,
 * address type and role. */
3033 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3034 u8 addr_type, bool master)
3038 list_for_each_entry(k, &hdev->long_term_keys, list)
3039 if (addr_type == k->bdaddr_type &&
3040 bacmp(bdaddr, &k->bdaddr) == 0 &&
3041 ltk_type_master(k->type) == master)
3047 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3049 struct smp_irk *irk;
3051 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3052 if (!bacmp(&irk->rpa, rpa))
3056 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3057 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3058 bacpy(&irk->rpa, rpa);
3066 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 struct smp_irk *irk;
3071 /* Identity Address must be public or static random */
3072 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3075 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3076 if (addr_type == irk->addr_type &&
3077 bacmp(bdaddr, &irk->bdaddr) == 0)
3084 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3085 bdaddr_t *bdaddr, u8 *val, u8 type,
3086 u8 pin_len, bool *persistent)
3088 struct link_key *key, *old_key;
3091 old_key = hci_find_link_key(hdev, bdaddr);
3093 old_key_type = old_key->type;
3096 old_key_type = conn ? conn->key_type : 0xff;
3097 key = kzalloc(sizeof(*key), GFP_KERNEL);
3100 list_add(&key->list, &hdev->link_keys);
3103 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3105 /* Some buggy controller combinations generate a changed
3106 * combination key for legacy pairing even when there's no
3108 if (type == HCI_LK_CHANGED_COMBINATION &&
3109 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3110 type = HCI_LK_COMBINATION;
3112 conn->key_type = type;
3115 bacpy(&key->bdaddr, bdaddr);
3116 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3117 key->pin_len = pin_len;
3119 if (type == HCI_LK_CHANGED_COMBINATION)
3120 key->type = old_key_type;
3125 *persistent = hci_persistent_key(hdev, conn, type,
3131 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3132 u8 addr_type, u8 type, u8 authenticated,
3133 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3135 struct smp_ltk *key, *old_key;
3136 bool master = ltk_type_master(type);
3138 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3142 key = kzalloc(sizeof(*key), GFP_KERNEL);
3145 list_add(&key->list, &hdev->long_term_keys);
3148 bacpy(&key->bdaddr, bdaddr);
3149 key->bdaddr_type = addr_type;
3150 memcpy(key->val, tk, sizeof(key->val));
3151 key->authenticated = authenticated;
3154 key->enc_size = enc_size;
3160 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3161 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3163 struct smp_irk *irk;
3165 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3167 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3171 bacpy(&irk->bdaddr, bdaddr);
3172 irk->addr_type = addr_type;
3174 list_add(&irk->list, &hdev->identity_resolving_keys);
3177 memcpy(irk->val, val, 16);
3178 bacpy(&irk->rpa, rpa);
3183 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3185 struct link_key *key;
3187 key = hci_find_link_key(hdev, bdaddr);
3191 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3193 list_del(&key->list);
3199 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3201 struct smp_ltk *k, *tmp;
3204 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3205 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3208 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3215 return removed ? 0 : -ENOENT;
3218 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3220 struct smp_irk *k, *tmp;
3222 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3223 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3226 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3233 /* HCI command timer function */
3234 static void hci_cmd_timeout(struct work_struct *work)
3236 struct hci_dev *hdev = container_of(work, struct hci_dev,
3239 if (hdev->sent_cmd) {
3240 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3241 u16 opcode = __le16_to_cpu(sent->opcode);
3243 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3245 BT_ERR("%s command tx timeout", hdev->name);
3248 atomic_set(&hdev->cmd_cnt, 1);
3249 queue_work(hdev->workqueue, &hdev->cmd_work);
3252 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3255 struct oob_data *data;
3257 list_for_each_entry(data, &hdev->remote_oob_data, list)
3258 if (bacmp(bdaddr, &data->bdaddr) == 0)
3264 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3266 struct oob_data *data;
3268 data = hci_find_remote_oob_data(hdev, bdaddr);
3272 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3274 list_del(&data->list);
3280 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3282 struct oob_data *data, *n;
3284 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3285 list_del(&data->list);
3290 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3291 u8 *hash, u8 *randomizer)
3293 struct oob_data *data;
3295 data = hci_find_remote_oob_data(hdev, bdaddr);
3297 data = kmalloc(sizeof(*data), GFP_KERNEL);
3301 bacpy(&data->bdaddr, bdaddr);
3302 list_add(&data->list, &hdev->remote_oob_data);
3305 memcpy(data->hash192, hash, sizeof(data->hash192));
3306 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3308 memset(data->hash256, 0, sizeof(data->hash256));
3309 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3311 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3316 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3317 u8 *hash192, u8 *randomizer192,
3318 u8 *hash256, u8 *randomizer256)
3320 struct oob_data *data;
3322 data = hci_find_remote_oob_data(hdev, bdaddr);
3324 data = kmalloc(sizeof(*data), GFP_KERNEL);
3328 bacpy(&data->bdaddr, bdaddr);
3329 list_add(&data->list, &hdev->remote_oob_data);
3332 memcpy(data->hash192, hash192, sizeof(data->hash192));
3333 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3335 memcpy(data->hash256, hash256, sizeof(data->hash256));
3336 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3338 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3343 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3344 bdaddr_t *bdaddr, u8 type)
3346 struct bdaddr_list *b;
3348 list_for_each_entry(b, &hdev->blacklist, list) {
3349 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3356 static void hci_blacklist_clear(struct hci_dev *hdev)
3358 struct list_head *p, *n;
3360 list_for_each_safe(p, n, &hdev->blacklist) {
3361 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3368 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3370 struct bdaddr_list *entry;
3372 if (!bacmp(bdaddr, BDADDR_ANY))
3375 if (hci_blacklist_lookup(hdev, bdaddr, type))
3378 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3382 bacpy(&entry->bdaddr, bdaddr);
3383 entry->bdaddr_type = type;
3385 list_add(&entry->list, &hdev->blacklist);
3390 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3392 struct bdaddr_list *entry;
3394 if (!bacmp(bdaddr, BDADDR_ANY)) {
3395 hci_blacklist_clear(hdev);
3399 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3403 list_del(&entry->list);
3409 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3410 bdaddr_t *bdaddr, u8 type)
3412 struct bdaddr_list *b;
3414 list_for_each_entry(b, &hdev->le_white_list, list) {
3415 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3422 void hci_white_list_clear(struct hci_dev *hdev)
3424 struct list_head *p, *n;
3426 list_for_each_safe(p, n, &hdev->le_white_list) {
3427 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3434 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3436 struct bdaddr_list *entry;
3438 if (!bacmp(bdaddr, BDADDR_ANY))
3441 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3445 bacpy(&entry->bdaddr, bdaddr);
3446 entry->bdaddr_type = type;
3448 list_add(&entry->list, &hdev->le_white_list);
3453 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3455 struct bdaddr_list *entry;
3457 if (!bacmp(bdaddr, BDADDR_ANY))
3460 entry = hci_white_list_lookup(hdev, bdaddr, type);
3464 list_del(&entry->list);
3470 /* This function requires the caller holds hdev->lock */
3471 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3472 bdaddr_t *addr, u8 addr_type)
3474 struct hci_conn_params *params;
3476 /* The conn params list only contains identity addresses */
3477 if (!hci_is_identity_address(addr, addr_type))
3480 list_for_each_entry(params, &hdev->le_conn_params, list) {
3481 if (bacmp(¶ms->addr, addr) == 0 &&
3482 params->addr_type == addr_type) {
3490 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3492 struct hci_conn *conn;
3494 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3498 if (conn->dst_type != type)
3501 if (conn->state != BT_CONNECTED)
3507 /* This function requires the caller holds hdev->lock */
3508 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3509 bdaddr_t *addr, u8 addr_type)
3511 struct hci_conn_params *param;
3513 /* The list only contains identity addresses */
3514 if (!hci_is_identity_address(addr, addr_type))
3517 list_for_each_entry(param, list, action) {
3518 if (bacmp(¶m->addr, addr) == 0 &&
3519 param->addr_type == addr_type)
3526 /* This function requires the caller holds hdev->lock */
3527 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3528 bdaddr_t *addr, u8 addr_type)
3530 struct hci_conn_params *params;
3532 if (!hci_is_identity_address(addr, addr_type))
3535 params = hci_conn_params_lookup(hdev, addr, addr_type);
3539 params = kzalloc(sizeof(*params), GFP_KERNEL);
3541 BT_ERR("Out of memory");
3545 bacpy(¶ms->addr, addr);
3546 params->addr_type = addr_type;
3548 list_add(¶ms->list, &hdev->le_conn_params);
3549 INIT_LIST_HEAD(¶ms->action);
3551 params->conn_min_interval = hdev->le_conn_min_interval;
3552 params->conn_max_interval = hdev->le_conn_max_interval;
3553 params->conn_latency = hdev->le_conn_latency;
3554 params->supervision_timeout = hdev->le_supv_timeout;
3555 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3557 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3562 /* This function requires the caller holds hdev->lock */
3563 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3566 struct hci_conn_params *params;
3568 params = hci_conn_params_add(hdev, addr, addr_type);
3572 if (params->auto_connect == auto_connect)
3575 list_del_init(¶ms->action);
3577 switch (auto_connect) {
3578 case HCI_AUTO_CONN_DISABLED:
3579 case HCI_AUTO_CONN_LINK_LOSS:
3580 hci_update_background_scan(hdev);
3582 case HCI_AUTO_CONN_REPORT:
3583 list_add(¶ms->action, &hdev->pend_le_reports);
3584 hci_update_background_scan(hdev);
3586 case HCI_AUTO_CONN_ALWAYS:
3587 if (!is_connected(hdev, addr, addr_type)) {
3588 list_add(¶ms->action, &hdev->pend_le_conns);
3589 hci_update_background_scan(hdev);
3594 params->auto_connect = auto_connect;
3596 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3602 /* This function requires the caller holds hdev->lock */
3603 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3605 struct hci_conn_params *params;
3607 params = hci_conn_params_lookup(hdev, addr, addr_type);
3611 list_del(¶ms->action);
3612 list_del(¶ms->list);
3615 hci_update_background_scan(hdev);
3617 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3620 /* This function requires the caller holds hdev->lock */
3621 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3623 struct hci_conn_params *params, *tmp;
3625 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3626 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3628 list_del(¶ms->list);
3632 BT_DBG("All LE disabled connection parameters were removed");
3635 /* This function requires the caller holds hdev->lock */
3636 void hci_conn_params_clear_all(struct hci_dev *hdev)
3638 struct hci_conn_params *params, *tmp;
3640 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3641 list_del(¶ms->action);
3642 list_del(¶ms->list);
3646 hci_update_background_scan(hdev);
3648 BT_DBG("All LE connection parameters were removed");
3651 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3654 BT_ERR("Failed to start inquiry: status %d", status);
3657 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3658 hci_dev_unlock(hdev);
3663 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3665 /* General inquiry access code (GIAC) */
3666 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3667 struct hci_request req;
3668 struct hci_cp_inquiry cp;
3672 BT_ERR("Failed to disable LE scanning: status %d", status);
3676 switch (hdev->discovery.type) {
3677 case DISCOV_TYPE_LE:
3679 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3680 hci_dev_unlock(hdev);
3683 case DISCOV_TYPE_INTERLEAVED:
3684 hci_req_init(&req, hdev);
3686 memset(&cp, 0, sizeof(cp));
3687 memcpy(&cp.lap, lap, sizeof(cp.lap));
3688 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3689 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3693 hci_inquiry_cache_flush(hdev);
3695 err = hci_req_run(&req, inquiry_complete);
3697 BT_ERR("Inquiry request failed: err %d", err);
3698 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3701 hci_dev_unlock(hdev);
3706 static void le_scan_disable_work(struct work_struct *work)
3708 struct hci_dev *hdev = container_of(work, struct hci_dev,
3709 le_scan_disable.work);
3710 struct hci_request req;
3713 BT_DBG("%s", hdev->name);
3715 hci_req_init(&req, hdev);
3717 hci_req_add_le_scan_disable(&req);
3719 err = hci_req_run(&req, le_scan_disable_work_complete);
3721 BT_ERR("Disable LE scanning request failed: err %d", err);
3724 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3726 struct hci_dev *hdev = req->hdev;
3728 /* If we're advertising or initiating an LE connection we can't
3729 * go ahead and change the random address at this time. This is
3730 * because the eventual initiator address used for the
3731 * subsequently created connection will be undefined (some
3732 * controllers use the new address and others the one we had
3733 * when the operation started).
3735 * In this kind of scenario skip the update and let the random
3736 * address be updated at the next cycle.
3738 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3739 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3740 BT_DBG("Deferring random address update");
3744 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3747 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3750 struct hci_dev *hdev = req->hdev;
3753 /* If privacy is enabled use a resolvable private address. If
3754 * current RPA has expired or there is something else than
3755 * the current RPA in use, then generate a new one.
3757 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3760 *own_addr_type = ADDR_LE_DEV_RANDOM;
3762 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3763 !bacmp(&hdev->random_addr, &hdev->rpa))
3766 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3768 BT_ERR("%s failed to generate new RPA", hdev->name);
3772 set_random_addr(req, &hdev->rpa);
3774 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3775 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3780 /* In case of required privacy without resolvable private address,
3781 * use an unresolvable private address. This is useful for active
3782 * scanning and non-connectable advertising.
3784 if (require_privacy) {
3787 get_random_bytes(&urpa, 6);
3788 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3790 *own_addr_type = ADDR_LE_DEV_RANDOM;
3791 set_random_addr(req, &urpa);
3795 /* If forcing static address is in use or there is no public
3796 * address use the static address as random address (but skip
3797 * the HCI command if the current random address is already the
3800 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3801 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3802 *own_addr_type = ADDR_LE_DEV_RANDOM;
3803 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3804 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3805 &hdev->static_addr);
3809 /* Neither privacy nor static address is being used so use a
3812 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3817 /* Copy the Identity Address of the controller.
3819 * If the controller has a public BD_ADDR, then by default use that one.
3820 * If this is a LE only controller without a public address, default to
3821 * the static random address.
3823 * For debugging purposes it is possible to force controllers with a
3824 * public address to use the static random address instead.
3826 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3829 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3830 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3831 bacpy(bdaddr, &hdev->static_addr);
3832 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3834 bacpy(bdaddr, &hdev->bdaddr);
3835 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3839 /* Alloc HCI device */
3840 struct hci_dev *hci_alloc_dev(void)
3842 struct hci_dev *hdev;
3844 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3848 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3849 hdev->esco_type = (ESCO_HV1);
3850 hdev->link_mode = (HCI_LM_ACCEPT);
3851 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3852 hdev->io_capability = 0x03; /* No Input No Output */
3853 hdev->manufacturer = 0xffff; /* Default to internal use */
3854 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3855 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3857 hdev->sniff_max_interval = 800;
3858 hdev->sniff_min_interval = 80;
3860 hdev->le_adv_channel_map = 0x07;
3861 hdev->le_scan_interval = 0x0060;
3862 hdev->le_scan_window = 0x0030;
3863 hdev->le_conn_min_interval = 0x0028;
3864 hdev->le_conn_max_interval = 0x0038;
3865 hdev->le_conn_latency = 0x0000;
3866 hdev->le_supv_timeout = 0x002a;
3868 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3869 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3870 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3871 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3873 mutex_init(&hdev->lock);
3874 mutex_init(&hdev->req_lock);
3876 INIT_LIST_HEAD(&hdev->mgmt_pending);
3877 INIT_LIST_HEAD(&hdev->blacklist);
3878 INIT_LIST_HEAD(&hdev->uuids);
3879 INIT_LIST_HEAD(&hdev->link_keys);
3880 INIT_LIST_HEAD(&hdev->long_term_keys);
3881 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3882 INIT_LIST_HEAD(&hdev->remote_oob_data);
3883 INIT_LIST_HEAD(&hdev->le_white_list);
3884 INIT_LIST_HEAD(&hdev->le_conn_params);
3885 INIT_LIST_HEAD(&hdev->pend_le_conns);
3886 INIT_LIST_HEAD(&hdev->pend_le_reports);
3887 INIT_LIST_HEAD(&hdev->conn_hash.list);
3889 INIT_WORK(&hdev->rx_work, hci_rx_work);
3890 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3891 INIT_WORK(&hdev->tx_work, hci_tx_work);
3892 INIT_WORK(&hdev->power_on, hci_power_on);
3894 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3895 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3896 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3898 skb_queue_head_init(&hdev->rx_q);
3899 skb_queue_head_init(&hdev->cmd_q);
3900 skb_queue_head_init(&hdev->raw_q);
3902 init_waitqueue_head(&hdev->req_wait_q);
3904 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3906 hci_init_sysfs(hdev);
3907 discovery_init(hdev);
3911 EXPORT_SYMBOL(hci_alloc_dev);
3913 /* Free HCI device */
3914 void hci_free_dev(struct hci_dev *hdev)
3916 /* will free via device release */
3917 put_device(&hdev->dev);
3919 EXPORT_SYMBOL(hci_free_dev);
3921 /* Register HCI device */
3922 int hci_register_dev(struct hci_dev *hdev)
3926 if (!hdev->open || !hdev->close)
3929 /* Do not allow HCI_AMP devices to register at index 0,
3930 * so the index can be used as the AMP controller ID.
3932 switch (hdev->dev_type) {
3934 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3937 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3946 sprintf(hdev->name, "hci%d", id);
3949 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3951 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3952 WQ_MEM_RECLAIM, 1, hdev->name);
3953 if (!hdev->workqueue) {
3958 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3959 WQ_MEM_RECLAIM, 1, hdev->name);
3960 if (!hdev->req_workqueue) {
3961 destroy_workqueue(hdev->workqueue);
3966 if (!IS_ERR_OR_NULL(bt_debugfs))
3967 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3969 dev_set_name(&hdev->dev, "%s", hdev->name);
3971 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3973 if (IS_ERR(hdev->tfm_aes)) {
3974 BT_ERR("Unable to create crypto context");
3975 error = PTR_ERR(hdev->tfm_aes);
3976 hdev->tfm_aes = NULL;
3980 error = device_add(&hdev->dev);
3984 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3985 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3988 if (rfkill_register(hdev->rfkill) < 0) {
3989 rfkill_destroy(hdev->rfkill);
3990 hdev->rfkill = NULL;
3994 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3995 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3997 set_bit(HCI_SETUP, &hdev->dev_flags);
3998 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4000 if (hdev->dev_type == HCI_BREDR) {
4001 /* Assume BR/EDR support until proven otherwise (such as
4002 * through reading supported features during init.
4004 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4007 write_lock(&hci_dev_list_lock);
4008 list_add(&hdev->list, &hci_dev_list);
4009 write_unlock(&hci_dev_list_lock);
4011 /* Devices that are marked for raw-only usage are unconfigured
4012 * and should not be included in normal operation.
4014 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4015 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4017 hci_notify(hdev, HCI_DEV_REG);
4020 queue_work(hdev->req_workqueue, &hdev->power_on);
4025 crypto_free_blkcipher(hdev->tfm_aes);
4027 destroy_workqueue(hdev->workqueue);
4028 destroy_workqueue(hdev->req_workqueue);
4030 ida_simple_remove(&hci_index_ida, hdev->id);
4034 EXPORT_SYMBOL(hci_register_dev);
4036 /* Unregister HCI device */
4037 void hci_unregister_dev(struct hci_dev *hdev)
4041 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4043 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4047 write_lock(&hci_dev_list_lock);
4048 list_del(&hdev->list);
4049 write_unlock(&hci_dev_list_lock);
4051 hci_dev_do_close(hdev);
4053 for (i = 0; i < NUM_REASSEMBLY; i++)
4054 kfree_skb(hdev->reassembly[i]);
4056 cancel_work_sync(&hdev->power_on);
4058 if (!test_bit(HCI_INIT, &hdev->flags) &&
4059 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4060 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4062 mgmt_index_removed(hdev);
4063 hci_dev_unlock(hdev);
4066 /* mgmt_index_removed should take care of emptying the
4068 BUG_ON(!list_empty(&hdev->mgmt_pending));
4070 hci_notify(hdev, HCI_DEV_UNREG);
4073 rfkill_unregister(hdev->rfkill);
4074 rfkill_destroy(hdev->rfkill);
4078 crypto_free_blkcipher(hdev->tfm_aes);
4080 device_del(&hdev->dev);
4082 debugfs_remove_recursive(hdev->debugfs);
4084 destroy_workqueue(hdev->workqueue);
4085 destroy_workqueue(hdev->req_workqueue);
4088 hci_blacklist_clear(hdev);
4089 hci_uuids_clear(hdev);
4090 hci_link_keys_clear(hdev);
4091 hci_smp_ltks_clear(hdev);
4092 hci_smp_irks_clear(hdev);
4093 hci_remote_oob_data_clear(hdev);
4094 hci_white_list_clear(hdev);
4095 hci_conn_params_clear_all(hdev);
4096 hci_dev_unlock(hdev);
4100 ida_simple_remove(&hci_index_ida, id);
4102 EXPORT_SYMBOL(hci_unregister_dev);
4104 /* Suspend HCI device */
4105 int hci_suspend_dev(struct hci_dev *hdev)
4107 hci_notify(hdev, HCI_DEV_SUSPEND);
4110 EXPORT_SYMBOL(hci_suspend_dev);
4112 /* Resume HCI device */
4113 int hci_resume_dev(struct hci_dev *hdev)
4115 hci_notify(hdev, HCI_DEV_RESUME);
4118 EXPORT_SYMBOL(hci_resume_dev);
4120 /* Receive frame from HCI drivers */
4121 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4123 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4124 && !test_bit(HCI_INIT, &hdev->flags))) {
4130 bt_cb(skb)->incoming = 1;
4133 __net_timestamp(skb);
4135 skb_queue_tail(&hdev->rx_q, skb);
4136 queue_work(hdev->workqueue, &hdev->rx_work);
4140 EXPORT_SYMBOL(hci_recv_frame);
4142 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4143 int count, __u8 index)
4148 struct sk_buff *skb;
4149 struct bt_skb_cb *scb;
4151 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4152 index >= NUM_REASSEMBLY)
4155 skb = hdev->reassembly[index];
4159 case HCI_ACLDATA_PKT:
4160 len = HCI_MAX_FRAME_SIZE;
4161 hlen = HCI_ACL_HDR_SIZE;
4164 len = HCI_MAX_EVENT_SIZE;
4165 hlen = HCI_EVENT_HDR_SIZE;
4167 case HCI_SCODATA_PKT:
4168 len = HCI_MAX_SCO_SIZE;
4169 hlen = HCI_SCO_HDR_SIZE;
4173 skb = bt_skb_alloc(len, GFP_ATOMIC);
4177 scb = (void *) skb->cb;
4179 scb->pkt_type = type;
4181 hdev->reassembly[index] = skb;
4185 scb = (void *) skb->cb;
4186 len = min_t(uint, scb->expect, count);
4188 memcpy(skb_put(skb, len), data, len);
4197 if (skb->len == HCI_EVENT_HDR_SIZE) {
4198 struct hci_event_hdr *h = hci_event_hdr(skb);
4199 scb->expect = h->plen;
4201 if (skb_tailroom(skb) < scb->expect) {
4203 hdev->reassembly[index] = NULL;
4209 case HCI_ACLDATA_PKT:
4210 if (skb->len == HCI_ACL_HDR_SIZE) {
4211 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4212 scb->expect = __le16_to_cpu(h->dlen);
4214 if (skb_tailroom(skb) < scb->expect) {
4216 hdev->reassembly[index] = NULL;
4222 case HCI_SCODATA_PKT:
4223 if (skb->len == HCI_SCO_HDR_SIZE) {
4224 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4225 scb->expect = h->dlen;
4227 if (skb_tailroom(skb) < scb->expect) {
4229 hdev->reassembly[index] = NULL;
4236 if (scb->expect == 0) {
4237 /* Complete frame */
4239 bt_cb(skb)->pkt_type = type;
4240 hci_recv_frame(hdev, skb);
4242 hdev->reassembly[index] = NULL;
4250 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4254 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4258 rem = hci_reassembly(hdev, type, data, count, type - 1);
4262 data += (count - rem);
4268 EXPORT_SYMBOL(hci_recv_fragment);
4270 #define STREAM_REASSEMBLY 0
4272 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4278 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4281 struct { char type; } *pkt;
4283 /* Start of the frame */
4290 type = bt_cb(skb)->pkt_type;
4292 rem = hci_reassembly(hdev, type, data, count,
4297 data += (count - rem);
4303 EXPORT_SYMBOL(hci_recv_stream_fragment);
4305 /* ---- Interface to upper protocols ---- */
4307 int hci_register_cb(struct hci_cb *cb)
4309 BT_DBG("%p name %s", cb, cb->name);
4311 write_lock(&hci_cb_list_lock);
4312 list_add(&cb->list, &hci_cb_list);
4313 write_unlock(&hci_cb_list_lock);
4317 EXPORT_SYMBOL(hci_register_cb);
4319 int hci_unregister_cb(struct hci_cb *cb)
4321 BT_DBG("%p name %s", cb, cb->name);
4323 write_lock(&hci_cb_list_lock);
4324 list_del(&cb->list);
4325 write_unlock(&hci_cb_list_lock);
4329 EXPORT_SYMBOL(hci_unregister_cb);
4331 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4333 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4336 __net_timestamp(skb);
4338 /* Send copy to monitor */
4339 hci_send_to_monitor(hdev, skb);
4341 if (atomic_read(&hdev->promisc)) {
4342 /* Send copy to the sockets */
4343 hci_send_to_sock(hdev, skb);
4346 /* Get rid of skb owner, prior to sending to the driver. */
4349 if (hdev->send(hdev, skb) < 0)
4350 BT_ERR("%s sending frame failed", hdev->name);
4353 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4355 skb_queue_head_init(&req->cmd_q);
4360 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4362 struct hci_dev *hdev = req->hdev;
4363 struct sk_buff *skb;
4364 unsigned long flags;
4366 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4368 /* If an error occured during request building, remove all HCI
4369 * commands queued on the HCI request queue.
4372 skb_queue_purge(&req->cmd_q);
4376 /* Do not allow empty requests */
4377 if (skb_queue_empty(&req->cmd_q))
4380 skb = skb_peek_tail(&req->cmd_q);
4381 bt_cb(skb)->req.complete = complete;
4383 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4384 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4385 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4387 queue_work(hdev->workqueue, &hdev->cmd_work);
4392 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4393 u32 plen, const void *param)
4395 int len = HCI_COMMAND_HDR_SIZE + plen;
4396 struct hci_command_hdr *hdr;
4397 struct sk_buff *skb;
4399 skb = bt_skb_alloc(len, GFP_ATOMIC);
4403 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4404 hdr->opcode = cpu_to_le16(opcode);
4408 memcpy(skb_put(skb, plen), param, plen);
4410 BT_DBG("skb len %d", skb->len);
4412 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4417 /* Send HCI command */
4418 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4421 struct sk_buff *skb;
4423 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4425 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4427 BT_ERR("%s no memory for command", hdev->name);
4431 /* Stand-alone HCI commands must be flaged as
4432 * single-command requests.
4434 bt_cb(skb)->req.start = true;
4436 skb_queue_tail(&hdev->cmd_q, skb);
4437 queue_work(hdev->workqueue, &hdev->cmd_work);
4442 /* Queue a command to an asynchronous HCI request */
4443 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4444 const void *param, u8 event)
4446 struct hci_dev *hdev = req->hdev;
4447 struct sk_buff *skb;
4449 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4451 /* If an error occured during request building, there is no point in
4452 * queueing the HCI command. We can simply return.
4457 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4459 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4460 hdev->name, opcode);
4465 if (skb_queue_empty(&req->cmd_q))
4466 bt_cb(skb)->req.start = true;
4468 bt_cb(skb)->req.event = event;
4470 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a request command completed by the
 * default Command Complete/Status event (event == 0).
 */
4473 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4476 hci_req_add_ev(req, opcode, plen, param, 0);
4479 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of hdev->sent_cmd if its
 * opcode matches @opcode; otherwise NULL.  (The NULL-return lines are
 * missing from this extraction.)
 */
4480 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4482 struct hci_command_hdr *hdr;
4484 if (!hdev->sent_cmd)
4487 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order */
4489 if (hdr->opcode != cpu_to_le16(opcode))
4492 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4494 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an hci_acl_hdr (packed handle+flags, payload length) to @skb.
 * NOTE(review): the local 'len' used below is declared on a line
 * missing from this extraction (upstream: __u16 len = skb->len,
 * captured before skb_push) -- confirm against upstream.
 */
4498 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4500 struct hci_acl_hdr *hdr;
4503 skb_push(skb, HCI_ACL_HDR_SIZE);
4504 skb_reset_transport_header(skb);
4505 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* Handle and packet-boundary/broadcast flags share one 16-bit field */
4506 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4507 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to @skb and to each fragment on its frag_list, then
 * queue everything on @queue.  The head fragment keeps ACL_START in
 * @flags; continuations have it cleared.  All fragments are spliced in
 * atomically under queue->lock so the scheduler never sees a partial
 * packet.
 * NOTE(review): case labels, the default-branch return and the
 * fragment-loop header are on lines missing from this extraction.
 */
4510 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4511 struct sk_buff *skb, __u16 flags)
4513 struct hci_conn *conn = chan->conn;
4514 struct hci_dev *hdev = conn->hdev;
4515 struct sk_buff *list;
4517 skb->len = skb_headlen(skb);
4520 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle, AMP the channel handle */
4522 switch (hdev->dev_type) {
4524 hci_add_acl_hdr(skb, conn->handle, flags);
4527 hci_add_acl_hdr(skb, chan->handle, flags);
4530 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4534 list = skb_shinfo(skb)->frag_list;
4536 /* Non fragmented */
4537 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4539 skb_queue_tail(queue, skb);
4542 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued individually */
4544 skb_shinfo(skb)->frag_list = NULL;
4546 /* Queue all fragments atomically */
4547 spin_lock(&queue->lock);
4549 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry ACL_START */
4551 flags &= ~ACL_START;
4554 skb = list; list = list->next;
4556 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4557 hci_add_acl_hdr(skb, conn->handle, flags);
4559 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4561 __skb_queue_tail(queue, skb);
4564 spin_unlock(&queue->lock);
/* Queue an ACL skb on the channel's data queue and kick the TX work */
4568 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4570 struct hci_dev *hdev = chan->conn->hdev;
4572 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4574 hci_queue_acl(chan, &chan->data_q, skb, flags);
4576 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend an hci_sco_hdr to @skb, tag it HCI_SCODATA_PKT and queue it
 * on the connection's data queue for the TX work.
 */
4580 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4582 struct hci_dev *hdev = conn->hdev;
4583 struct hci_sco_hdr hdr;
4585 BT_DBG("%s len %d", hdev->name, skb->len);
4587 hdr.handle = cpu_to_le16(conn->handle);
/* NOTE(review): no cpu_to_le16 on dlen -- upstream hci_sco_hdr.dlen is
 * a single byte, so none is needed; confirm against hci.h */
4588 hdr.dlen = skb->len;
4590 skb_push(skb, HCI_SCO_HDR_SIZE);
4591 skb_reset_transport_header(skb);
4592 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4594 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4596 skb_queue_tail(&conn->data_q, skb);
4597 queue_work(hdev->workqueue, &hdev->tx_work);
4600 /* ---- HCI TX task (outgoing data) ---- */
4602 /* HCI Connection scheduler */
/* Pick the connection of @type that has queued data and the fewest
 * in-flight packets (smallest c->sent); *quote receives how many
 * packets it may send, derived from the controller's free buffer count
 * for that link type.  Iterates conn_hash with RCU list helpers.
 * NOTE(review): rcu_read_lock()/unlock(), the quote computation and
 * the return statement are on lines missing from this extraction.
 */
4603 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4606 struct hci_conn_hash *h = &hdev->conn_hash;
4607 struct hci_conn *conn = NULL, *c;
4608 unsigned int num = 0, min = ~0;
4610 /* We don't have to lock device here. Connections are always
4611 * added and removed with TX task disabled. */
4615 list_for_each_entry_rcu(c, &h->list, list) {
4616 if (c->type != type || skb_queue_empty(&c->data_q))
4619 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets */
4624 if (c->sent < min) {
/* All connections of this type inspected -- stop early */
4629 if (hci_conn_num(hdev, type) == num)
/* Derive the per-round quote from the free buffer count */
4638 switch (conn->type) {
4640 cnt = hdev->acl_cnt;
4644 cnt = hdev->sco_cnt;
4647 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4651 BT_ERR("Unknown link type");
4659 BT_DBG("conn %p quote %d", conn, *quote);
/* Link TX timeout handler: forcibly disconnect every connection of
 * @type that still has unacknowledged packets (c->sent != 0).
 */
4663 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4665 struct hci_conn_hash *h = &hdev->conn_hash;
4668 BT_ERR("%s link tx timeout", hdev->name);
4672 /* Kill stalled connections */
4673 list_for_each_entry_rcu(c, &h->list, list) {
4674 if (c->type == type && c->sent) {
4675 BT_ERR("%s killing stalled connection %pMR",
4676 hdev->name, &c->dst);
4677 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: among connections of @type, choose the channel
 * whose head skb has the highest priority; ties go to the connection
 * with the fewest in-flight packets.  *quote receives how many packets
 * the chosen channel may send, derived from the free buffer/block
 * count for the link type.
 * NOTE(review): rcu locking, parts of the selection bodies, the quote
 * computation and the return are on lines missing from this
 * extraction.
 */
4684 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4687 struct hci_conn_hash *h = &hdev->conn_hash;
4688 struct hci_chan *chan = NULL;
4689 unsigned int num = 0, min = ~0, cur_prio = 0;
4690 struct hci_conn *conn;
4691 int cnt, q, conn_num = 0;
4693 BT_DBG("%s", hdev->name);
4697 list_for_each_entry_rcu(conn, &h->list, list) {
4698 struct hci_chan *tmp;
4700 if (conn->type != type)
4703 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4708 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4709 struct sk_buff *skb;
4711 if (skb_queue_empty(&tmp->data_q))
4714 skb = skb_peek(&tmp->data_q);
/* Skip channels whose head is below the best priority seen so far */
4715 if (skb->priority < cur_prio)
/* Higher priority found: it becomes the new selection baseline */
4718 if (skb->priority > cur_prio) {
4721 cur_prio = skb->priority;
4726 if (conn->sent < min) {
4732 if (hci_conn_num(hdev, type) == conn_num)
/* Derive the quote from the free buffer/block count */
4741 switch (chan->conn->type) {
4743 cnt = hdev->acl_cnt;
4746 cnt = hdev->block_cnt;
4750 cnt = hdev->sco_cnt;
4753 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4757 BT_ERR("Unknown link type");
4762 BT_DBG("chan %p quote %d", chan, *quote);
/* Priority aging: promote the head skb of starved channels toward
 * HCI_PRIO_MAX - 1 so low-priority traffic is not starved forever by
 * higher-priority channels.
 * NOTE(review): rcu locking and parts of the loop bodies (including
 * the starvation test) are on lines missing from this extraction.
 */
4766 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4768 struct hci_conn_hash *h = &hdev->conn_hash;
4769 struct hci_conn *conn;
4772 BT_DBG("%s", hdev->name);
4776 list_for_each_entry_rcu(conn, &h->list, list) {
4777 struct hci_chan *chan;
4779 if (conn->type != type)
4782 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4787 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4788 struct sk_buff *skb;
4795 if (skb_queue_empty(&chan->data_q))
4798 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling */
4799 if (skb->priority >= HCI_PRIO_MAX - 1)
4802 skb->priority = HCI_PRIO_MAX - 1;
4804 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4808 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks one ACL packet consumes: the
 * payload length (skb->len minus the ACL header) rounded up to the
 * controller's block size.
 */
4816 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4818 /* Calculate count of blocks used by this packet */
4819 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller reports no free ACL buffers (cnt == 0) and nothing
 * has been sent since acl_last_tx + HCI_ACL_TX_TIMEOUT, treat the ACL
 * link as stalled and kill its connections.  Skipped while the device
 * is still unconfigured.
 */
4822 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4824 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4825 /* ACL tx timeout must be longer than maximum
4826 * link supervision timeout (40.9 seconds) */
4827 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4828 HCI_ACL_TX_TIMEOUT))
4829 hci_link_tx_to(hdev, ACL_LINK);
4833 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4835 unsigned int cnt = hdev->acl_cnt;
4836 struct hci_chan *chan;
4837 struct sk_buff *skb;
4840 __check_timeout(hdev, cnt);
4842 while (hdev->acl_cnt &&
4843 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4844 u32 priority = (skb_peek(&chan->data_q))->priority;
4845 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4846 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4847 skb->len, skb->priority);
4849 /* Stop if priority has changed */
4850 if (skb->priority < priority)
4853 skb = skb_dequeue(&chan->data_q);
4855 hci_conn_enter_active_mode(chan->conn,
4856 bt_cb(skb)->force_active);
4858 hci_send_frame(hdev, skb);
4859 hdev->acl_last_tx = jiffies;
4867 if (cnt != hdev->acl_cnt)
4868 hci_prio_recalculate(hdev, ACL_LINK);
4871 static void hci_sched_acl_blk(struct hci_dev *hdev)
4873 unsigned int cnt = hdev->block_cnt;
4874 struct hci_chan *chan;
4875 struct sk_buff *skb;
4879 __check_timeout(hdev, cnt);
4881 BT_DBG("%s", hdev->name);
4883 if (hdev->dev_type == HCI_AMP)
4888 while (hdev->block_cnt > 0 &&
4889 (chan = hci_chan_sent(hdev, type, "e))) {
4890 u32 priority = (skb_peek(&chan->data_q))->priority;
4891 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4894 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4895 skb->len, skb->priority);
4897 /* Stop if priority has changed */
4898 if (skb->priority < priority)
4901 skb = skb_dequeue(&chan->data_q);
4903 blocks = __get_blocks(hdev, skb);
4904 if (blocks > hdev->block_cnt)
4907 hci_conn_enter_active_mode(chan->conn,
4908 bt_cb(skb)->force_active);
4910 hci_send_frame(hdev, skb);
4911 hdev->acl_last_tx = jiffies;
4913 hdev->block_cnt -= blocks;
4916 chan->sent += blocks;
4917 chan->conn->sent += blocks;
4921 if (cnt != hdev->block_cnt)
4922 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet-based or block-based variant
 * according to the controller's flow control mode; bail out early when
 * the controller type has no links of the relevant kind.
 */
4925 static void hci_sched_acl(struct hci_dev *hdev)
4927 BT_DBG("%s", hdev->name);
4929 /* No ACL link over BR/EDR controller */
4930 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4933 /* No AMP link over AMP controller */
4934 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4937 switch (hdev->flow_ctl_mode) {
4938 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4939 hci_sched_acl_pkt(hdev);
4942 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4943 hci_sched_acl_blk(hdev);
4949 static void hci_sched_sco(struct hci_dev *hdev)
4951 struct hci_conn *conn;
4952 struct sk_buff *skb;
4955 BT_DBG("%s", hdev->name);
4957 if (!hci_conn_num(hdev, SCO_LINK))
4960 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4961 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4962 BT_DBG("skb %p len %d", skb, skb->len);
4963 hci_send_frame(hdev, skb);
4966 if (conn->sent == ~0)
4972 static void hci_sched_esco(struct hci_dev *hdev)
4974 struct hci_conn *conn;
4975 struct sk_buff *skb;
4978 BT_DBG("%s", hdev->name);
4980 if (!hci_conn_num(hdev, ESCO_LINK))
4983 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4985 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4986 BT_DBG("skb %p len %d", skb, skb->len);
4987 hci_send_frame(hdev, skb);
4990 if (conn->sent == ~0)
4996 static void hci_sched_le(struct hci_dev *hdev)
4998 struct hci_chan *chan;
4999 struct sk_buff *skb;
5000 int quote, cnt, tmp;
5002 BT_DBG("%s", hdev->name);
5004 if (!hci_conn_num(hdev, LE_LINK))
5007 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5008 /* LE tx timeout must be longer than maximum
5009 * link supervision timeout (40.9 seconds) */
5010 if (!hdev->le_cnt && hdev->le_pkts &&
5011 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5012 hci_link_tx_to(hdev, LE_LINK);
5015 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5017 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5018 u32 priority = (skb_peek(&chan->data_q))->priority;
5019 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5020 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5021 skb->len, skb->priority);
5023 /* Stop if priority has changed */
5024 if (skb->priority < priority)
5027 skb = skb_dequeue(&chan->data_q);
5029 hci_send_frame(hdev, skb);
5030 hdev->le_last_tx = jiffies;
5041 hdev->acl_cnt = cnt;
5044 hci_prio_recalculate(hdev, LE_LINK);
/* TX work: run the per-link-type schedulers (skipped while userspace
 * owns the device via HCI_USER_CHANNEL) and then flush any raw
 * packets queued on hdev->raw_q straight to the driver.
 */
5047 static void hci_tx_work(struct work_struct *work)
5049 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5050 struct sk_buff *skb;
5052 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5053 hdev->sco_cnt, hdev->le_cnt);
5055 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5056 /* Schedule queues and send stuff to HCI driver */
5057 hci_sched_acl(hdev);
5058 hci_sched_sco(hdev);
5059 hci_sched_esco(hdev);
/* NOTE(review): the hci_sched_le() call present upstream sits on a
 * line missing from this extraction. */
5063 /* Send next queued raw (unknown type) packet */
5064 while ((skb = skb_dequeue(&hdev->raw_q)))
5065 hci_send_frame(hdev, skb);
5068 /* ----- HCI RX task (incoming data processing) ----- */
5070 /* ACL data packet */
/* Parse the ACL header, look up the owning connection by handle and
 * hand the payload to L2CAP; packets for unknown handles are logged
 * and dropped.
 * NOTE(review): hci_dev_lock(), the if/else around the lookup result
 * and the kfree_skb() on the error path are on lines missing from
 * this extraction.
 */
5071 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5073 struct hci_acl_hdr *hdr = (void *) skb->data;
5074 struct hci_conn *conn;
5075 __u16 handle, flags;
5077 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Split the 16-bit field into PB/BC flags and the 12-bit handle */
5079 handle = __le16_to_cpu(hdr->handle);
5080 flags = hci_flags(handle);
5081 handle = hci_handle(handle);
5083 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5086 hdev->stat.acl_rx++;
5089 conn = hci_conn_hash_lookup_handle(hdev, handle);
5090 hci_dev_unlock(hdev);
5093 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5095 /* Send to upper protocol */
5096 l2cap_recv_acldata(conn, skb, flags);
5099 BT_ERR("%s ACL packet for unknown connection handle %d",
5100 hdev->name, handle);
5106 /* SCO data packet */
/* Parse the SCO header, look up the connection by handle and hand the
 * payload to the SCO layer; unknown handles are logged and dropped.
 * NOTE(review): hci_dev_lock(), the if/else around the lookup result
 * and the kfree_skb() on the error path are on lines missing from
 * this extraction.
 */
5107 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5109 struct hci_sco_hdr *hdr = (void *) skb->data;
5110 struct hci_conn *conn;
5113 skb_pull(skb, HCI_SCO_HDR_SIZE);
5115 handle = __le16_to_cpu(hdr->handle);
5117 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5119 hdev->stat.sco_rx++;
5122 conn = hci_conn_hash_lookup_handle(hdev, handle);
5123 hci_dev_unlock(hdev);
5126 /* Send to upper protocol */
5127 sco_recv_scodata(conn, skb);
5130 BT_ERR("%s SCO packet for unknown connection handle %d",
5131 hdev->name, handle);
/* The current request is complete when the next queued command starts
 * a new request (req.start set) or -- per the missing lines -- the
 * command queue is empty.
 */
5137 static bool hci_req_is_complete(struct hci_dev *hdev)
5139 struct sk_buff *skb;
5141 skb = skb_peek(&hdev->cmd_q);
5145 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * kick cmd_work.  Used when a spontaneous event invalidated the
 * pending command; HCI_OP_RESET is deliberately never resent.
 */
5148 static void hci_resend_last(struct hci_dev *hdev)
5150 struct hci_command_hdr *sent;
5151 struct sk_buff *skb;
5154 if (!hdev->sent_cmd)
5157 sent = (void *) hdev->sent_cmd->data;
5158 opcode = __le16_to_cpu(sent->opcode);
5159 if (opcode == HCI_OP_RESET)
/* Clone so hdev->sent_cmd stays owned by the device */
5162 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5166 skb_queue_head(&hdev->cmd_q, skb);
5167 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the whole request
 * it belongs to is finished and, if so, find and invoke the request's
 * completion callback.  On failure (status != 0) the remaining queued
 * commands of the same request are dequeued and discarded.
 * NOTE(review): several control-flow lines (returns, the goto to the
 * callback invocation, kfree_skb of discarded commands) are missing
 * from this extraction.
 */
5170 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5172 hci_req_complete_t req_complete = NULL;
5173 struct sk_buff *skb;
5174 unsigned long flags;
5176 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5178 /* If the completed command doesn't match the last one that was
5179 * sent we need to do special handling of it.
5181 if (!hci_sent_cmd_data(hdev, opcode)) {
5182 /* Some CSR based controllers generate a spontaneous
5183 * reset complete event during init and any pending
5184 * command will never be completed. In such a case we
5185 * need to resend whatever was the last sent
5188 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5189 hci_resend_last(hdev);
5194 /* If the command succeeded and there's still more commands in
5195 * this request the request is not yet complete.
5197 if (!status && !hci_req_is_complete(hdev))
5200 /* If this was the last command in a request the complete
5201 * callback would be found in hdev->sent_cmd instead of the
5202 * command queue (hdev->cmd_q).
5204 if (hdev->sent_cmd) {
5205 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5208 /* We must set the complete callback to NULL to
5209 * avoid calling the callback more than once if
5210 * this function gets called again.
5212 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5218 /* Remove all pending commands belonging to this request */
5219 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5220 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the next request: put it back and stop */
5221 if (bt_cb(skb)->req.start) {
5222 __skb_queue_head(&hdev->cmd_q, skb);
5226 req_complete = bt_cb(skb)->req.complete;
5229 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5233 req_complete(hdev, status);
/* RX work: drain hdev->rx_q, mirroring every packet to the monitor
 * (and to raw sockets when in promiscuous mode), then demultiplex by
 * packet type to the event/ACL/SCO handlers.  Data packets are dropped
 * during init, and everything is dropped in user-channel mode.
 * NOTE(review): the kfree_skb() calls on the drop paths and the
 * HCI_EVENT_PKT case label are on lines missing from this extraction.
 */
5238 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5239 struct sk_buff *skb;
5241 BT_DBG("%s", hdev->name);
5243 while ((skb = skb_dequeue(&hdev->rx_q))) {
5244 /* Send copy to monitor */
5245 hci_send_to_monitor(hdev, skb);
5247 if (atomic_read(&hdev->promisc)) {
5248 /* Send copy to the sockets */
5249 hci_send_to_sock(hdev, skb);
/* Userspace owns the device: the kernel stack must not process */
5252 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5257 if (test_bit(HCI_INIT, &hdev->flags)) {
5258 /* Don't process data packets in this state. */
5259 switch (bt_cb(skb)->pkt_type) {
5260 case HCI_ACLDATA_PKT:
5261 case HCI_SCODATA_PKT:
5268 switch (bt_cb(skb)->pkt_type) {
5270 BT_DBG("%s Event packet", hdev->name);
5271 hci_event_packet(hdev, skb);
5274 case HCI_ACLDATA_PKT:
5275 BT_DBG("%s ACL data packet", hdev->name);
5276 hci_acldata_packet(hdev, skb);
5279 case HCI_SCODATA_PKT:
5280 BT_DBG("%s SCO data packet", hdev->name);
5281 hci_scodata_packet(hdev, skb);
/* Command work: if the controller will accept a command (cmd_cnt > 0),
 * dequeue the next one, keep a clone in hdev->sent_cmd for later
 * matching in hci_req_cmd_complete(), send it and (re)arm the command
 * timeout; on clone failure the command is put back and the work
 * rescheduled.
 * NOTE(review): the NULL check after skb_dequeue and the else around
 * the clone-failure path are on lines missing from this extraction.
 */
5291 static void hci_cmd_work(struct work_struct *work)
5293 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5294 struct sk_buff *skb;
5296 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5297 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5299 /* Send queued commands */
5300 if (atomic_read(&hdev->cmd_cnt)) {
5301 skb = skb_dequeue(&hdev->cmd_q);
/* Drop any previously kept command before keeping the new one */
5305 kfree_skb(hdev->sent_cmd);
5307 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5308 if (hdev->sent_cmd) {
5309 atomic_dec(&hdev->cmd_cnt);
5310 hci_send_frame(hdev, skb);
/* During reset the timeout is cancelled rather than rearmed */
5311 if (test_bit(HCI_RESET, &hdev->flags))
5312 cancel_delayed_work(&hdev->cmd_timer);
5314 schedule_delayed_work(&hdev->cmd_timer,
/* Clone failed: requeue the command and try again later */
5317 skb_queue_head(&hdev->cmd_q, skb);
5318 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable command (scanning disabled) to @req */
5323 void hci_req_add_le_scan_disable(struct hci_request *req)
5325 struct hci_cp_le_set_scan_enable cp;
5327 memset(&cp, 0, sizeof(cp));
5328 cp.enable = LE_SCAN_DISABLE;
5329 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5332 void hci_req_add_le_passive_scan(struct hci_request *req)
5334 struct hci_cp_le_set_scan_param param_cp;
5335 struct hci_cp_le_set_scan_enable enable_cp;
5336 struct hci_dev *hdev = req->hdev;
5339 /* Set require_privacy to false since no SCAN_REQ are send
5340 * during passive scanning. Not using an unresolvable address
5341 * here is important so that peer devices using direct
5342 * advertising with our address will be correctly reported
5343 * by the controller.
5345 if (hci_update_random_address(req, false, &own_addr_type))
5348 memset(¶m_cp, 0, sizeof(param_cp));
5349 param_cp.type = LE_SCAN_PASSIVE;
5350 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5351 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5352 param_cp.own_address_type = own_addr_type;
5353 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5356 memset(&enable_cp, 0, sizeof(enable_cp));
5357 enable_cp.enable = LE_SCAN_ENABLE;
5358 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5359 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for hci_update_background_scan(): only logs a
 * failure.  NOTE(review): the "if (status)" guard for this message is
 * on a line missing from this extraction.
 */
5363 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5366 BT_DBG("HCI request failed to update background scanning: "
5367 "status 0x%2.2x", status);
5370 /* This function controls the background scanning based on hdev->pend_le_conns
5371 * list. If there are pending LE connection we start the background scanning,
5372 * otherwise we stop it.
5374 * This function requires the caller holds hdev->lock.
/* NOTE(review): return statements, the goto/else joining the two
 * branches and the trailing closing brace are on lines missing from
 * this extraction (the function's end falls past the visible range).
 */
5376 void hci_update_background_scan(struct hci_dev *hdev)
5378 struct hci_request req;
5379 struct hci_conn *conn;
/* Background scanning is meaningless while the device is down,
 * initializing, being set up/configured, auto-off or unregistering. */
5382 if (!test_bit(HCI_UP, &hdev->flags) ||
5383 test_bit(HCI_INIT, &hdev->flags) ||
5384 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5385 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5386 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5387 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5390 hci_req_init(&req, hdev);
5392 if (list_empty(&hdev->pend_le_conns) &&
5393 list_empty(&hdev->pend_le_reports)) {
5394 /* If there is no pending LE connections or devices
5395 * to be scanned for, we should stop the background
5399 /* If controller is not scanning we are done. */
5400 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5403 hci_req_add_le_scan_disable(&req);
5405 BT_DBG("%s stopping background scanning", hdev->name);
5407 /* If there is at least one pending LE connection, we should
5408 * keep the background scan running.
5411 /* If controller is connecting, we should not start scanning
5412 * since some controllers are not able to scan and connect at
5415 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5419 /* If controller is currently scanning, we stop it to ensure we
5420 * don't miss any advertising (due to duplicates filter).
5422 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5423 hci_req_add_le_scan_disable(&req);
5425 hci_req_add_le_passive_scan(&req);
5427 BT_DBG("%s starting background scanning", hdev->name);
5430 err = hci_req_run(&req, update_background_scan_complete);
5432 BT_ERR("Failed to run HCI request: err %d", err);