2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
/* Forward an HCI device event (e.g. register/unregister/up/down) to the
 * HCI socket layer so monitoring sockets can be informed.
 * NOTE(review): interior lines of this chunk were lost in extraction;
 * code left untouched.
 */
59 static void hci_notify(struct hci_dev *hdev, int event)
61 hci_sock_dev_event(hdev, event);
64 /* ---- HCI debugfs entries ---- */
/* debugfs "dut_mode" read handler: reports 'Y' or 'N' depending on the
 * HCI_DUT_MODE debug flag of the device behind this file.
 */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs "dut_mode" write handler: parses a boolean from userspace and,
 * when the requested state differs from the current HCI_DUT_MODE flag,
 * sends HCI_OP_ENABLE_DUT_MODE (enable) or HCI_OP_RESET (disable)
 * synchronously, then toggles the flag. Requires the device to be up.
 * NOTE(review): error-return lines and the hci_req_lock() counterpart of
 * the unlock below are missing from this extraction — confirm against
 * the upstream file.
 */
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
/* Status byte of the command-complete reply maps to an errno. */
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
/* File operations for the "dut_mode" debugfs entry. */
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
/* seq_file show: dump every supported LMP features page (up to
 * hdev->max_page, bounded by HCI_MAX_PAGES) as 8 hex bytes per line,
 * plus the LE feature page when the controller is LE capable.
 * NOTE(review): the matching hci_dev_lock() before the unlock below is
 * not visible in this extraction.
 */
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
/* Open hook: bind features_show to this inode's hci_dev. */
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
/* File operations for the read-only "features" debugfs entry. */
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
/* seq_file show: list each blacklisted bdaddr with its address type. */
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
/* Read-only "blacklist" debugfs entry. */
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
/* seq_file show: list each whitelisted bdaddr with its address type. */
194 static int whitelist_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bdaddr_list *b;
200 list_for_each_entry(b, &hdev->whitelist, list)
201 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
202 hci_dev_unlock(hdev);
207 static int whitelist_open(struct inode *inode, struct file *file)
209 return single_open(file, whitelist_show, inode->i_private);
/* Read-only "whitelist" debugfs entry. */
212 static const struct file_operations whitelist_fops = {
213 .open = whitelist_open,
216 .release = single_release,
/* seq_file show: print every registered service UUID, one per line. */
219 static int uuids_show(struct seq_file *f, void *p)
221 struct hci_dev *hdev = f->private;
222 struct bt_uuid *uuid;
225 list_for_each_entry(uuid, &hdev->uuids, list) {
228 /* The Bluetooth UUID values are stored in big endian,
229 * but with reversed byte order. So convert them into
230 * the right order for the %pUb modifier.
232 for (i = 0; i < 16; i++)
233 val[i] = uuid->uuid[15 - i];
235 seq_printf(f, "%pUb\n", val);
237 hci_dev_unlock(hdev);
242 static int uuids_open(struct inode *inode, struct file *file)
244 return single_open(file, uuids_show, inode->i_private);
/* Read-only "uuids" debugfs entry. */
247 static const struct file_operations uuids_fops = {
251 .release = single_release,
/* seq_file show: dump every entry of the inquiry/discovery cache —
 * bdaddr, page-scan parameters, device class (printed MSB first),
 * clock offset, RSSI, SSP mode and the entry timestamp.
 */
254 static int inquiry_cache_show(struct seq_file *f, void *p)
256 struct hci_dev *hdev = f->private;
257 struct discovery_state *cache = &hdev->discovery;
258 struct inquiry_entry *e;
262 list_for_each_entry(e, &cache->all, all) {
263 struct inquiry_data *data = &e->data;
264 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
266 data->pscan_rep_mode, data->pscan_period_mode,
267 data->pscan_mode, data->dev_class[2],
268 data->dev_class[1], data->dev_class[0],
269 __le16_to_cpu(data->clock_offset),
270 data->rssi, data->ssp_mode, e->timestamp);
273 hci_dev_unlock(hdev);
278 static int inquiry_cache_open(struct inode *inode, struct file *file)
280 return single_open(file, inquiry_cache_show, inode->i_private);
/* Read-only "inquiry_cache" debugfs entry. */
283 static const struct file_operations inquiry_cache_fops = {
284 .open = inquiry_cache_open,
287 .release = single_release,
/* seq_file show: print every stored BR/EDR link key as
 * "<bdaddr> <type> <key bytes> <pin_len>".
 */
290 static int link_keys_show(struct seq_file *f, void *ptr)
292 struct hci_dev *hdev = f->private;
293 struct list_head *p, *n;
296 list_for_each_safe(p, n, &hdev->link_keys) {
297 struct link_key *key = list_entry(p, struct link_key, list);
298 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
299 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
301 hci_dev_unlock(hdev);
306 static int link_keys_open(struct inode *inode, struct file *file)
308 return single_open(file, link_keys_show, inode->i_private);
/* Read-only "link_keys" debugfs entry. */
311 static const struct file_operations link_keys_fops = {
312 .open = link_keys_open,
315 .release = single_release,
/* seq_file show: print the 3-byte class of device, most significant
 * byte first (dev_class is stored little-endian).
 */
318 static int dev_class_show(struct seq_file *f, void *ptr)
320 struct hci_dev *hdev = f->private;
323 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
324 hdev->dev_class[1], hdev->dev_class[0]);
325 hci_dev_unlock(hdev);
330 static int dev_class_open(struct inode *inode, struct file *file)
332 return single_open(file, dev_class_show, inode->i_private);
/* Read-only "dev_class" debugfs entry. */
335 static const struct file_operations dev_class_fops = {
336 .open = dev_class_open,
339 .release = single_release,
/* Attribute getter: current voice setting (read-only, hex format). */
342 static int voice_setting_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->voice_setting;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
354 NULL, "0x%4.4llx\n");
/* Attribute setter/getter pair for the SSP auto-accept delay (ms).
 * NOTE(review): no range validation is visible for the set path.
 */
356 static int auto_accept_delay_set(void *data, u64 val)
358 struct hci_dev *hdev = data;
361 hdev->auto_accept_delay = val;
362 hci_dev_unlock(hdev);
367 static int auto_accept_delay_get(void *data, u64 *val)
369 struct hci_dev *hdev = data;
372 *val = hdev->auto_accept_delay;
373 hci_dev_unlock(hdev);
378 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
379 auto_accept_delay_set, "%llu\n");
/* debugfs "force_sc_support" read handler: 'Y'/'N' from HCI_FORCE_SC. */
381 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
382 size_t count, loff_t *ppos)
384 struct hci_dev *hdev = file->private_data;
387 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
390 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* Write handler: toggle HCI_FORCE_SC from a userspace boolean. Unlike
 * dut_mode, this is only allowed while the device is DOWN (note the
 * non-negated HCI_UP test).
 */
393 static ssize_t force_sc_support_write(struct file *file,
394 const char __user *user_buf,
395 size_t count, loff_t *ppos)
397 struct hci_dev *hdev = file->private_data;
399 size_t buf_size = min(count, (sizeof(buf)-1));
402 if (test_bit(HCI_UP, &hdev->flags))
405 if (copy_from_user(buf, user_buf, buf_size))
408 buf[buf_size] = '\0';
409 if (strtobool(buf, &enable))
412 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
415 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
420 static const struct file_operations force_sc_support_fops = {
422 .read = force_sc_support_read,
423 .write = force_sc_support_write,
424 .llseek = default_llseek,
/* debugfs "sc_only_mode" read handler (read-only): 'Y'/'N' from the
 * HCI_SC_ONLY device flag.
 */
427 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
428 size_t count, loff_t *ppos)
430 struct hci_dev *hdev = file->private_data;
433 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
436 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
439 static const struct file_operations sc_only_mode_fops = {
441 .read = sc_only_mode_read,
442 .llseek = default_llseek,
/* Idle timeout (ms): 0 disables, otherwise must be within 500..3600000. */
445 static int idle_timeout_set(void *data, u64 val)
447 struct hci_dev *hdev = data;
449 if (val != 0 && (val < 500 || val > 3600000))
453 hdev->idle_timeout = val;
454 hci_dev_unlock(hdev);
459 static int idle_timeout_get(void *data, u64 *val)
461 struct hci_dev *hdev = data;
464 *val = hdev->idle_timeout;
465 hci_dev_unlock(hdev);
470 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
471 idle_timeout_set, "%llu\n");
473 static int rpa_timeout_set(void *data, u64 val)
475 struct hci_dev *hdev = data;
477 /* Require the RPA timeout to be at least 30 seconds and at most
480 if (val < 30 || val > (60 * 60 * 24))
484 hdev->rpa_timeout = val;
485 hci_dev_unlock(hdev);
490 static int rpa_timeout_get(void *data, u64 *val)
492 struct hci_dev *hdev = data;
495 *val = hdev->rpa_timeout;
496 hci_dev_unlock(hdev);
501 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
502 rpa_timeout_set, "%llu\n");
/* Sniff interval pair: both must be non-zero, even, and kept ordered
 * min <= max (each setter validates against the other bound).
 */
504 static int sniff_min_interval_set(void *data, u64 val)
506 struct hci_dev *hdev = data;
508 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
512 hdev->sniff_min_interval = val;
513 hci_dev_unlock(hdev);
518 static int sniff_min_interval_get(void *data, u64 *val)
520 struct hci_dev *hdev = data;
523 *val = hdev->sniff_min_interval;
524 hci_dev_unlock(hdev);
529 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
530 sniff_min_interval_set, "%llu\n");
534 static int sniff_max_interval_set(void *data, u64 val)
534 struct hci_dev *hdev = data;
536 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
540 hdev->sniff_max_interval = val;
541 hci_dev_unlock(hdev);
546 static int sniff_max_interval_get(void *data, u64 *val)
548 struct hci_dev *hdev = data;
551 *val = hdev->sniff_max_interval;
552 hci_dev_unlock(hdev);
557 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
558 sniff_max_interval_set, "%llu\n");
/* Connection-info age pair: non-zero and kept ordered min <= max. */
560 static int conn_info_min_age_set(void *data, u64 val)
562 struct hci_dev *hdev = data;
564 if (val == 0 || val > hdev->conn_info_max_age)
568 hdev->conn_info_min_age = val;
569 hci_dev_unlock(hdev);
574 static int conn_info_min_age_get(void *data, u64 *val)
576 struct hci_dev *hdev = data;
579 *val = hdev->conn_info_min_age;
580 hci_dev_unlock(hdev);
585 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
586 conn_info_min_age_set, "%llu\n");
588 static int conn_info_max_age_set(void *data, u64 val)
590 struct hci_dev *hdev = data;
592 if (val == 0 || val < hdev->conn_info_min_age)
596 hdev->conn_info_max_age = val;
597 hci_dev_unlock(hdev);
602 static int conn_info_max_age_get(void *data, u64 *val)
604 struct hci_dev *hdev = data;
607 *val = hdev->conn_info_max_age;
608 hci_dev_unlock(hdev);
613 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
614 conn_info_max_age_set, "%llu\n");
/* seq_file show: the device's identity address (and type), its IRK
 * (16 bytes) and current RPA, as resolved by
 * hci_copy_identity_address().
 */
616 static int identity_show(struct seq_file *f, void *p)
618 struct hci_dev *hdev = f->private;
624 hci_copy_identity_address(hdev, &addr, &addr_type);
626 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
627 16, hdev->irk, &hdev->rpa);
629 hci_dev_unlock(hdev);
634 static int identity_open(struct inode *inode, struct file *file)
636 return single_open(file, identity_show, inode->i_private);
/* Read-only "identity" debugfs entry. */
639 static const struct file_operations identity_fops = {
640 .open = identity_open,
643 .release = single_release,
/* seq_file show: the controller's current random address. */
646 static int random_address_show(struct seq_file *f, void *p)
648 struct hci_dev *hdev = f->private;
651 seq_printf(f, "%pMR\n", &hdev->random_addr);
652 hci_dev_unlock(hdev);
657 static int random_address_open(struct inode *inode, struct file *file)
659 return single_open(file, random_address_show, inode->i_private);
662 static const struct file_operations random_address_fops = {
663 .open = random_address_open,
666 .release = single_release,
/* seq_file show: the configured static address. */
669 static int static_address_show(struct seq_file *f, void *p)
671 struct hci_dev *hdev = f->private;
674 seq_printf(f, "%pMR\n", &hdev->static_addr);
675 hci_dev_unlock(hdev);
680 static int static_address_open(struct inode *inode, struct file *file)
682 return single_open(file, static_address_show, inode->i_private);
685 static const struct file_operations static_address_fops = {
686 .open = static_address_open,
689 .release = single_release,
/* debugfs "force_static_address" read handler: 'Y'/'N' from the
 * HCI_FORCE_STATIC_ADDR debug flag.
 */
692 static ssize_t force_static_address_read(struct file *file,
693 char __user *user_buf,
694 size_t count, loff_t *ppos)
696 struct hci_dev *hdev = file->private_data;
699 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
702 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* Write handler: toggle HCI_FORCE_STATIC_ADDR from a userspace boolean;
 * only permitted while the device is down (HCI_UP set -> reject).
 */
705 static ssize_t force_static_address_write(struct file *file,
706 const char __user *user_buf,
707 size_t count, loff_t *ppos)
709 struct hci_dev *hdev = file->private_data;
711 size_t buf_size = min(count, (sizeof(buf)-1));
714 if (test_bit(HCI_UP, &hdev->flags))
717 if (copy_from_user(buf, user_buf, buf_size))
720 buf[buf_size] = '\0';
721 if (strtobool(buf, &enable))
724 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
727 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
732 static const struct file_operations force_static_address_fops = {
734 .read = force_static_address_read,
735 .write = force_static_address_write,
736 .llseek = default_llseek,
/* seq_file show: entries of the controller's LE white list. */
739 static int white_list_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct bdaddr_list *b;
745 list_for_each_entry(b, &hdev->le_white_list, list)
746 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
747 hci_dev_unlock(hdev);
752 static int white_list_open(struct inode *inode, struct file *file)
754 return single_open(file, white_list_show, inode->i_private);
/* Read-only "white_list" debugfs entry. */
757 static const struct file_operations white_list_fops = {
758 .open = white_list_open,
761 .release = single_release,
/* seq_file show: each stored IRK as "<bdaddr> <type> <irk> <rpa>". */
764 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct list_head *p, *n;
770 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
771 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
772 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
773 &irk->bdaddr, irk->addr_type,
774 16, irk->val, &irk->rpa);
776 hci_dev_unlock(hdev);
781 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
783 return single_open(file, identity_resolving_keys_show,
/* Read-only "identity_resolving_keys" debugfs entry. */
787 static const struct file_operations identity_resolving_keys_fops = {
788 .open = identity_resolving_keys_open,
791 .release = single_release,
/* seq_file show: each stored LE LTK — bdaddr, address type,
 * authenticated flag, key type, encryption size, EDIV, Rand and the
 * 16-byte key value.
 */
794 static int long_term_keys_show(struct seq_file *f, void *ptr)
796 struct hci_dev *hdev = f->private;
797 struct list_head *p, *n;
800 list_for_each_safe(p, n, &hdev->long_term_keys) {
801 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
802 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
803 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
804 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
805 __le64_to_cpu(ltk->rand), 16, ltk->val);
807 hci_dev_unlock(hdev);
812 static int long_term_keys_open(struct inode *inode, struct file *file)
814 return single_open(file, long_term_keys_show, inode->i_private);
/* Read-only "long_term_keys" debugfs entry. */
817 static const struct file_operations long_term_keys_fops = {
818 .open = long_term_keys_open,
821 .release = single_release,
/* LE connection interval pair: valid range 0x0006..0x0c80 (per the LE
 * Create Connection command), kept ordered min <= max by validating
 * against the opposite bound in each setter.
 */
824 static int conn_min_interval_set(void *data, u64 val)
826 struct hci_dev *hdev = data;
828 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
832 hdev->le_conn_min_interval = val;
833 hci_dev_unlock(hdev);
838 static int conn_min_interval_get(void *data, u64 *val)
840 struct hci_dev *hdev = data;
843 *val = hdev->le_conn_min_interval;
844 hci_dev_unlock(hdev);
849 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
850 conn_min_interval_set, "%llu\n");
852 static int conn_max_interval_set(void *data, u64 val)
854 struct hci_dev *hdev = data;
856 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
860 hdev->le_conn_max_interval = val;
861 hci_dev_unlock(hdev);
866 static int conn_max_interval_get(void *data, u64 *val)
868 struct hci_dev *hdev = data;
871 *val = hdev->le_conn_max_interval;
872 hci_dev_unlock(hdev);
877 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
878 conn_max_interval_set, "%llu\n");
/* LE connection latency.
 * NOTE(review): the range check for the set path is missing from this
 * extraction — confirm against upstream.
 */
880 static int conn_latency_set(void *data, u64 val)
882 struct hci_dev *hdev = data;
888 hdev->le_conn_latency = val;
889 hci_dev_unlock(hdev);
894 static int conn_latency_get(void *data, u64 *val)
896 struct hci_dev *hdev = data;
899 *val = hdev->le_conn_latency;
900 hci_dev_unlock(hdev);
905 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
906 conn_latency_set, "%llu\n");
/* LE supervision timeout: valid range 0x000a..0x0c80. */
908 static int supervision_timeout_set(void *data, u64 val)
910 struct hci_dev *hdev = data;
912 if (val < 0x000a || val > 0x0c80)
916 hdev->le_supv_timeout = val;
917 hci_dev_unlock(hdev);
922 static int supervision_timeout_get(void *data, u64 *val)
924 struct hci_dev *hdev = data;
927 *val = hdev->le_supv_timeout;
928 hci_dev_unlock(hdev);
933 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
934 supervision_timeout_set, "%llu\n");
/* LE advertising channel map: a 3-bit mask, so only 0x01..0x07. */
936 static int adv_channel_map_set(void *data, u64 val)
938 struct hci_dev *hdev = data;
940 if (val < 0x01 || val > 0x07)
944 hdev->le_adv_channel_map = val;
945 hci_dev_unlock(hdev);
950 static int adv_channel_map_get(void *data, u64 *val)
952 struct hci_dev *hdev = data;
955 *val = hdev->le_adv_channel_map;
956 hci_dev_unlock(hdev);
961 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
962 adv_channel_map_set, "%llu\n");
/* seq_file show: list stored LE connection parameter entries
 * ("<bdaddr> <addr_type> ...").
 */
964 static int device_list_show(struct seq_file *f, void *ptr)
966 struct hci_dev *hdev = f->private;
967 struct hci_conn_params *p;
970 list_for_each_entry(p, &hdev->le_conn_params, list) {
971 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
974 hci_dev_unlock(hdev);
979 static int device_list_open(struct inode *inode, struct file *file)
981 return single_open(file, device_list_show, inode->i_private);
/* Read-only "device_list" debugfs entry. */
984 static const struct file_operations device_list_fops = {
985 .open = device_list_open,
988 .release = single_release,
991 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: record the result,
 * mark the request done and wake whoever sleeps on req_wait_q.
 * Only acts when a request is actually pending.
 */
993 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
995 BT_DBG("%s result 0x%2.2x", hdev->name, result);
997 if (hdev->req_status == HCI_REQ_PEND) {
998 hdev->req_result = result;
999 hdev->req_status = HCI_REQ_DONE;
1000 wake_up_interruptible(&hdev->req_wait_q);
/* Cancel a pending synchronous request with the given errno value:
 * mark it CANCELED and wake the sleeping waiter.
 */
1004 static void hci_req_cancel(struct hci_dev *hdev, int err)
1006 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1008 if (hdev->req_status == HCI_REQ_PEND) {
1009 hdev->req_result = err;
1010 hdev->req_status = HCI_REQ_CANCELED;
1011 wake_up_interruptible(&hdev->req_wait_q);
/* Take ownership of the last received event (hdev->recv_evt) and
 * validate that it matches the command we waited for:
 * - if a specific event code was requested, any event of that code is
 *   accepted;
 * - otherwise it must be a Command Complete whose opcode matches.
 * Returns the skb (with headers pulled) on success, or an ERR_PTR.
 * NOTE(review): several early-return/free lines are missing from this
 * extraction; the cleanup paths cannot be reviewed here.
 */
1015 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1018 struct hci_ev_cmd_complete *ev;
1019 struct hci_event_hdr *hdr;
1020 struct sk_buff *skb;
1024 skb = hdev->recv_evt;
1025 hdev->recv_evt = NULL;
1027 hci_dev_unlock(hdev);
1030 return ERR_PTR(-ENODATA);
1032 if (skb->len < sizeof(*hdr)) {
1033 BT_ERR("Too short HCI event");
1037 hdr = (void *) skb->data;
1038 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* Caller asked for a specific event code: reject anything else. */
1041 if (hdr->evt != event)
1046 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1047 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1051 if (skb->len < sizeof(*ev)) {
1052 BT_ERR("Too short cmd_complete event");
1056 ev = (void *) skb->data;
1057 skb_pull(skb, sizeof(*ev));
1059 if (opcode == __le16_to_cpu(ev->opcode))
1062 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1063 __le16_to_cpu(ev->opcode));
1067 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, with timeout)
 * until it completes with the given event, returning the resulting
 * event skb via hci_get_cmd_complete(). On failure returns an ERR_PTR:
 * -EINTR if interrupted by a signal, a bt_to_errno()-mapped code on
 * command failure, or the negated req_result on cancellation.
 */
1070 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1071 const void *param, u8 event, u32 timeout)
1073 DECLARE_WAITQUEUE(wait, current);
1074 struct hci_request req;
1077 BT_DBG("%s", hdev->name);
1079 hci_req_init(&req, hdev);
1081 hci_req_add_ev(&req, opcode, plen, param, event);
/* Mark pending before running so the completion callback sees it. */
1083 hdev->req_status = HCI_REQ_PEND;
1085 err = hci_req_run(&req, hci_req_sync_complete);
1087 return ERR_PTR(err);
1089 add_wait_queue(&hdev->req_wait_q, &wait);
1090 set_current_state(TASK_INTERRUPTIBLE);
1092 schedule_timeout(timeout);
1094 remove_wait_queue(&hdev->req_wait_q, &wait);
1096 if (signal_pending(current))
1097 return ERR_PTR(-EINTR);
1099 switch (hdev->req_status) {
1101 err = -bt_to_errno(hdev->req_result);
1104 case HCI_REQ_CANCELED:
1105 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous user. */
1113 hdev->req_status = hdev->req_result = 0;
1115 BT_DBG("%s end: err %d", hdev->name, err);
1118 return ERR_PTR(err);
1120 return hci_get_cmd_complete(hdev, opcode, event);
1122 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command expecting the default
 * Command Complete event (event code 0).
 */
1124 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1125 const void *param, u32 timeout)
1127 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1129 EXPORT_SYMBOL(__hci_cmd_sync);
1131 /* Execute request and wait for completion. */
/* Build a request via the supplied callback, run it, and sleep
 * (interruptibly, with timeout) until hci_req_sync_complete() fires.
 * A -ENODATA from hci_req_run() means the callback queued no commands;
 * that is treated as success rather than an error. Returns 0 or a
 * negative errno derived from the request status.
 */
1132 static int __hci_req_sync(struct hci_dev *hdev,
1133 void (*func)(struct hci_request *req,
1135 unsigned long opt, __u32 timeout)
1137 struct hci_request req;
1138 DECLARE_WAITQUEUE(wait, current);
1141 BT_DBG("%s start", hdev->name);
1143 hci_req_init(&req, hdev);
1145 hdev->req_status = HCI_REQ_PEND;
1149 err = hci_req_run(&req, hci_req_sync_complete);
1151 hdev->req_status = 0;
1153 /* ENODATA means the HCI request command queue is empty.
1154 * This can happen when a request with conditionals doesn't
1155 * trigger any commands to be sent. This is normal behavior
1156 * and should not trigger an error return.
1158 if (err == -ENODATA)
1164 add_wait_queue(&hdev->req_wait_q, &wait);
1165 set_current_state(TASK_INTERRUPTIBLE);
1167 schedule_timeout(timeout);
1169 remove_wait_queue(&hdev->req_wait_q, &wait);
1171 if (signal_pending(current))
1174 switch (hdev->req_status) {
1176 err = -bt_to_errno(hdev->req_result);
1179 case HCI_REQ_CANCELED:
1180 err = -hdev->req_result;
/* Clear bookkeeping so the next synchronous request starts clean. */
1188 hdev->req_status = hdev->req_result = 0;
1190 BT_DBG("%s end: err %d", hdev->name, err);
/* Public entry for synchronous requests: requires the device to be up,
 * then serializes via the request lock around __hci_req_sync().
 */
1195 static int hci_req_sync(struct hci_dev *hdev,
1196 void (*req)(struct hci_request *req,
1198 unsigned long opt, __u32 timeout)
1202 if (!test_bit(HCI_UP, &hdev->flags))
1205 /* Serialize all requests */
1207 ret = __hci_req_sync(hdev, req, opt, timeout);
1208 hci_req_unlock(hdev);
/* Request builder: queue an HCI_Reset and mark the device as resetting
 * via the HCI_RESET flag.
 */
1213 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1215 BT_DBG("%s %ld", req->hdev->name, opt);
1218 set_bit(HCI_RESET, &req->hdev->flags);
1219 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR (primary) controllers: packet-based flow
 * control, then the basic identity reads (features, version, BD_ADDR).
 */
1222 static void bredr_init(struct hci_request *req)
1224 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1226 /* Read Local Supported Features */
1227 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1229 /* Read Local Version */
1230 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1232 /* Read BD Address */
1233 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific discovery reads (AMP info, data block size, flow
 * control mode, location data).
 */
1236 static void amp_init(struct hci_request *req)
1238 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1240 /* Read Local Version */
1241 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1243 /* Read Local Supported Commands */
1244 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1246 /* Read Local Supported Features */
1247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1249 /* Read Local AMP Info */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1252 /* Read Data Blk size */
1253 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1255 /* Read Flow Control Mode */
1256 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1258 /* Read Location Data */
1259 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-1 init request: optionally reset (unless the controller has the
 * RESET_ON_CLOSE quirk, in which case it was already reset on close),
 * then dispatch to the type-specific init (bredr_init/amp_init per
 * dev_type; the case labels are missing from this extraction).
 */
1262 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1264 struct hci_dev *hdev = req->hdev;
1266 BT_DBG("%s %ld", hdev->name, opt);
/* Reset */
1269 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1270 hci_reset_req(req, 0);
1272 switch (hdev->dev_type) {
1282 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 BR/EDR setup: read buffer sizes and identity strings, clear
 * event filters, set the connection accept timeout, and — except on
 * controllers known to lack them — read page scan parameters.
 */
1287 static void bredr_setup(struct hci_request *req)
1289 struct hci_dev *hdev = req->hdev;
1294 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1295 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1297 /* Read Class of Device */
1298 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1300 /* Read Local Name */
1301 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1303 /* Read Voice Setting */
1304 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1306 /* Read Number of Supported IAC */
1307 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1309 /* Read Current IAC LAP */
1310 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1312 /* Clear Event Filters */
1313 flt_type = HCI_FLT_CLEAR_ALL;
1314 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1316 /* Connection accept timeout ~20 secs */
1317 param = cpu_to_le16(0x7d00);
1318 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1320 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1321 * but it does not support page scan related HCI commands.
1323 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1324 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1325 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 LE setup: read LE buffer size, features, supported states,
 * advertising TX power and white-list size, clear the white list, and
 * for LE-only controllers mark LE as implicitly enabled.
 */
1329 static void le_setup(struct hci_request *req)
1331 struct hci_dev *hdev = req->hdev;
1333 /* Read LE Buffer Size */
1334 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1336 /* Read LE Local Supported Features */
1337 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1339 /* Read LE Supported States */
1340 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1342 /* Read LE Advertising Channel TX Power */
1343 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1345 /* Read LE White List Size */
1346 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1348 /* Clear LE White List */
1349 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1351 /* LE-only controllers have LE implicitly enabled */
1352 if (!lmp_bredr_capable(hdev))
1353 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry mode the controller supports: extended inquiry
 * if capable, else RSSI inquiry, with quirks for specific known-buggy
 * controllers identified by manufacturer/hci_rev/lmp_subver.
 * NOTE(review): the actual return-value lines are missing from this
 * extraction, so the mode chosen per branch cannot be confirmed here.
 */
1356 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1358 if (lmp_ext_inq_capable(hdev))
1361 if (lmp_inq_rssi_capable(hdev))
1364 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1365 hdev->lmp_subver == 0x0757)
1368 if (hdev->manufacturer == 15) {
1369 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1371 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1373 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1377 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1378 hdev->lmp_subver == 0x1805)
/* Queue a Write Inquiry Mode command with the mode selected above. */
1384 static void hci_setup_inquiry_mode(struct hci_request *req)
1388 mode = hci_get_inquiry_mode(req->hdev);
1390 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and queue the HCI Set Event Mask. Starts from a BR/EDR default
 * mask, switches to a minimal LE-only mask when BR/EDR is absent, then
 * ORs in event bits for each optional LMP capability the controller
 * advertises. Skipped entirely on pre-1.2 controllers (CSR 1.1 dongles
 * reject any event mask write).
 */
1393 static void hci_setup_event_mask(struct hci_request *req)
1395 struct hci_dev *hdev = req->hdev;
1397 /* The second byte is 0xff instead of 0x9f (two reserved bits
1398 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1399 * command otherwise.
1401 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1403 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1404 * any event mask for pre 1.2 devices.
1406 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1409 if (lmp_bredr_capable(hdev)) {
1410 events[4] |= 0x01; /* Flow Specification Complete */
1411 events[4] |= 0x02; /* Inquiry Result with RSSI */
1412 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1413 events[5] |= 0x08; /* Synchronous Connection Complete */
1414 events[5] |= 0x10; /* Synchronous Connection Changed */
1416 /* Use a different default for LE-only devices */
1417 memset(events, 0, sizeof(events));
1418 events[0] |= 0x10; /* Disconnection Complete */
1419 events[0] |= 0x80; /* Encryption Change */
1420 events[1] |= 0x08; /* Read Remote Version Information Complete */
1421 events[1] |= 0x20; /* Command Complete */
1422 events[1] |= 0x40; /* Command Status */
1423 events[1] |= 0x80; /* Hardware Error */
1424 events[2] |= 0x04; /* Number of Completed Packets */
1425 events[3] |= 0x02; /* Data Buffer Overflow */
1426 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1429 if (lmp_inq_rssi_capable(hdev))
1430 events[4] |= 0x02; /* Inquiry Result with RSSI */
1432 if (lmp_sniffsubr_capable(hdev))
1433 events[5] |= 0x20; /* Sniff Subrating */
1435 if (lmp_pause_enc_capable(hdev))
1436 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_ext_inq_capable(hdev))
1439 events[5] |= 0x40; /* Extended Inquiry Result */
1441 if (lmp_no_flush_capable(hdev))
1442 events[7] |= 0x01; /* Enhanced Flush Complete */
1444 if (lmp_lsto_capable(hdev))
1445 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1447 if (lmp_ssp_capable(hdev)) {
1448 events[6] |= 0x01; /* IO Capability Request */
1449 events[6] |= 0x02; /* IO Capability Response */
1450 events[6] |= 0x04; /* User Confirmation Request */
1451 events[6] |= 0x08; /* User Passkey Request */
1452 events[6] |= 0x10; /* Remote OOB Data Request */
1453 events[6] |= 0x20; /* Simple Pairing Complete */
1454 events[7] |= 0x04; /* User Passkey Notification */
1455 events[7] |= 0x08; /* Keypress Notification */
1456 events[7] |= 0x10; /* Remote Host Supported
1457 * Features Notification
1461 if (lmp_le_capable(hdev))
1462 events[7] |= 0x20; /* LE Meta-Event */
1464 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 init request: run the transport-specific setup (BR/EDR and/or
 * LE), install the event mask, and configure SSP/EIR, inquiry mode, TX
 * power and extended features where the controller supports them.
 */
1467 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1469 struct hci_dev *hdev = req->hdev;
1471 if (lmp_bredr_capable(hdev))
1474 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1476 if (lmp_le_capable(hdev))
1479 hci_setup_event_mask(req);
1481 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1482 * local supported commands HCI command.
1484 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1485 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1487 if (lmp_ssp_capable(hdev)) {
1488 /* When SSP is available, then the host features page
1489 * should also be available as well. However some
1490 * controllers list the max_page as 0 as long as SSP
1491 * has not been enabled. To achieve proper debugging
1492 * output, force the minimum max_page to 1 at least.
1494 hdev->max_page = 0x01;
1496 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1498 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1499 sizeof(mode), &mode);
1501 struct hci_cp_write_eir cp;
/* SSP disabled: wipe any stale EIR data on host and controller. */
1503 memset(hdev->eir, 0, sizeof(hdev->eir));
1504 memset(&cp, 0, sizeof(cp));
1506 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1510 if (lmp_inq_rssi_capable(hdev))
1511 hci_setup_inquiry_mode(req);
1513 if (lmp_inq_tx_pwr_capable(hdev))
1514 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1516 if (lmp_ext_feat_capable(hdev)) {
1517 struct hci_cp_read_local_ext_features cp;
1520 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1524 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP features
 * (role switch, hold, sniff, park) and queue the Write Default Link
 * Policy Settings command. Policy bits are sent little-endian per the
 * HCI wire format.
 */
1531 static void hci_setup_link_policy(struct hci_request *req)
1533 struct hci_dev *hdev = req->hdev;
1534 struct hci_cp_write_def_link_policy cp;
1535 u16 link_policy = 0;
1537 if (lmp_rswitch_capable(hdev))
1538 link_policy |= HCI_LP_RSWITCH;
1539 if (lmp_hold_capable(hdev))
1540 link_policy |= HCI_LP_HOLD;
1541 if (lmp_sniff_capable(hdev))
1542 link_policy |= HCI_LP_SNIFF;
1543 if (lmp_park_capable(hdev))
1544 link_policy |= HCI_LP_PARK;
1546 cp.policy = cpu_to_le16(link_policy);
1547 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported to sync the controller's host LE flags
 * with the HCI_LE_ENABLED setting. Skipped entirely for LE-only
 * controllers, and only sent when the value actually changes.
 * NOTE(review): the assignment of cp.le and the early-return branch
 * appear to be on lines dropped by this extraction.
 */
1550 static void hci_set_le_support(struct hci_request *req)
1552 struct hci_dev *hdev = req->hdev;
1553 struct hci_cp_write_le_host_supported cp;
1555 /* LE-only devices do not support explicit enablement */
1556 if (!lmp_bredr_capable(hdev))
1559 memset(&cp, 0, sizeof(cp));
1561 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1563 cp.simul = lmp_le_br_capable(hdev);
/* Only write when the desired value differs from the current one. */
1566 if (cp.le != lmp_host_le_capable(hdev))
1567 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue HCI event mask page 2: CSB master/slave events and
 * the Authenticated Payload Timeout Expired event, each gated on the
 * corresponding controller capability.
 */
1571 static void hci_set_event_mask_page_2(struct hci_request *req)
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1579 if (lmp_csb_master_capable(hdev)) {
1580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1586 /* If Connectionless Slave Broadcast slave role is supported
1587 * enable all necessary events for it.
1589 if (lmp_csb_slave_capable(hdev)) {
1590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Third-stage HCI init request: delete stored link keys (when safe),
 * set the default link policy, configure the LE event mask and host
 * LE support, then read feature pages beyond page 1.
 * NOTE(review): the "events" array declaration and loop variable "p"
 * are on lines dropped by this extraction.
 */
1603 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1605 struct hci_dev *hdev = req->hdev;
1608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1612 * Check the supported commands and only if the command is marked
1613 * as supported send it. If not supported assume that the controller
1614 * does not have actual support for stored link keys which makes this
1615 * command redundant anyway.
1617 * Some controllers indicate that they support handling deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
1621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623 struct hci_cp_delete_stored_link_key cp;
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Bit 4 of command byte 5 = Write Default Link Policy supported. */
1631 if (hdev->commands[5] & 0x10)
1632 hci_setup_link_policy(req);
1634 if (lmp_le_capable(hdev)) {
1637 memset(events, 0, sizeof(events));
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection Parameter Request */
1648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1651 hci_set_le_support(req);
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Fourth-stage HCI init request: event mask page 2 (when the command
 * is supported), Synchronization Train parameters, and Secure
 * Connections enablement.
 * NOTE(review): the "support" variable declaration appears to be on a
 * dropped line.
 */
1664 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1666 struct hci_dev *hdev = req->hdev;
1668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1672 /* Check for Synchronization Train support */
1673 if (lmp_sync_train_capable(hdev))
1674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1676 /* Enable Secure Connections if supported and configured */
1677 if ((lmp_sc_capable(hdev) ||
1678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
/* Run the staged controller bring-up (init1..init4 synchronous
 * requests) and, only during the initial HCI_SETUP phase, create the
 * per-controller debugfs entries grouped by capability (BR/EDR, SSP,
 * sniff, LE).
 * Returns 0 on success or the error from the first failing stage.
 * NOTE(review): several early-return lines ("if (err) return err;",
 * "return 0;") are dropped by this extraction.
 */
1686 static int __hci_init(struct hci_dev *hdev)
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1706 if (hdev->dev_type != HCI_BREDR)
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Common read-only identity/diagnostic entries. */
1727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
/* BR/EDR-only entries. */
1744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
1747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
1749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
1751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
/* Secure Simple Pairing entries. */
1755 if (lmp_ssp_capable(hdev)) {
1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
/* Sniff-mode tuning entries. */
1764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
/* LE-only entries. */
1773 if (lmp_le_capable(hdev)) {
1774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
1778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
1780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
1794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
1799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
1801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1815 &hdev->discov_interleaved_timeout);
/* Minimal init request for unconfigured controllers: optional reset,
 * Read Local Version, and — only when the driver can program the
 * address (set_bdaddr callback present) — Read BD Address.
 */
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1823 struct hci_dev *hdev = req->hdev;
1825 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver resets on close anyway. */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run hci_init0_req synchronously for an unconfigured controller.
 * Raw devices are skipped (presumably returning early — the return
 * lines are dropped by this extraction).
 */
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request helper: write the scan enable value passed in "opt"
 * (inquiry/page scan bits; the local "scan" variable is assigned on a
 * dropped line).
 */
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1857 BT_DBG("%s %x", req->hdev->name, scan);
1859 /* Inquiry and Page scans */
1860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: write the authentication enable value from "opt". */
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1867 BT_DBG("%s %x", req->hdev->name, auth);
1869 /* Authentication */
1870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: write the encryption mode value from "opt". */
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1877 BT_DBG("%s %x", req->hdev->name, encrypt);
1880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request helper: write the default link policy, converting the host
 * value in "opt" to little-endian wire format.
 */
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1885 __le16 policy = cpu_to_le16(opt);
1887 BT_DBG("%s %x", req->hdev->name, policy);
1889 /* Default link policy */
1890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1893 /* Get HCI device by index.
1894 * Device is held on return. */
/* Looks up the device under the global device-list read lock; the
 * matching entry gets its reference count bumped via hci_dev_hold()
 * before the lock is released, so the caller must hci_dev_put() it.
 */
1895 struct hci_dev *hci_dev_get(int index)
1897 struct hci_dev *hdev = NULL, *d;
1899 BT_DBG("%d", index);
1904 read_lock(&hci_dev_list_lock);
1905 list_for_each_entry(d, &hci_dev_list, list) {
1906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1911 read_unlock(&hci_dev_list_lock);
1915 /* ---- Inquiry support ---- */
/* Return whether a discovery procedure is in progress: true while the
 * state machine is FINDING or RESOLVING (the return statements are on
 * lines dropped by this extraction).
 */
1917 bool hci_discovery_active(struct hci_dev *hdev)
1919 struct discovery_state *discov = &hdev->discovery;
1921 switch (discov->state) {
1922 case DISCOVERY_FINDING:
1923 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. No-op when the state does not change.
 * NOTE(review): break statements between switch cases are on lines
 * dropped by this extraction.
 */
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1933 int old_state = hdev->discovery.state;
1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1937 if (old_state == state)
1940 hdev->discovery.state = state;
1943 case DISCOVERY_STOPPED:
/* Re-evaluate passive background scanning once discovery stops. */
1944 hci_update_background_scan(hdev);
/* Only report "stopped" if discovery had actually started. */
1946 if (old_state != DISCOVERY_STARTING)
1947 mgmt_discovering(hdev, 0);
1949 case DISCOVERY_STARTING:
1951 case DISCOVERY_FINDING:
1952 mgmt_discovering(hdev, 1);
1954 case DISCOVERY_RESOLVING:
1956 case DISCOVERY_STOPPING:
/* Free every entry in the inquiry cache and reset its lists.
 * Caller must hold hdev->lock (the kfree of each entry is on a line
 * dropped by this extraction).
 */
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1963 struct discovery_state *cache = &hdev->discovery;
1964 struct inquiry_entry *p, *n;
1966 list_for_each_entry_safe(p, n, &cache->all, all) {
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry-cache entry by Bluetooth address on the "all" list.
 * Returns the entry, or presumably NULL when not found (return lines
 * dropped by this extraction).
 */
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1978 struct discovery_state *cache = &hdev->discovery;
1979 struct inquiry_entry *e;
1981 BT_DBG("cache %p, %pMR", cache, bdaddr);
1983 list_for_each_entry(e, &cache->all, all) {
1984 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Same as hci_inquiry_cache_lookup() but restricted to entries whose
 * remote name is still unknown (the "unknown" list).
 */
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1994 struct discovery_state *cache = &hdev->discovery;
1995 struct inquiry_entry *e;
1997 BT_DBG("cache %p, %pMR", cache, bdaddr);
1999 list_for_each_entry(e, &cache->unknown, list) {
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the name-resolve list. Passing BDADDR_ANY matches
 * the first entry in the given name_state; otherwise the address must
 * match exactly.
 */
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list so the list stays ordered
 * by signal strength: entries with stronger RSSI (smaller |rssi|) are
 * resolved first; NAME_PENDING entries are skipped when choosing the
 * insertion point.
 */
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027 struct inquiry_entry *ie)
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
/* Remove first, then find the new position. */
2033 list_del(&ie->list);
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
2037 abs(p->data.rssi) >= abs(ie->data.rssi))
2042 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache and
 * compute the MGMT "device found" flags (legacy pairing /
 * confirm-name). Updates the resolve ordering when the RSSI of an
 * entry awaiting name resolution changes.
 * NOTE(review): several control-flow lines (gotos/returns, the
 * name_known branch condition, allocation-failure handling) are
 * dropped by this extraction.
 */
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *ie;
2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for the peer. */
2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI change while a name lookup is queued re-sorts the queue. */
2064 if (ie->name_state == NAME_NEEDED &&
2065 data->rssi != ie->data.rssi) {
2066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2080 list_add(&ie->all, &cache->all);
2083 ie->name_state = NAME_KNOWN;
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
2090 if (name_known && ie->name_state != NAME_KNOWN &&
2091 ie->name_state != NAME_PENDING) {
2092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
/* Refresh the stored data and timestamps in all cases. */
2096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
2098 cache->timestamp = jiffies;
2100 if (ie->name_state == NAME_NOT_KNOWN)
2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to "num" cache entries into "buf" as struct inquiry_info
 * records (the HCI ioctl wire format) and return the count copied
 * (the counter/limit lines are dropped by this extraction).
 */
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2114 list_for_each_entry(e, &cache->all, all) {
2115 struct inquiry_data *data = &e->data;
2120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
2131 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via "opt". Skipped when an inquiry
 * is already running.
 */
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138 struct hci_dev *hdev = req->hdev;
2139 struct hci_cp_inquiry cp;
2141 BT_DBG("%s", hdev->name);
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
2150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: abort the wait only if the current task has a
 * signal pending (returning non-zero makes the wait fail with -EINTR
 * semantics at the caller).
 */
2153 static int wait_inquiry(void *word)
2156 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device state, optionally
 * flush a stale cache and run a fresh inquiry, then copy the cached
 * results back to user space.
 * NOTE(review): error gotos, the kmalloc failure check and the final
 * return/cleanup lines are dropped by this extraction.
 */
2159 int hci_inquiry(void __user *arg)
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2171 hdev = hci_dev_get(ir.dev_id);
/* Reject ioctls on devices in user-channel, unconfigured, non-BR/EDR
 * or BR/EDR-disabled states. */
2175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2185 if (hdev->dev_type != HCI_BREDR) {
2190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Start a new inquiry when the cache is old/empty or a flush was
 * requested explicitly. */
2196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198 hci_inquiry_cache_flush(hdev);
2201 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s; 2000ms gives headroom per unit. */
2203 timeo = ir.length * msecs_to_jiffies(2000);
2206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2219 /* for unlimited number of responses we will use buffer with
2222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2224 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2225 * copy it to the user space.
2227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235 hci_dev_unlock(hdev);
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on and initialize a controller: open the transport, run the
 * driver setup (first power-on only), program a configured public
 * address, run the staged HCI init, then mark the device HCI_UP and
 * notify mgmt. On any init failure the work queues and command
 * queues are drained and the transport is closed again.
 * NOTE(review): locking calls (hci_req_lock/hci_dev_lock), several
 * error gotos and the hdev->close() call on the failure path are on
 * lines dropped by this extraction.
 */
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2258 BT_DBG("%s %p", hdev->name, hdev);
2262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2277 /* Check for valid public address or a configured static
2278 * random adddress, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
2291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2298 if (test_bit(HCI_UP, &hdev->flags)) {
2303 if (hdev->open(hdev)) {
/* Transport is up: allow one outstanding command and enter init. */
2308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2313 ret = hdev->setup(hdev);
2315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2347 ret = -EADDRNOTAVAIL;
/* Full staged init only for configured, non-user-channel devices. */
2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353 ret = __hci_init(hdev);
2356 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: RPA must be regenerated, device is now up. */
2360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367 hdev->dev_type == HCI_BREDR) {
2369 mgmt_powered(hdev, 1);
2370 hci_dev_unlock(hdev);
2373 /* Init failed, cleanup */
2374 flush_work(&hdev->tx_work);
2375 flush_work(&hdev->cmd_work);
2376 flush_work(&hdev->rx_work);
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW; every other runtime flag is cleared on failure. */
2390 hdev->flags &= BIT(HCI_RAW);
2394 hci_req_unlock(hdev);
2398 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device, refuse normal
 * power-on of unconfigured devices (user channel only), cancel any
 * pending auto-power-off, wait for in-flight setup work, then call
 * hci_dev_do_open(). Releases the device reference before returning
 * (the hci_dev_put/return lines are dropped by this extraction).
 */
2400 int hci_dev_open(__u16 dev)
2402 struct hci_dev *hdev;
2405 hdev = hci_dev_get(dev);
2409 /* Devices that are marked as unconfigured can only be powered
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result into a failure. Only user channel operation is
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2436 flush_workqueue(hdev->req_workqueue);
2438 err = hci_dev_do_open(hdev);
2445 /* This function requires the caller holds hdev->lock */
/* Detach every LE connection-parameter entry from its pending-action
 * list (auto-connect / report lists) without freeing the entries.
 */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2448 struct hci_conn_params *p;
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2453 BT_DBG("All LE pending actions cleared");
/* Power down a controller: cancel pending work, flush queues, tear
 * down discovery/connection state, optionally send a final HCI reset
 * (quirk-controlled), drop the last sent command, notify mgmt of
 * power-off and clear all non-persistent flags.
 * NOTE(review): locking calls and the driver close()/return lines are
 * dropped by this extraction.
 */
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2458 BT_DBG("%s %p", hdev->name, hdev)
2460 cancel_delayed_work(&hdev->power_off);
2462 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out. */
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466 cancel_delayed_work_sync(&hdev->cmd_timer);
2467 hci_req_unlock(hdev);
2471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
2473 flush_work(&hdev->rx_work);
2475 if (hdev->discov_timeout > 0) {
2476 cancel_delayed_work(&hdev->discov_off);
2477 hdev->discov_timeout = 0;
2478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483 cancel_delayed_work(&hdev->service_cache);
2485 cancel_delayed_work_sync(&hdev->le_scan_disable);
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Drop cached discovery data and all active connections. */
2491 hci_inquiry_cache_flush(hdev);
2492 hci_conn_hash_flush(hdev);
2493 hci_pend_le_actions_clear(hdev);
2494 hci_dev_unlock(hdev);
2496 hci_notify(hdev, HCI_DEV_DOWN);
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final reset unless auto-off/unconfigured, and only when the
 * driver asks for reset-on-close. */
2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507 set_bit(HCI_INIT, &hdev->flags);
2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509 clear_bit(HCI_INIT, &hdev->flags);
2512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
2522 cancel_delayed_work_sync(&hdev->cmd_timer);
2523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
/* Preserve HCI_RAW only; clear all non-persistent dev_flags. */
2535 hdev->flags &= BIT(HCI_RAW);
2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2546 /* Controller radio is available but is currently powered down */
2547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2549 memset(hdev->eir, 0, sizeof(hdev->eir));
2550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551 bacpy(&hdev->random_addr, BDADDR_ANY);
2553 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: refuse user-channel devices, cancel a
 * pending auto-power-off (so mgmt still sees this as a powered state
 * change), then close via hci_dev_do_close(). The hci_dev_put/return
 * lines are dropped by this extraction.
 */
2559 int hci_dev_close(__u16 dev)
2561 struct hci_dev *hdev;
2564 hdev = hci_dev_get(dev);
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2576 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: on an up, non-user-channel, configured device,
 * drop queued packets and cached state, zero the flow-control
 * counters and issue a synchronous HCI reset.
 * NOTE(review): error gotos, the req_lock call and the sent_cmd
 * cleanup are on lines dropped by this extraction.
 */
2583 int hci_dev_reset(__u16 dev)
2585 struct hci_dev *hdev;
2588 hdev = hci_dev_get(dev);
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues and cached discovery/connection state. */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2614 hci_inquiry_cache_flush(hdev);
2615 hci_conn_hash_flush(hdev);
2616 hci_dev_unlock(hdev);
/* Reset the flow-control bookkeeping before the HCI reset. */
2621 atomic_set(&hdev->cmd_cnt, 1);
2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2627 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters. Refused
 * for user-channel and unconfigured devices.
 */
2632 int hci_dev_reset_stat(__u16 dev)
2634 struct hci_dev *hdev;
2637 hdev = hci_dev_get(dev);
2641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw Write Scan Enable change (made outside mgmt, e.g. via
 * ioctl) into the HCI_CONNECTABLE / HCI_DISCOVERABLE flags, and if
 * anything changed while mgmt is active, refresh advertising data and
 * announce the new settings.
 */
2658 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2660 bool conn_changed, discov_changed;
2662 BT_DBG("%s scan 0x%02x", hdev->name, scan);
/* Page scan bit drives connectable; track whether it flipped. */
2664 if ((scan & SCAN_PAGE))
2665 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2668 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
/* Inquiry scan bit drives discoverable. */
2671 if ((scan & SCAN_INQUIRY)) {
2672 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2675 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Nothing to announce unless mgmt is in use. */
2680 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683 if (conn_changed || discov_changed) {
2684 /* In case this was disabled through mgmt */
2685 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2687 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688 mgmt_update_adv_data(hdev);
2690 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls (auth, encrypt, scan,
 * link policy, link mode, packet type, ACL/SCO MTU). Each command is
 * run as a synchronous HCI request or applied directly to hdev.
 * NOTE(review): the switch statement, case labels for several
 * commands, break statements and error gotos are on lines dropped by
 * this extraction.
 */
2694 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2696 struct hci_dev *hdev;
2697 struct hci_dev_req dr;
2700 if (copy_from_user(&dr, arg, sizeof(dr)))
2703 hdev = hci_dev_get(dr.dev_id);
2707 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2712 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2717 if (hdev->dev_type != HCI_BREDR) {
2722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2729 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2734 if (!lmp_encrypt_capable(hdev)) {
2739 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740 /* Auth must be enabled first */
2741 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2747 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2752 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2755 /* Ensure that the connectable and discoverable states
2756 * get correctly modified as this was a non-mgmt change.
2759 hci_update_scan_state(hdev, dr.dev_opt);
2763 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2767 case HCISETLINKMODE:
2768 hdev->link_mode = ((__u16) dr.dev_opt) &
2769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2773 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half, packet count in the low half. */
2777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs for all
 * registered controllers to user space. The dev_num bound keeps the
 * kernel allocation within two pages.
 */
2796 int hci_get_dev_list(void __user *arg)
2798 struct hci_dev *hdev;
2799 struct hci_dev_list_req *dl;
2800 struct hci_dev_req *dr;
2801 int n = 0, size, err;
2804 if (get_user(dev_num, (__u16 __user *) arg))
2807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2810 size = sizeof(*dl) + dev_num * sizeof(*dr);
2812 dl = kzalloc(size, GFP_KERNEL);
2818 read_lock(&hci_dev_list_lock);
2819 list_for_each_entry(hdev, &hci_dev_list, list) {
2820 unsigned long flags = hdev->flags;
2822 /* When the auto-off is configured it means the transport
2823 * is running, but in that case still indicate that the
2824 * device is actually down.
2826 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2827 flags &= ~BIT(HCI_UP);
/* Legacy (non-mgmt) users always see the device as pairable. */
2829 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2832 (dr + n)->dev_id = hdev->id;
2833 (dr + n)->dev_opt = flags;
2838 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of entries actually filled. */
2841 size = sizeof(*dl) + n * sizeof(*dr);
2843 err = copy_to_user(arg, dl, size);
2846 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (name,
 * address, type, flags, MTUs, policy, stats, features) for one
 * controller and copy it to user space.
 */
2849 int hci_get_dev_info(void __user *arg)
2851 struct hci_dev *hdev;
2852 struct hci_dev_info di;
2853 unsigned long flags;
2856 if (copy_from_user(&di, arg, sizeof(di)))
2859 hdev = hci_dev_get(di.dev_id);
2863 /* When the auto-off is configured it means the transport
2864 * is running, but in that case still indicate that the
2865 * device is actually down.
2867 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2868 flags = hdev->flags & ~BIT(HCI_UP);
2870 flags = hdev->flags;
/* Legacy (non-mgmt) users always see the device as pairable. */
2872 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2873 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2875 strcpy(di.name, hdev->name);
2876 di.bdaddr = hdev->bdaddr;
2877 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2879 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer sizes in the ACL slots. */
2880 if (lmp_bredr_capable(hdev)) {
2881 di.acl_mtu = hdev->acl_mtu;
2882 di.acl_pkts = hdev->acl_pkts;
2883 di.sco_mtu = hdev->sco_mtu;
2884 di.sco_pkts = hdev->sco_pkts;
2886 di.acl_mtu = hdev->le_mtu;
2887 di.acl_pkts = hdev->le_pkts;
2891 di.link_policy = hdev->link_policy;
2892 di.link_mode = hdev->link_mode;
2894 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2895 memcpy(&di.features, &hdev->features, sizeof(di.features));
2897 if (copy_to_user(arg, &di, sizeof(di)))
2905 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the blocked state in HCI_RFKILLED and power
 * the device down when blocked — except during setup/config, where
 * the close is deferred until setup finishes. User-channel devices
 * refuse rfkill control (the error return is on a dropped line).
 */
2907 static int hci_rfkill_set_block(void *data, bool blocked)
2909 struct hci_dev *hdev = data;
2911 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2913 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2917 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2918 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2919 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2920 hci_dev_do_close(hdev);
2922 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered for each controller. */
2928 static const struct rfkill_ops hci_rfkill_ops = {
2929 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item: open the device, re-check the error
 * conditions that were deliberately ignored during setup (rfkill,
 * unconfigured, missing address) and power back off if they still
 * hold; otherwise arm the auto-off timer. Finally finish the
 * SETUP/CONFIG phase with the appropriate mgmt Index Added event and
 * HCI_RAW bookkeeping.
 */
2932 static void hci_power_on(struct work_struct *work)
2934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2937 BT_DBG("%s", hdev->name);
2939 err = hci_dev_do_open(hdev);
2941 mgmt_set_powered_failed(hdev, err);
2945 /* During the HCI setup phase, a few error conditions are
2946 * ignored and they need to be checked now. If they are still
2947 * valid, it is important to turn the device back off.
2949 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2950 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2951 (hdev->dev_type == HCI_BREDR &&
2952 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2953 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2954 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2955 hci_dev_do_close(hdev);
2956 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2957 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2958 HCI_AUTO_OFF_TIMEOUT);
2961 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2962 /* For unconfigured devices, set the HCI_RAW flag
2963 * so that userspace can easily identify them.
2965 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2966 set_bit(HCI_RAW, &hdev->flags);
2968 /* For fully configured devices, this will send
2969 * the Index Added event. For unconfigured devices,
2970 * it will send Unconfigued Index Added event.
2972 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2973 * and no event will be send.
2975 mgmt_index_added(hdev);
2976 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2977 /* When the controller is now configured, then it
2978 * is important to clear the HCI_RAW flag.
2980 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2981 clear_bit(HCI_RAW, &hdev->flags);
2983 /* Powering on the controller with HCI_CONFIG set only
2984 * happens with the transition from unconfigured to
2985 * configured. This will send the Index Added event.
2987 mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
2991 static void hci_power_off(struct work_struct *work)
2993 struct hci_dev *hdev = container_of(work, struct hci_dev,
2996 BT_DBG("%s", hdev->name);
2998 hci_dev_do_close(hdev);
/* Discoverable-timeout work: let mgmt turn discoverable mode back off. */
3001 static void hci_discov_off(struct work_struct *work)
3003 struct hci_dev *hdev;
3005 hdev = container_of(work, struct hci_dev, discov_off.work);
3007 BT_DBG("%s", hdev->name);
3009 mgmt_discoverable_timeout(hdev);
/* Remove (and free) all registered service UUIDs. Caller holds hdev->lock. */
3012 void hci_uuids_clear(struct hci_dev *hdev)
3014 struct bt_uuid *uuid, *tmp;
3016 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3017 list_del(&uuid->list);
/* Remove (and free) all stored BR/EDR link keys. */
3022 void hci_link_keys_clear(struct hci_dev *hdev)
3024 struct list_head *p, *n;
3026 list_for_each_safe(p, n, &hdev->link_keys) {
3027 struct link_key *key;
3029 key = list_entry(p, struct link_key, list);
/* Remove (and free) all stored SMP Long Term Keys. */
3036 void hci_smp_ltks_clear(struct hci_dev *hdev)
3038 struct smp_ltk *k, *tmp;
3040 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Remove (and free) all stored Identity Resolving Keys. */
3046 void hci_smp_irks_clear(struct hci_dev *hdev)
3048 struct smp_irk *k, *tmp;
3050 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up the BR/EDR link key for @bdaddr; NULL if not stored. */
3056 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3060 list_for_each_entry(k, &hdev->link_keys, list)
3061 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and on both sides' authentication requirements.
 */
3067 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3068 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03): combination/local/remote unit keys. */
3071 if (key_type < 0x03)
3074 /* Debug keys are insecure so don't store them persistently */
3075 if (key_type == HCI_LK_DEBUG_COMBINATION)
3078 /* Changed combination key and there's no previous one */
3079 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3082 /* Security mode 3 case */
3086 /* Neither local nor remote side had no-bonding as requirement */
3087 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3090 /* Local side had dedicated bonding as requirement */
3091 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3094 /* Remote side had dedicated bonding as requirement */
3095 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3098 /* If none of the above criteria match, then don't store the key
 * persistently */
3103 static bool ltk_type_master(u8 type)
3105 return (type == SMP_LTK);
/* Look up an LTK by its EDIV/Rand pair and master role; NULL if absent. */
3108 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3113 list_for_each_entry(k, &hdev->long_term_keys, list) {
3114 if (k->ediv != ediv || k->rand != rand)
3117 if (ltk_type_master(k->type) != master)
/* Look up an LTK by peer address, address type and master role. */
3126 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3127 u8 addr_type, bool master)
3131 list_for_each_entry(k, &hdev->long_term_keys, list)
3132 if (addr_type == k->bdaddr_type &&
3133 bacmp(bdaddr, &k->bdaddr) == 0 &&
3134 ltk_type_master(k->type) == master)
/* Find the IRK that resolves @rpa: first try the cached RPA for a cheap
 * match, then fall back to cryptographic resolution with each stored IRK,
 * caching the RPA on success.
 */
3140 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3142 struct smp_irk *irk;
3144 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3145 if (!bacmp(&irk->rpa, rpa))
3149 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3150 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3151 bacpy(&irk->rpa, rpa);
/* Look up an IRK by its identity address; NULL if absent or the address
 * is not a valid identity address.
 */
3159 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3162 struct smp_irk *irk;
3164 /* Identity Address must be public or static random */
3165 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (addr_type == irk->addr_type &&
3170 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr. Reuses an existing
 * entry when present, otherwise allocates a new one. When @persistent is
 * non-NULL it is set to whether the key should be stored permanently.
 */
3177 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3178 bdaddr_t *bdaddr, u8 *val, u8 type,
3179 u8 pin_len, bool *persistent)
3181 struct link_key *key, *old_key;
3184 old_key = hci_find_link_key(hdev, bdaddr);
3186 old_key_type = old_key->type;
/* 0xff marks "no previous key type known". */
3189 old_key_type = conn ? conn->key_type : 0xff;
3190 key = kzalloc(sizeof(*key), GFP_KERNEL);
3193 list_add(&key->list, &hdev->link_keys);
3196 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3198 /* Some buggy controller combinations generate a changed
3199 * combination key for legacy pairing even when there's no
 * previous key */
3201 if (type == HCI_LK_CHANGED_COMBINATION &&
3202 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3203 type = HCI_LK_COMBINATION;
3205 conn->key_type = type;
3208 bacpy(&key->bdaddr, bdaddr);
3209 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3210 key->pin_len = pin_len;
/* A changed-combination key keeps the semantics of the key it replaced. */
3212 if (type == HCI_LK_CHANGED_COMBINATION)
3213 key->type = old_key_type;
3218 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for @bdaddr/@addr_type,
 * keyed also by the master/slave role derived from @type.
 */
3224 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3225 u8 addr_type, u8 type, u8 authenticated,
3226 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3228 struct smp_ltk *key, *old_key;
3229 bool master = ltk_type_master(type);
3231 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3235 key = kzalloc(sizeof(*key), GFP_KERNEL);
3238 list_add(&key->list, &hdev->long_term_keys);
3241 bacpy(&key->bdaddr, bdaddr);
3242 key->bdaddr_type = addr_type;
3243 memcpy(key->val, tk, sizeof(key->val));
3244 key->authenticated = authenticated;
3247 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key for @bdaddr/@addr_type,
 * remembering the last resolvable private address seen for it.
 */
3253 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3256 struct smp_irk *irk;
3258 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3260 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3264 bacpy(&irk->bdaddr, bdaddr);
3265 irk->addr_type = addr_type;
3267 list_add(&irk->list, &hdev->identity_resolving_keys);
3270 memcpy(irk->val, val, 16);
3271 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any. */
3276 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3278 struct link_key *key;
3280 key = hci_find_link_key(hdev, bdaddr);
3284 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3286 list_del(&key->list);
/* Delete all LTKs stored for @bdaddr/@bdaddr_type.
 * Returns 0 when at least one key was removed, -ENOENT otherwise.
 */
3292 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3294 struct smp_ltk *k, *tmp;
3297 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3298 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3308 return removed ? 0 : -ENOENT;
/* Delete all IRKs stored for @bdaddr/@addr_type. */
3311 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3313 struct smp_irk *k, *tmp;
3315 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3316 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3319 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3326 /* HCI command timer function: the controller failed to answer the last
 * command in time. Log the stalled opcode (if one is recorded) and
 * restart the command work so the queue does not wedge forever. */
3327 static void hci_cmd_timeout(struct work_struct *work)
3329 struct hci_dev *hdev = container_of(work, struct hci_dev,
3332 if (hdev->sent_cmd) {
3333 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3334 u16 opcode = __le16_to_cpu(sent->opcode);
3336 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3338 BT_ERR("%s command tx timeout", hdev->name);
/* Allow one command to be sent again and kick the worker. */
3341 atomic_set(&hdev->cmd_cnt, 1);
3342 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for @bdaddr; NULL if absent. */
3345 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3348 struct oob_data *data;
3350 list_for_each_entry(data, &hdev->remote_oob_data, list)
3351 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored remote OOB data for @bdaddr, if any. */
3357 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3359 struct oob_data *data;
3361 data = hci_find_remote_oob_data(hdev, bdaddr);
3365 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3367 list_del(&data->list);
/* Remove (and free) all stored remote OOB pairing data. */
3373 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3375 struct oob_data *data, *n;
3377 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3378 list_del(&data->list);
/* Store (or update) P-192 remote OOB data for @bdaddr; the P-256
 * variants are explicitly zeroed since only 192-bit data was provided.
 */
3383 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3384 u8 *hash, u8 *randomizer)
3386 struct oob_data *data;
3388 data = hci_find_remote_oob_data(hdev, bdaddr);
3390 data = kmalloc(sizeof(*data), GFP_KERNEL);
3394 bacpy(&data->bdaddr, bdaddr);
3395 list_add(&data->list, &hdev->remote_oob_data);
3398 memcpy(data->hash192, hash, sizeof(data->hash192));
3399 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3401 memset(data->hash256, 0, sizeof(data->hash256));
3402 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3404 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store (or update) extended remote OOB data containing both the P-192
 * and P-256 hash/randomizer pairs for @bdaddr.
 */
3409 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3410 u8 *hash192, u8 *randomizer192,
3411 u8 *hash256, u8 *randomizer256)
3413 struct oob_data *data;
3415 data = hci_find_remote_oob_data(hdev, bdaddr);
3417 data = kmalloc(sizeof(*data), GFP_KERNEL);
3421 bacpy(&data->bdaddr, bdaddr);
3422 list_add(&data->list, &hdev->remote_oob_data);
3425 memcpy(data->hash192, hash192, sizeof(data->hash192));
3426 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3428 memcpy(data->hash256, hash256, sizeof(data->hash256));
3429 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3431 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find an entry matching @bdaddr/@type in a generic bdaddr list
 * (blacklist, whitelist, LE white list, ...); NULL if absent.
 */
3436 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3437 bdaddr_t *bdaddr, u8 type)
3439 struct bdaddr_list *b;
3441 list_for_each_entry(b, bdaddr_list, list) {
3442 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove (and free) every entry of a generic bdaddr list. */
3449 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3451 struct list_head *p, *n;
3453 list_for_each_safe(p, n, bdaddr_list) {
3454 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to a generic bdaddr list. BDADDR_ANY is rejected
 * and duplicates are refused (the elided branches return error codes).
 */
3461 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3463 struct bdaddr_list *entry;
3465 if (!bacmp(bdaddr, BDADDR_ANY))
3468 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3471 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3475 bacpy(&entry->bdaddr, bdaddr);
3476 entry->bdaddr_type = type;
3478 list_add(&entry->list, list);
/* Remove @bdaddr/@type from a generic bdaddr list; BDADDR_ANY clears
 * the whole list instead.
 */
3483 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3485 struct bdaddr_list *entry;
3487 if (!bacmp(bdaddr, BDADDR_ANY)) {
3488 hci_bdaddr_list_clear(list);
3492 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3496 list_del(&entry->list);
3502 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for @addr/@addr_type. */
3503 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3504 bdaddr_t *addr, u8 addr_type)
3506 struct hci_conn_params *params;
3508 /* The conn params list only contains identity addresses */
3509 if (!hci_is_identity_address(addr, addr_type))
3512 list_for_each_entry(params, &hdev->le_conn_params, list) {
3513 if (bacmp(&params->addr, addr) == 0 &&
3514 params->addr_type == addr_type) {
/* Check whether an LE link to @addr/@type currently exists and is in
 * the BT_CONNECTED state.
 */
3522 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3524 struct hci_conn *conn;
3526 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3530 if (conn->dst_type != type)
3533 if (conn->state != BT_CONNECTED)
3539 /* This function requires the caller holds hdev->lock */
/* Find conn params linked into a pending-action list (pend_le_conns or
 * pend_le_reports) by @addr/@addr_type. Iterates via the 'action' member.
 */
3540 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3541 bdaddr_t *addr, u8 addr_type)
3543 struct hci_conn_params *param;
3545 /* The list only contains identity addresses */
3546 if (!hci_is_identity_address(addr, addr_type))
3549 list_for_each_entry(param, list, action) {
3550 if (bacmp(&param->addr, addr) == 0 &&
3551 param->addr_type == addr_type)
3558 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for @addr/@addr_type,
 * initializing new entries from the controller-wide defaults.
 */
3559 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3560 bdaddr_t *addr, u8 addr_type)
3562 struct hci_conn_params *params;
3564 if (!hci_is_identity_address(addr, addr_type))
/* Return the existing entry if one is already stored. */
3567 params = hci_conn_params_lookup(hdev, addr, addr_type);
3571 params = kzalloc(sizeof(*params), GFP_KERNEL);
3573 BT_ERR("Out of memory");
3577 bacpy(&params->addr, addr);
3578 params->addr_type = addr_type;
3580 list_add(&params->list, &hdev->le_conn_params);
3581 INIT_LIST_HEAD(&params->action);
3583 params->conn_min_interval = hdev->le_conn_min_interval;
3584 params->conn_max_interval = hdev->le_conn_max_interval;
3585 params->conn_latency = hdev->le_conn_latency;
3586 params->supervision_timeout = hdev->le_supv_timeout;
3587 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3589 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3594 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for @addr/@addr_type, moving the entry
 * between the pending-report/pending-connect action lists as needed and
 * refreshing the background scan.
 */
3595 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3598 struct hci_conn_params *params;
3600 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged. */
3604 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on. */
3607 list_del_init(&params->action);
3609 switch (auto_connect) {
3610 case HCI_AUTO_CONN_DISABLED:
3611 case HCI_AUTO_CONN_LINK_LOSS:
3612 hci_update_background_scan(hdev);
3614 case HCI_AUTO_CONN_REPORT:
3615 list_add(&params->action, &hdev->pend_le_reports);
3616 hci_update_background_scan(hdev);
3618 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if one is not already up. */
3619 if (!is_connected(hdev, addr, addr_type)) {
3620 list_add(&params->action, &hdev->pend_le_conns);
3621 hci_update_background_scan(hdev);
3626 params->auto_connect = auto_connect;
3628 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3634 /* This function requires the caller holds hdev->lock */
/* Remove the stored LE connection parameters for @addr/@addr_type and
 * update the background scan accordingly.
 */
3635 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3637 struct hci_conn_params *params;
3639 params = hci_conn_params_lookup(hdev, addr, addr_type);
3643 list_del(&params->action);
3644 list_del(&params->list);
3647 hci_update_background_scan(hdev);
3649 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3652 /* This function requires the caller holds hdev->lock */
/* Drop only those LE conn param entries whose policy is DISABLED. */
3653 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3655 struct hci_conn_params *params, *tmp;
3657 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3658 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3660 list_del(&params->list);
3664 BT_DBG("All LE disabled connection parameters were removed");
3667 /* This function requires the caller holds hdev->lock */
/* Drop every stored LE conn param entry and refresh the background scan. */
3668 void hci_conn_params_clear_all(struct hci_dev *hdev)
3670 struct hci_conn_params *params, *tmp;
3672 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3673 list_del(&params->action);
3674 list_del(&params->list);
3678 hci_update_background_scan(hdev);
3680 BT_DBG("All LE connection parameters were removed");
/* Completion callback for the inquiry request issued after interleaved
 * LE scanning: on failure, fall back to the STOPPED discovery state.
 */
3683 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3686 BT_ERR("Failed to start inquiry: status %d", status);
3689 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3690 hci_dev_unlock(hdev);
/* Completion callback after disabling LE scanning. For pure LE
 * discovery the session ends; for interleaved discovery a classic
 * BR/EDR inquiry (GIAC) is started next.
 */
3695 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3697 /* General inquiry access code (GIAC) */
3698 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3699 struct hci_request req;
3700 struct hci_cp_inquiry cp;
3704 BT_ERR("Failed to disable LE scanning: status %d", status);
3708 switch (hdev->discovery.type) {
3709 case DISCOV_TYPE_LE:
3711 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3712 hci_dev_unlock(hdev);
3715 case DISCOV_TYPE_INTERLEAVED:
3716 hci_req_init(&req, hdev);
3718 memset(&cp, 0, sizeof(cp));
3719 memcpy(&cp.lap, lap, sizeof(cp.lap));
3720 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3721 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3725 hci_inquiry_cache_flush(hdev);
3727 err = hci_req_run(&req, inquiry_complete);
3729 BT_ERR("Inquiry request failed: err %d", err);
3730 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3733 hci_dev_unlock(hdev);
/* Delayed work that stops an ongoing LE scan by issuing the scan-disable
 * HCI request; continuation happens in le_scan_disable_work_complete().
 */
3738 static void le_scan_disable_work(struct work_struct *work)
3740 struct hci_dev *hdev = container_of(work, struct hci_dev,
3741 le_scan_disable.work);
3742 struct hci_request req;
3745 BT_DBG("%s", hdev->name);
3747 hci_req_init(&req, hdev);
3749 hci_req_add_le_scan_disable(&req);
3751 err = hci_req_run(&req, le_scan_disable_work_complete);
3753 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command for @rpa, unless the change
 * would race with active advertising or a pending LE connection.
 */
3756 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3758 struct hci_dev *hdev = req->hdev;
3760 /* If we're advertising or initiating an LE connection we can't
3761 * go ahead and change the random address at this time. This is
3762 * because the eventual initiator address used for the
3763 * subsequently created connection will be undefined (some
3764 * controllers use the new address and others the one we had
3765 * when the operation started).
3767 * In this kind of scenario skip the update and let the random
3768 * address be updated at the next cycle.
 */
3770 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3771 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3772 BT_DBG("Deferring random address update");
3776 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Select and (if needed) program the own address to use for LE
 * operations, in priority order: resolvable private address when
 * privacy is enabled, an unresolvable private address when privacy is
 * required, the static random address when forced or no public address
 * exists, otherwise the public address. *own_addr_type is set to match.
 */
3779 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3782 struct hci_dev *hdev = req->hdev;
3785 /* If privacy is enabled use a resolvable private address. If
3786 * current RPA has expired or there is something else than
3787 * the current RPA in use, then generate a new one.
 */
3789 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3792 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA if it has not expired and is programmed. */
3794 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3795 !bacmp(&hdev->random_addr, &hdev->rpa))
3798 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3800 BT_ERR("%s failed to generate new RPA", hdev->name);
3804 set_random_addr(req, &hdev->rpa);
/* Schedule regeneration when the RPA lifetime elapses. */
3806 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3807 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3812 /* In case of required privacy without resolvable private address,
3813 * use an unresolvable private address. This is useful for active
3814 * scanning and non-connectable advertising.
 */
3816 if (require_privacy) {
3819 get_random_bytes(&urpa, 6);
3820 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3822 *own_addr_type = ADDR_LE_DEV_RANDOM;
3823 set_random_addr(req, &urpa);
3827 /* If forcing static address is in use or there is no public
3828 * address use the static address as random address (but skip
3829 * the HCI command if the current random address is already the
 * static one). */
3832 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3833 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3834 *own_addr_type = ADDR_LE_DEV_RANDOM;
3835 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3836 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3837 &hdev->static_addr);
3841 /* Neither privacy nor static address is being used so use a
 * public address. */
3844 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3849 /* Copy the Identity Address of the controller.
3851 * If the controller has a public BD_ADDR, then by default use that one.
3852 * If this is a LE only controller without a public address, default to
3853 * the static random address.
3855 * For debugging purposes it is possible to force controllers with a
3856 * public address to use the static random address instead.
 */
3858 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3861 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3862 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3863 bacpy(bdaddr, &hdev->static_addr);
3864 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3866 bacpy(bdaddr, &hdev->bdaddr);
3867 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3871 /* Alloc HCI device: allocate and fully initialize a struct hci_dev with
 * default parameters, lists, work items and queues. Returns NULL on
 * allocation failure (branch elided in this extract). */
3872 struct hci_dev *hci_alloc_dev(void)
3874 struct hci_dev *hdev;
3876 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR packet types and link policy defaults. */
3880 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3881 hdev->esco_type = (ESCO_HV1);
3882 hdev->link_mode = (HCI_LM_ACCEPT);
3883 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3884 hdev->io_capability = 0x03; /* No Input No Output */
3885 hdev->manufacturer = 0xffff; /* Default to internal use */
3886 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3887 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3889 hdev->sniff_max_interval = 800;
3890 hdev->sniff_min_interval = 80;
/* LE defaults; values are in controller units (e.g. 0.625 ms slots for
 * scan parameters) — NOTE(review): unit interpretation per spec, confirm
 * against hci_core.h documentation. */
3892 hdev->le_adv_channel_map = 0x07;
3893 hdev->le_scan_interval = 0x0060;
3894 hdev->le_scan_window = 0x0030;
3895 hdev->le_conn_min_interval = 0x0028;
3896 hdev->le_conn_max_interval = 0x0038;
3897 hdev->le_conn_latency = 0x0000;
3898 hdev->le_supv_timeout = 0x002a;
3900 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3901 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3902 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3903 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3905 mutex_init(&hdev->lock);
3906 mutex_init(&hdev->req_lock);
3908 INIT_LIST_HEAD(&hdev->mgmt_pending);
3909 INIT_LIST_HEAD(&hdev->blacklist);
3910 INIT_LIST_HEAD(&hdev->whitelist);
3911 INIT_LIST_HEAD(&hdev->uuids);
3912 INIT_LIST_HEAD(&hdev->link_keys);
3913 INIT_LIST_HEAD(&hdev->long_term_keys);
3914 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3915 INIT_LIST_HEAD(&hdev->remote_oob_data);
3916 INIT_LIST_HEAD(&hdev->le_white_list);
3917 INIT_LIST_HEAD(&hdev->le_conn_params);
3918 INIT_LIST_HEAD(&hdev->pend_le_conns);
3919 INIT_LIST_HEAD(&hdev->pend_le_reports);
3920 INIT_LIST_HEAD(&hdev->conn_hash.list);
3922 INIT_WORK(&hdev->rx_work, hci_rx_work);
3923 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3924 INIT_WORK(&hdev->tx_work, hci_tx_work);
3925 INIT_WORK(&hdev->power_on, hci_power_on);
3927 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3928 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3929 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3931 skb_queue_head_init(&hdev->rx_q);
3932 skb_queue_head_init(&hdev->cmd_q);
3933 skb_queue_head_init(&hdev->raw_q);
3935 init_waitqueue_head(&hdev->req_wait_q);
3937 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3939 hci_init_sysfs(hdev);
3940 discovery_init(hdev);
3944 EXPORT_SYMBOL(hci_alloc_dev);
3946 /* Free HCI device */
3947 void hci_free_dev(struct hci_dev *hdev)
3949 /* will free via device release */
3950 put_device(&hdev->dev);
3952 EXPORT_SYMBOL(hci_free_dev);
3954 /* Register HCI device: allocate an index, create workqueues, debugfs,
 * crypto context, sysfs device and rfkill switch, then announce the
 * device and queue the initial power-on. Error paths (elided labels)
 * unwind in reverse order. */
3955 int hci_register_dev(struct hci_dev *hdev)
/* A driver must provide open/close/send callbacks at minimum. */
3959 if (!hdev->open || !hdev->close || !hdev->send)
3962 /* Do not allow HCI_AMP devices to register at index 0,
3963 * so the index can be used as the AMP controller ID.
 */
3965 switch (hdev->dev_type) {
3967 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3970 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3979 sprintf(hdev->name, "hci%d", id);
3982 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3984 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3985 WQ_MEM_RECLAIM, 1, hdev->name);
3986 if (!hdev->workqueue) {
3991 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3992 WQ_MEM_RECLAIM, 1, hdev->name);
3993 if (!hdev->req_workqueue) {
3994 destroy_workqueue(hdev->workqueue);
3999 if (!IS_ERR_OR_NULL(bt_debugfs))
4000 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4002 dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES-ECB context used for RPA generation/resolution (SMP). */
4004 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4006 if (IS_ERR(hdev->tfm_aes)) {
4007 BT_ERR("Unable to create crypto context");
4008 error = PTR_ERR(hdev->tfm_aes);
4009 hdev->tfm_aes = NULL;
4013 error = device_add(&hdev->dev);
4017 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4018 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: run without a switch. */
4021 if (rfkill_register(hdev->rfkill) < 0) {
4022 rfkill_destroy(hdev->rfkill);
4023 hdev->rfkill = NULL;
4027 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4028 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4030 set_bit(HCI_SETUP, &hdev->dev_flags);
4031 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4033 if (hdev->dev_type == HCI_BREDR) {
4034 /* Assume BR/EDR support until proven otherwise (such as
4035 * through reading supported features during init.
 */
4037 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4040 write_lock(&hci_dev_list_lock);
4041 list_add(&hdev->list, &hci_dev_list);
4042 write_unlock(&hci_dev_list_lock);
4044 /* Devices that are marked for raw-only usage are unconfigured
4045 * and should not be included in normal operation.
 */
4047 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4048 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4050 hci_notify(hdev, HCI_DEV_REG);
4053 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path (labels elided in this extract). */
4058 crypto_free_blkcipher(hdev->tfm_aes);
4060 destroy_workqueue(hdev->workqueue);
4061 destroy_workqueue(hdev->req_workqueue);
4063 ida_simple_remove(&hci_index_ida, hdev->id);
4067 EXPORT_SYMBOL(hci_register_dev);
4069 /* Unregister HCI device: tear down in reverse order of registration —
 * close the device, drop mgmt state, release rfkill/crypto/sysfs/debugfs
 * resources, flush all stored keys and lists, and free the index. */
4070 void hci_unregister_dev(struct hci_dev *hdev)
4074 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4076 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4080 write_lock(&hci_dev_list_lock);
4081 list_del(&hdev->list);
4082 write_unlock(&hci_dev_list_lock);
4084 hci_dev_do_close(hdev);
4086 for (i = 0; i < NUM_REASSEMBLY; i++)
4087 kfree_skb(hdev->reassembly[i]);
4089 cancel_work_sync(&hdev->power_on);
/* Only announce removal via mgmt for devices that completed setup. */
4091 if (!test_bit(HCI_INIT, &hdev->flags) &&
4092 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4093 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4095 mgmt_index_removed(hdev);
4096 hci_dev_unlock(hdev);
4099 /* mgmt_index_removed should take care of emptying the
 * pending command list */
4101 BUG_ON(!list_empty(&hdev->mgmt_pending));
4103 hci_notify(hdev, HCI_DEV_UNREG);
4106 rfkill_unregister(hdev->rfkill);
4107 rfkill_destroy(hdev->rfkill);
4111 crypto_free_blkcipher(hdev->tfm_aes);
4113 device_del(&hdev->dev);
4115 debugfs_remove_recursive(hdev->debugfs);
4117 destroy_workqueue(hdev->workqueue);
4118 destroy_workqueue(hdev->req_workqueue);
4121 hci_bdaddr_list_clear(&hdev->blacklist);
4122 hci_bdaddr_list_clear(&hdev->whitelist);
4123 hci_uuids_clear(hdev);
4124 hci_link_keys_clear(hdev);
4125 hci_smp_ltks_clear(hdev);
4126 hci_smp_irks_clear(hdev);
4127 hci_remote_oob_data_clear(hdev);
4128 hci_bdaddr_list_clear(&hdev->le_white_list);
4129 hci_conn_params_clear_all(hdev);
4130 hci_dev_unlock(hdev);
4134 ida_simple_remove(&hci_index_ida, id);
4136 EXPORT_SYMBOL(hci_unregister_dev);
4138 /* Suspend HCI device */
4139 int hci_suspend_dev(struct hci_dev *hdev)
4141 hci_notify(hdev, HCI_DEV_SUSPEND);
4144 EXPORT_SYMBOL(hci_suspend_dev);
4146 /* Resume HCI device */
4147 int hci_resume_dev(struct hci_dev *hdev)
4149 hci_notify(hdev, HCI_DEV_RESUME);
4152 EXPORT_SYMBOL(hci_resume_dev);
4154 /* Receive frame from HCI drivers: timestamp the skb, mark it incoming
 * and hand it to the RX worker. Frames arriving while the device is
 * neither up nor initializing are rejected (error path elided). */
4155 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4157 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4158 && !test_bit(HCI_INIT, &hdev->flags))) {
4164 bt_cb(skb)->incoming = 1;
/* Time stamp */
4167 __net_timestamp(skb);
4169 skb_queue_tail(&hdev->rx_q, skb);
4170 queue_work(hdev->workqueue, &hdev->rx_work);
4174 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly fragmented) HCI packet of @type from @data
 * into hdev->reassembly[@index]. Once the header is complete the
 * expected payload length is read from it; when the full frame has
 * arrived it is forwarded via hci_recv_frame(). Returns the number of
 * bytes left unconsumed (per the visible callers in this file).
 */
4176 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4177 int count, __u8 index)
4182 struct sk_buff *skb;
4183 struct bt_skb_cb *scb;
4185 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4186 index >= NUM_REASSEMBLY)
4189 skb = hdev->reassembly[index];
/* No partial frame yet: allocate one sized for the packet type. */
4193 case HCI_ACLDATA_PKT:
4194 len = HCI_MAX_FRAME_SIZE;
4195 hlen = HCI_ACL_HDR_SIZE;
4198 len = HCI_MAX_EVENT_SIZE;
4199 hlen = HCI_EVENT_HDR_SIZE;
4201 case HCI_SCODATA_PKT:
4202 len = HCI_MAX_SCO_SIZE;
4203 hlen = HCI_SCO_HDR_SIZE;
4207 skb = bt_skb_alloc(len, GFP_ATOMIC);
4211 scb = (void *) skb->cb;
4213 scb->pkt_type = type;
4215 hdev->reassembly[index] = skb;
/* Copy as much as is available, bounded by what is still expected. */
4219 scb = (void *) skb->cb;
4220 len = min_t(uint, scb->expect, count);
4222 memcpy(skb_put(skb, len), data, len);
/* Header just completed: learn the payload length from it. */
4231 if (skb->len == HCI_EVENT_HDR_SIZE) {
4232 struct hci_event_hdr *h = hci_event_hdr(skb);
4233 scb->expect = h->plen;
4235 if (skb_tailroom(skb) < scb->expect) {
4237 hdev->reassembly[index] = NULL;
4243 case HCI_ACLDATA_PKT:
4244 if (skb->len == HCI_ACL_HDR_SIZE) {
4245 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4246 scb->expect = __le16_to_cpu(h->dlen);
4248 if (skb_tailroom(skb) < scb->expect) {
4250 hdev->reassembly[index] = NULL;
4256 case HCI_SCODATA_PKT:
4257 if (skb->len == HCI_SCO_HDR_SIZE) {
4258 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4259 scb->expect = h->dlen;
4261 if (skb_tailroom(skb) < scb->expect) {
4263 hdev->reassembly[index] = NULL;
4270 if (scb->expect == 0) {
4271 /* Complete frame */
4273 bt_cb(skb)->pkt_type = type;
4274 hci_recv_frame(hdev, skb);
4276 hdev->reassembly[index] = NULL;
/* Feed driver-supplied fragments of a typed HCI packet into the
 * per-type reassembly slot (index = type - 1), looping until all
 * @count bytes are consumed.
 */
4284 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4288 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4292 rem = hci_reassembly(hdev, type, data, count, type - 1);
4296 data += (count - rem);
4302 EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot reserved for byte-stream input (e.g. UART drivers). */
4304 #define STREAM_REASSEMBLY 0
/* Feed a raw byte stream: the first byte of each frame carries the
 * packet type, after which the bytes are reassembled like typed input.
 */
4306 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4312 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4315 struct { char type; } *pkt;
4317 /* Start of the frame */
4324 type = bt_cb(skb)->pkt_type;
4326 rem = hci_reassembly(hdev, type, data, count,
4331 data += (count - rem);
4337 EXPORT_SYMBOL(hci_recv_stream_fragment);
4339 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure (L2CAP/SCO). */
4341 int hci_register_cb(struct hci_cb *cb)
4343 BT_DBG("%p name %s", cb, cb->name);
4345 write_lock(&hci_cb_list_lock);
4346 list_add(&cb->list, &hci_cb_list);
4347 write_unlock(&hci_cb_list_lock);
4351 EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback structure. */
4353 int hci_unregister_cb(struct hci_cb *cb)
4355 BT_DBG("%p name %s", cb, cb->name);
4357 write_lock(&hci_cb_list_lock);
4358 list_del(&cb->list);
4359 write_unlock(&hci_cb_list_lock);
4363 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand an outgoing frame to the driver, after timestamping it and
 * duplicating it to the monitor (and, in promiscuous mode, raw sockets).
 */
4365 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4369 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Time stamp */
4372 __net_timestamp(skb);
4374 /* Send copy to monitor */
4375 hci_send_to_monitor(hdev, skb);
4377 if (atomic_read(&hdev->promisc)) {
4378 /* Send copy to the sockets */
4379 hci_send_to_sock(hdev, skb);
4382 /* Get rid of skb owner, prior to sending to the driver. */
4385 err = hdev->send(hdev, skb);
4387 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialize an HCI request: empty command queue bound to @hdev
 * (remaining field initialization elided in this extract).
 */
4392 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4394 skb_queue_head_init(&req->cmd_q);
/* Submit a built HCI request: attach @complete to the final command,
 * splice the request's commands onto the device command queue under the
 * queue lock, and kick the command worker.
 */
4399 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4401 struct hci_dev *hdev = req->hdev;
4402 struct sk_buff *skb;
4403 unsigned long flags;
4405 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4407 /* If an error occurred during request building, remove all HCI
4408 * commands queued on the HCI request queue.
 */
4411 skb_queue_purge(&req->cmd_q);
4415 /* Do not allow empty requests */
4416 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the last command of the request. */
4419 skb = skb_peek_tail(&req->cmd_q);
4420 bt_cb(skb)->req.complete = complete;
4422 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4423 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4424 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4426 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: little-endian opcode header
 * followed by @plen bytes of @param; tagged as HCI_COMMAND_PKT.
 */
4431 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4432 u32 plen, const void *param)
4434 int len = HCI_COMMAND_HDR_SIZE + plen;
4435 struct hci_command_hdr *hdr;
4436 struct sk_buff *skb;
4438 skb = bt_skb_alloc(len, GFP_ATOMIC);
4442 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4443 hdr->opcode = cpu_to_le16(opcode);
4447 memcpy(skb_put(skb, plen), param, plen);
4449 BT_DBG("skb len %d", skb->len);
4451 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
/* Send a stand-alone HCI command: build the skb, mark it as the start
 * of a single-command request, queue it on hdev->cmd_q and kick the
 * command worker.  NOTE(review): the allocation-failure return path
 * and final return are elided in this extract.
 */
4456 /* Send HCI command */
4457 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4460 struct sk_buff *skb;
4462 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4464 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4466 BT_ERR("%s no memory for command", hdev->name);
/* Stand-alone HCI commands must be flagged as
 * single-command requests.
 */
4473 bt_cb(skb)->req.start = true;
4475 skb_queue_tail(&hdev->cmd_q, skb);
4476 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append one command to an asynchronous request.  The first command
 * queued is marked as the request start; @event records which HCI
 * event (if not 0) signals this command's completion.
 * NOTE(review): the early-return on a prior request error and on
 * allocation failure are elided in this extract.
 */
4481 /* Queue a command to an asynchronous HCI request */
4482 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4483 const void *param, u8 event)
4485 struct hci_dev *hdev = req->hdev;
4486 struct sk_buff *skb;
4488 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
/* If an error occurred during request building, there is no point in
 * queueing the HCI command. We can simply return.
 */
4496 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4498 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4499 hdev->name, opcode);
4504 if (skb_queue_empty(&req->cmd_q))
4505 bt_cb(skb)->req.start = true;
4507 bt_cb(skb)->req.event = event;
4509 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no specific completion
 * event (event == 0).  NOTE(review): the trailing parameter of the
 * signature is elided in this extract.
 */
4512 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4515 hci_req_add_ev(req, opcode, plen, param, 0);
/* Return a pointer to the parameter bytes of the last sent command,
 * but only if its opcode matches @opcode.  NOTE(review): the NULL
 * return paths are elided in this extract.
 */
4518 /* Get data from the previously sent command */
4519 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4521 struct hci_command_hdr *hdr;
4523 if (!hdev->sent_cmd)
4526 hdr = (void *) hdev->sent_cmd->data;
4528 if (hdr->opcode != cpu_to_le16(opcode))
4531 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4533 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags packed little-endian, plus
 * data length) to an outgoing skb.
 * NOTE(review): the declaration of "len" used below is elided in this
 * extract — presumably it captures skb->len before the push; confirm
 * against the full source.
 */
4537 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4539 struct hci_acl_hdr *hdr;
4542 skb_push(skb, HCI_ACL_HDR_SIZE);
4543 skb_reset_transport_header(skb);
4544 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4545 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4546 hdr->dlen = cpu_to_le16(len);
/* Queue an outgoing ACL packet, fragment by fragment.  The head skb
 * gets an ACL header based on the device type (connection handle vs
 * channel handle); any frag_list members are detached, given their own
 * ACL headers with ACL_START cleared, and queued atomically under the
 * queue lock so the fragments stay contiguous.
 * NOTE(review): case labels, braces and the fragment-loop structure
 * are partially elided in this extract.
 */
4549 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4550 struct sk_buff *skb, __u16 flags)
4552 struct hci_conn *conn = chan->conn;
4553 struct hci_dev *hdev = conn->hdev;
4554 struct sk_buff *list;
4556 skb->len = skb_headlen(skb);
4559 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4561 switch (hdev->dev_type) {
4563 hci_add_acl_hdr(skb, conn->handle, flags);
4566 hci_add_acl_hdr(skb, chan->handle, flags);
4569 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4573 list = skb_shinfo(skb)->frag_list;
4575 /* Non fragmented */
4576 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4578 skb_queue_tail(queue, skb);
4581 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4583 skb_shinfo(skb)->frag_list = NULL;
4585 /* Queue all fragments atomically */
4586 spin_lock(&queue->lock);
4588 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
4590 flags &= ~ACL_START;
4593 skb = list; list = list->next;
4595 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4596 hci_add_acl_hdr(skb, conn->handle, flags);
4598 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4600 __skb_queue_tail(queue, skb);
4603 spin_unlock(&queue->lock);
4607 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4609 struct hci_dev *hdev = chan->conn->hdev;
4611 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4613 hci_queue_acl(chan, &chan->data_q, skb, flags);
4615 queue_work(hdev->workqueue, &hdev->tx_work);
/* Queue an outgoing SCO packet: build the SCO header on the stack,
 * prepend it to the skb, tag the packet type and hand it to the TX
 * worker via the connection's data queue.
 */
4619 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4621 struct hci_dev *hdev = conn->hdev;
4622 struct hci_sco_hdr hdr;
4624 BT_DBG("%s len %d", hdev->name, skb->len);
4626 hdr.handle = cpu_to_le16(conn->handle);
4627 hdr.dlen = skb->len;
4629 skb_push(skb, HCI_SCO_HDR_SIZE);
4630 skb_reset_transport_header(skb);
4631 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4633 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4635 skb_queue_tail(&conn->data_q, skb);
4636 queue_work(hdev->workqueue, &hdev->tx_work);
4639 /* ---- HCI TX task (outgoing data) ---- */
4641 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest outstanding (sent) packets, and compute its TX quota from the
 * matching controller credit counter.
 * NOTE(review): the RCU read-side markers, the quote computation and
 * the return are elided in this extract.
 */
4642 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4645 struct hci_conn_hash *h = &hdev->conn_hash;
4646 struct hci_conn *conn = NULL, *c;
4647 unsigned int num = 0, min = ~0;
4649 /* We don't have to lock device here. Connections are always
4650 * added and removed with TX task disabled. */
4654 list_for_each_entry_rcu(c, &h->list, list) {
4655 if (c->type != type || skb_queue_empty(&c->data_q))
4658 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest in-flight packets. */
4663 if (c->sent < min) {
4668 if (hci_conn_num(hdev, type) == num)
4677 switch (conn->type) {
4679 cnt = hdev->acl_cnt;
4683 cnt = hdev->sco_cnt;
4686 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4690 BT_ERR("Unknown link type");
4698 BT_DBG("conn %p quote %d", conn, *quote);
/* Link TX timeout handler: disconnect every connection of the given
 * type that still has unacknowledged packets outstanding.
 */
4702 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4704 struct hci_conn_hash *h = &hdev->conn_hash;
4707 BT_ERR("%s link tx timeout", hdev->name);
4711 /* Kill stalled connections */
4712 list_for_each_entry_rcu(c, &h->list, list) {
4713 if (c->type == type && c->sent) {
4714 BT_ERR("%s killing stalled connection %pMR",
4715 hdev->name, &c->dst);
4716 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: among connections of the given type, select the
 * channel whose head skb has the highest priority, breaking ties by
 * fewest in-flight packets, and derive the TX quota from the matching
 * credit counter.  NOTE(review): continue statements, braces and the
 * quote computation are elided in this extract.
 */
4723 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4726 struct hci_conn_hash *h = &hdev->conn_hash;
4727 struct hci_chan *chan = NULL;
4728 unsigned int num = 0, min = ~0, cur_prio = 0;
4729 struct hci_conn *conn;
4730 int cnt, q, conn_num = 0;
4732 BT_DBG("%s", hdev->name);
4736 list_for_each_entry_rcu(conn, &h->list, list) {
4737 struct hci_chan *tmp;
4739 if (conn->type != type)
4742 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4747 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4748 struct sk_buff *skb;
4750 if (skb_queue_empty(&tmp->data_q))
4753 skb = skb_peek(&tmp->data_q);
/* Skip channels below the best priority seen so far. */
4754 if (skb->priority < cur_prio)
4757 if (skb->priority > cur_prio) {
4760 cur_prio = skb->priority;
4765 if (conn->sent < min) {
4771 if (hci_conn_num(hdev, type) == conn_num)
4780 switch (chan->conn->type) {
4782 cnt = hdev->acl_cnt;
4785 cnt = hdev->block_cnt;
4789 cnt = hdev->sco_cnt;
4792 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4796 BT_ERR("Unknown link type");
4801 BT_DBG("chan %p quote %d", chan, *quote);
/* After a TX round, promote the head skb of each starved channel
 * toward HCI_PRIO_MAX - 1 so lower-priority traffic cannot be starved
 * forever by higher-priority channels.
 * NOTE(review): the skipped-channel bookkeeping and loop exits are
 * elided in this extract.
 */
4805 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4807 struct hci_conn_hash *h = &hdev->conn_hash;
4808 struct hci_conn *conn;
4811 BT_DBG("%s", hdev->name);
4815 list_for_each_entry_rcu(conn, &h->list, list) {
4816 struct hci_chan *chan;
4818 if (conn->type != type)
4821 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4826 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4827 struct sk_buff *skb;
4834 if (skb_queue_empty(&chan->data_q))
4837 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — nothing to do. */
4838 if (skb->priority >= HCI_PRIO_MAX - 1)
4841 skb->priority = HCI_PRIO_MAX - 1;
4843 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4847 if (hci_conn_num(hdev, type) == num)
4855 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4857 /* Calculate count of blocks used by this packet */
4858 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4861 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4863 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4864 /* ACL tx timeout must be longer than maximum
4865 * link supervision timeout (40.9 seconds) */
4866 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4867 HCI_ACL_TX_TIMEOUT))
4868 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, pick the best
 * channel, drain up to its quota of same-priority packets, send each
 * frame and update the last-TX timestamp.  If anything was sent,
 * rebalance channel priorities afterwards.
 * NOTE(review): the per-skb credit/sent accounting lines are elided
 * in this extract.
 */
4872 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4874 unsigned int cnt = hdev->acl_cnt;
4875 struct hci_chan *chan;
4876 struct sk_buff *skb;
4879 __check_timeout(hdev, cnt);
4881 while (hdev->acl_cnt &&
4882 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4883 u32 priority = (skb_peek(&chan->data_q))->priority;
4884 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4886 skb->len, skb->priority);
4888 /* Stop if priority has changed */
4889 if (skb->priority < priority)
4892 skb = skb_dequeue(&chan->data_q);
4894 hci_conn_enter_active_mode(chan->conn,
4895 bt_cb(skb)->force_active);
4897 hci_send_frame(hdev, skb);
4898 hdev->acl_last_tx = jiffies;
4906 if (cnt != hdev->acl_cnt)
4907 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets).  For AMP controllers the link type is
 * switched accordingly; packets larger than the remaining block
 * budget stop the inner loop.
 * NOTE(review): the "type" variable initialisation and several
 * braces are elided in this extract.
 */
4910 static void hci_sched_acl_blk(struct hci_dev *hdev)
4912 unsigned int cnt = hdev->block_cnt;
4913 struct hci_chan *chan;
4914 struct sk_buff *skb;
4918 __check_timeout(hdev, cnt);
4920 BT_DBG("%s", hdev->name);
4922 if (hdev->dev_type == HCI_AMP)
4927 while (hdev->block_cnt > 0 &&
4928 (chan = hci_chan_sent(hdev, type, &quote))) {
4929 u32 priority = (skb_peek(&chan->data_q))->priority;
4930 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4934 skb->len, skb->priority);
4936 /* Stop if priority has changed */
4937 if (skb->priority < priority)
4940 skb = skb_dequeue(&chan->data_q);
4942 blocks = __get_blocks(hdev, skb);
/* Packet needs more blocks than we have left — stop here. */
4943 if (blocks > hdev->block_cnt)
4946 hci_conn_enter_active_mode(chan->conn,
4947 bt_cb(skb)->force_active);
4949 hci_send_frame(hdev, skb);
4950 hdev->acl_last_tx = jiffies;
4952 hdev->block_cnt -= blocks;
4955 chan->sent += blocks;
4956 chan->conn->sent += blocks;
4960 if (cnt != hdev->block_cnt)
4961 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet-based or block-based variant
 * depending on the controller's flow control mode.  Bail out early
 * when there is no link of the type this controller carries.
 */
4964 static void hci_sched_acl(struct hci_dev *hdev)
4966 BT_DBG("%s", hdev->name);
4968 /* No ACL link over BR/EDR controller */
4969 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4972 /* No AMP link over AMP controller */
4973 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4976 switch (hdev->flow_ctl_mode) {
4977 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4978 hci_sched_acl_pkt(hdev);
4981 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4982 hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin over SCO connections via hci_low_sent(),
 * sending up to each quota while SCO credits remain.
 * NOTE(review): the conn->sent accounting between the send and the
 * wrap-around check is elided in this extract.
 */
4988 static void hci_sched_sco(struct hci_dev *hdev)
4990 struct hci_conn *conn;
4991 struct sk_buff *skb;
4994 BT_DBG("%s", hdev->name);
4996 if (!hci_conn_num(hdev, SCO_LINK))
4999 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5000 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5001 BT_DBG("skb %p len %d", skb, skb->len);
5002 hci_send_frame(hdev, skb);
5005 if (conn->sent == ~0)
/* eSCO scheduler — identical structure to hci_sched_sco() but for
 * ESCO_LINK connections (shares the SCO credit counter).
 */
5011 static void hci_sched_esco(struct hci_dev *hdev)
5013 struct hci_conn *conn;
5014 struct sk_buff *skb;
5017 BT_DBG("%s", hdev->name);
5019 if (!hci_conn_num(hdev, ESCO_LINK))
5022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5024 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5025 BT_DBG("skb %p len %d", skb, skb->len);
5026 hci_send_frame(hdev, skb);
5029 if (conn->sent == ~0)
/* LE scheduler: like the ACL packet scheduler but using LE credits
 * when the controller reports a separate LE buffer pool (le_pkts),
 * falling back to the shared ACL pool otherwise.  A stalled LE link
 * (no credits for > 45 s) triggers the link TX timeout handling.
 * NOTE(review): the credit/sent accounting and the branch writing the
 * remaining count back to le_cnt vs acl_cnt are partially elided in
 * this extract.
 */
5035 static void hci_sched_le(struct hci_dev *hdev)
5037 struct hci_chan *chan;
5038 struct sk_buff *skb;
5039 int quote, cnt, tmp;
5041 BT_DBG("%s", hdev->name);
5043 if (!hci_conn_num(hdev, LE_LINK))
5046 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5047 /* LE tx timeout must be longer than maximum
5048 * link supervision timeout (40.9 seconds) */
5049 if (!hdev->le_cnt && hdev->le_pkts &&
5050 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5051 hci_link_tx_to(hdev, LE_LINK);
5054 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5056 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5057 u32 priority = (skb_peek(&chan->data_q))->priority;
5058 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5059 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5060 skb->len, skb->priority);
5062 /* Stop if priority has changed */
5063 if (skb->priority < priority)
5066 skb = skb_dequeue(&chan->data_q);
5068 hci_send_frame(hdev, skb);
5069 hdev->le_last_tx = jiffies;
5080 hdev->acl_cnt = cnt;
5083 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker entry point: unless the device is owned by a user-channel
 * socket, run all per-link-type schedulers, then flush any raw
 * (unknown type) packets straight to the driver.
 */
5086 static void hci_tx_work(struct work_struct *work)
5088 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5089 struct sk_buff *skb;
5091 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5092 hdev->sco_cnt, hdev->le_cnt);
5094 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5095 /* Schedule queues and send stuff to HCI driver */
5096 hci_sched_acl(hdev);
5097 hci_sched_sco(hdev);
5098 hci_sched_esco(hdev);
5102 /* Send next queued raw (unknown type) packet */
5103 while ((skb = skb_dequeue(&hdev->raw_q)))
5104 hci_send_frame(hdev, skb);
5107 /* ----- HCI RX task (incoming data processing) ----- */
5109 /* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, split the
 * handle field into connection handle and flags, look up the
 * connection under hdev->lock and pass the payload up to L2CAP.
 * NOTE(review): the hci_dev_lock() call and the kfree_skb on the
 * unknown-handle path are elided in this extract.
 */
5110 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5112 struct hci_acl_hdr *hdr = (void *) skb->data;
5113 struct hci_conn *conn;
5114 __u16 handle, flags;
5116 skb_pull(skb, HCI_ACL_HDR_SIZE);
5118 handle = __le16_to_cpu(hdr->handle);
5119 flags = hci_flags(handle);
5120 handle = hci_handle(handle);
5122 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5125 hdev->stat.acl_rx++;
5128 conn = hci_conn_hash_lookup_handle(hdev, handle);
5129 hci_dev_unlock(hdev);
5132 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5134 /* Send to upper protocol */
5135 l2cap_recv_acldata(conn, skb, flags);
5138 BT_ERR("%s ACL packet for unknown connection handle %d",
5139 hdev->name, handle);
5145 /* SCO data packet */
/* Handle an incoming SCO data packet: strip the SCO header, look up
 * the connection by handle and hand the payload to the SCO layer.
 * NOTE(review): the hci_dev_lock() call and the kfree_skb on the
 * unknown-handle path are elided in this extract.
 */
5146 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5148 struct hci_sco_hdr *hdr = (void *) skb->data;
5149 struct hci_conn *conn;
5152 skb_pull(skb, HCI_SCO_HDR_SIZE);
5154 handle = __le16_to_cpu(hdr->handle);
5156 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5158 hdev->stat.sco_rx++;
5161 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 hci_dev_unlock(hdev);
5165 /* Send to upper protocol */
5166 sco_recv_scodata(conn, skb);
5169 BT_ERR("%s SCO packet for unknown connection handle %d",
5170 hdev->name, handle);
/* A request is complete when the next command waiting on the queue is
 * the start of a new request.  NOTE(review): the empty-queue early
 * return of the original is elided in this extract.
 */
5176 static bool hci_req_is_complete(struct hci_dev *hdev)
5178 struct sk_buff *skb;
5180 skb = skb_peek(&hdev->cmd_q);
5184 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of the command
 * queue — except HCI_Reset, which must never be resent — and kick the
 * command worker.  NOTE(review): early returns and the opcode
 * declaration are elided in this extract.
 */
5187 static void hci_resend_last(struct hci_dev *hdev)
5189 struct hci_command_hdr *sent;
5190 struct sk_buff *skb;
5193 if (!hdev->sent_cmd)
5196 sent = (void *) hdev->sent_cmd->data;
5197 opcode = __le16_to_cpu(sent->opcode);
5198 if (opcode == HCI_OP_RESET)
5201 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5205 skb_queue_head(&hdev->cmd_q, skb);
5206 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the request it
 * belongs to is finished, locate the request's completion callback
 * (either on hdev->sent_cmd or on a queued command), purge the rest of
 * the request's commands from the queue on failure, and invoke the
 * callback once.  NOTE(review): several control-flow lines (returns,
 * the status check before the purge) are elided in this extract.
 */
5209 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5211 hci_req_complete_t req_complete = NULL;
5212 struct sk_buff *skb;
5213 unsigned long flags;
5215 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5217 /* If the completed command doesn't match the last one that was
5218 * sent we need to do special handling of it.
5220 if (!hci_sent_cmd_data(hdev, opcode)) {
5221 /* Some CSR based controllers generate a spontaneous
5222 * reset complete event during init and any pending
5223 * command will never be completed. In such a case we
5224 * need to resend whatever was the last sent
5227 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5228 hci_resend_last(hdev);
5233 /* If the command succeeded and there's still more commands in
5234 * this request the request is not yet complete.
5236 if (!status && !hci_req_is_complete(hdev))
5239 /* If this was the last command in a request the complete
5240 * callback would be found in hdev->sent_cmd instead of the
5241 * command queue (hdev->cmd_q).
5243 if (hdev->sent_cmd) {
5244 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5247 /* We must set the complete callback to NULL to
5248 * avoid calling the callback more than once if
5249 * this function gets called again.
5251 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5257 /* Remove all pending commands belonging to this request */
5258 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5259 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5260 if (bt_cb(skb)->req.start) {
5261 __skb_queue_head(&hdev->cmd_q, skb);
5265 req_complete = bt_cb(skb)->req.complete;
5268 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5272 req_complete(hdev, status);
/* RX worker: drain hdev->rx_q, mirroring each packet to the monitor
 * (and, in promiscuous mode, to raw sockets), then dispatch by packet
 * type to the event/ACL/SCO handlers.  Data packets are dropped while
 * the device is in HCI_INIT, and everything is dropped when a
 * user-channel socket owns the device.
 * NOTE(review): the kfree_skb calls, case labels and break statements
 * are partially elided in this extract.
 */
5275 static void hci_rx_work(struct work_struct *work)
5277 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5278 struct sk_buff *skb;
5280 BT_DBG("%s", hdev->name);
5282 while ((skb = skb_dequeue(&hdev->rx_q))) {
5283 /* Send copy to monitor */
5284 hci_send_to_monitor(hdev, skb);
5286 if (atomic_read(&hdev->promisc)) {
5287 /* Send copy to the sockets */
5288 hci_send_to_sock(hdev, skb);
5291 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5296 if (test_bit(HCI_INIT, &hdev->flags)) {
5297 /* Don't process data packets in these states. */
5298 switch (bt_cb(skb)->pkt_type) {
5299 case HCI_ACLDATA_PKT:
5300 case HCI_SCODATA_PKT:
5307 switch (bt_cb(skb)->pkt_type) {
5309 BT_DBG("%s Event packet", hdev->name);
5310 hci_event_packet(hdev, skb);
5313 case HCI_ACLDATA_PKT:
5314 BT_DBG("%s ACL data packet", hdev->name);
5315 hci_acldata_packet(hdev, skb);
5318 case HCI_SCODATA_PKT:
5319 BT_DBG("%s SCO data packet", hdev->name);
5320 hci_scodata_packet(hdev, skb);
/* Command worker: when a command credit is available, dequeue the next
 * command, keep a clone in hdev->sent_cmd for completion matching,
 * send it and (re)arm the command timeout — unless a reset is in
 * flight, in which case the timer is cancelled.  On clone failure the
 * command is put back and the worker re-queued.
 * NOTE(review): an early return and the timer argument lines are
 * elided in this extract.
 */
5330 static void hci_cmd_work(struct work_struct *work)
5332 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5333 struct sk_buff *skb;
5335 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5336 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5338 /* Send queued commands */
5339 if (atomic_read(&hdev->cmd_cnt)) {
5340 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd reference before storing the new one. */
5344 kfree_skb(hdev->sent_cmd);
5346 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5347 if (hdev->sent_cmd) {
5348 atomic_dec(&hdev->cmd_cnt);
5349 hci_send_frame(hdev, skb);
5350 if (test_bit(HCI_RESET, &hdev->flags))
5351 cancel_delayed_work(&hdev->cmd_timer);
5353 schedule_delayed_work(&hdev->cmd_timer,
5356 skb_queue_head(&hdev->cmd_q, skb);
5357 queue_work(hdev->workqueue, &hdev->cmd_work);
5362 void hci_req_add_le_scan_disable(struct hci_request *req)
5364 struct hci_cp_le_set_scan_enable cp;
5366 memset(&cp, 0, sizeof(cp));
5367 cp.enable = LE_SCAN_DISABLE;
5368 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5371 void hci_req_add_le_passive_scan(struct hci_request *req)
5373 struct hci_cp_le_set_scan_param param_cp;
5374 struct hci_cp_le_set_scan_enable enable_cp;
5375 struct hci_dev *hdev = req->hdev;
5378 /* Set require_privacy to false since no SCAN_REQ are send
5379 * during passive scanning. Not using an unresolvable address
5380 * here is important so that peer devices using direct
5381 * advertising with our address will be correctly reported
5382 * by the controller.
5384 if (hci_update_random_address(req, false, &own_addr_type))
5387 memset(¶m_cp, 0, sizeof(param_cp));
5388 param_cp.type = LE_SCAN_PASSIVE;
5389 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5390 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5391 param_cp.own_address_type = own_addr_type;
5392 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5395 memset(&enable_cp, 0, sizeof(enable_cp));
5396 enable_cp.enable = LE_SCAN_ENABLE;
5397 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5398 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: log on failure.
 * NOTE(review): the status check guarding the BT_ERR/BT_DBG call is
 * elided in this extract.
 */
5402 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5405 BT_DBG("HCI request failed to update background scanning: "
5406 "status 0x%2.2x", status);
5409 /* This function controls the background scanning based on hdev->pend_le_conns
5410 * list. If there are pending LE connection we start the background scanning,
5411 * otherwise we stop it.
5413 * This function requires the caller holds hdev->lock.
5415 void hci_update_background_scan(struct hci_dev *hdev)
5417 struct hci_request req;
5418 struct hci_conn *conn;
5421 if (!test_bit(HCI_UP, &hdev->flags) ||
5422 test_bit(HCI_INIT, &hdev->flags) ||
5423 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5424 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5425 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5426 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5429 /* No point in doing scanning if LE support hasn't been enabled */
5430 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5433 /* If discovery is active don't interfere with it */
5434 if (hdev->discovery.state != DISCOVERY_STOPPED)
5437 hci_req_init(&req, hdev);
5439 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5440 list_empty(&hdev->pend_le_conns) &&
5441 list_empty(&hdev->pend_le_reports)) {
5442 /* If there is no pending LE connections or devices
5443 * to be scanned for, we should stop the background
5447 /* If controller is not scanning we are done. */
5448 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5451 hci_req_add_le_scan_disable(&req);
5453 BT_DBG("%s stopping background scanning", hdev->name);
5455 /* If there is at least one pending LE connection, we should
5456 * keep the background scan running.
5459 /* If controller is connecting, we should not start scanning
5460 * since some controllers are not able to scan and connect at
5463 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5467 /* If controller is currently scanning, we stop it to ensure we
5468 * don't miss any advertising (due to duplicates filter).
5470 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5471 hci_req_add_le_scan_disable(&req);
5473 hci_req_add_le_passive_scan(&req);
5475 BT_DBG("%s starting background scanning", hdev->name);
5478 err = hci_req_run(&req, update_background_scan_complete);
5480 BT_ERR("Failed to run HCI request: err %d", err);