2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
59 static void hci_notify(struct hci_dev *hdev, int event)
61 hci_sock_dev_event(hdev, event);
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
194 static int whitelist_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bdaddr_list *b;
200 list_for_each_entry(b, &hdev->whitelist, list)
201 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
202 hci_dev_unlock(hdev);
207 static int whitelist_open(struct inode *inode, struct file *file)
209 return single_open(file, whitelist_show, inode->i_private);
212 static const struct file_operations whitelist_fops = {
213 .open = whitelist_open,
216 .release = single_release,
219 static int uuids_show(struct seq_file *f, void *p)
221 struct hci_dev *hdev = f->private;
222 struct bt_uuid *uuid;
225 list_for_each_entry(uuid, &hdev->uuids, list) {
228 /* The Bluetooth UUID values are stored in big endian,
229 * but with reversed byte order. So convert them into
230 * the right order for the %pUb modifier.
232 for (i = 0; i < 16; i++)
233 val[i] = uuid->uuid[15 - i];
235 seq_printf(f, "%pUb\n", val);
237 hci_dev_unlock(hdev);
242 static int uuids_open(struct inode *inode, struct file *file)
244 return single_open(file, uuids_show, inode->i_private);
247 static const struct file_operations uuids_fops = {
251 .release = single_release,
254 static int inquiry_cache_show(struct seq_file *f, void *p)
256 struct hci_dev *hdev = f->private;
257 struct discovery_state *cache = &hdev->discovery;
258 struct inquiry_entry *e;
262 list_for_each_entry(e, &cache->all, all) {
263 struct inquiry_data *data = &e->data;
264 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
266 data->pscan_rep_mode, data->pscan_period_mode,
267 data->pscan_mode, data->dev_class[2],
268 data->dev_class[1], data->dev_class[0],
269 __le16_to_cpu(data->clock_offset),
270 data->rssi, data->ssp_mode, e->timestamp);
273 hci_dev_unlock(hdev);
278 static int inquiry_cache_open(struct inode *inode, struct file *file)
280 return single_open(file, inquiry_cache_show, inode->i_private);
283 static const struct file_operations inquiry_cache_fops = {
284 .open = inquiry_cache_open,
287 .release = single_release,
290 static int link_keys_show(struct seq_file *f, void *ptr)
292 struct hci_dev *hdev = f->private;
293 struct list_head *p, *n;
296 list_for_each_safe(p, n, &hdev->link_keys) {
297 struct link_key *key = list_entry(p, struct link_key, list);
298 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
299 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
301 hci_dev_unlock(hdev);
306 static int link_keys_open(struct inode *inode, struct file *file)
308 return single_open(file, link_keys_show, inode->i_private);
311 static const struct file_operations link_keys_fops = {
312 .open = link_keys_open,
315 .release = single_release,
318 static int dev_class_show(struct seq_file *f, void *ptr)
320 struct hci_dev *hdev = f->private;
323 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
324 hdev->dev_class[1], hdev->dev_class[0]);
325 hci_dev_unlock(hdev);
330 static int dev_class_open(struct inode *inode, struct file *file)
332 return single_open(file, dev_class_show, inode->i_private);
335 static const struct file_operations dev_class_fops = {
336 .open = dev_class_open,
339 .release = single_release,
342 static int voice_setting_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->voice_setting;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
354 NULL, "0x%4.4llx\n");
356 static int auto_accept_delay_set(void *data, u64 val)
358 struct hci_dev *hdev = data;
361 hdev->auto_accept_delay = val;
362 hci_dev_unlock(hdev);
367 static int auto_accept_delay_get(void *data, u64 *val)
369 struct hci_dev *hdev = data;
372 *val = hdev->auto_accept_delay;
373 hci_dev_unlock(hdev);
378 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
379 auto_accept_delay_set, "%llu\n");
381 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
382 size_t count, loff_t *ppos)
384 struct hci_dev *hdev = file->private_data;
387 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
390 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
393 static ssize_t force_sc_support_write(struct file *file,
394 const char __user *user_buf,
395 size_t count, loff_t *ppos)
397 struct hci_dev *hdev = file->private_data;
399 size_t buf_size = min(count, (sizeof(buf)-1));
402 if (test_bit(HCI_UP, &hdev->flags))
405 if (copy_from_user(buf, user_buf, buf_size))
408 buf[buf_size] = '\0';
409 if (strtobool(buf, &enable))
412 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
415 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
420 static const struct file_operations force_sc_support_fops = {
422 .read = force_sc_support_read,
423 .write = force_sc_support_write,
424 .llseek = default_llseek,
427 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
428 size_t count, loff_t *ppos)
430 struct hci_dev *hdev = file->private_data;
433 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
436 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
439 static const struct file_operations sc_only_mode_fops = {
441 .read = sc_only_mode_read,
442 .llseek = default_llseek,
445 static int idle_timeout_set(void *data, u64 val)
447 struct hci_dev *hdev = data;
449 if (val != 0 && (val < 500 || val > 3600000))
453 hdev->idle_timeout = val;
454 hci_dev_unlock(hdev);
459 static int idle_timeout_get(void *data, u64 *val)
461 struct hci_dev *hdev = data;
464 *val = hdev->idle_timeout;
465 hci_dev_unlock(hdev);
470 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
471 idle_timeout_set, "%llu\n");
473 static int rpa_timeout_set(void *data, u64 val)
475 struct hci_dev *hdev = data;
477 /* Require the RPA timeout to be at least 30 seconds and at most
480 if (val < 30 || val > (60 * 60 * 24))
484 hdev->rpa_timeout = val;
485 hci_dev_unlock(hdev);
490 static int rpa_timeout_get(void *data, u64 *val)
492 struct hci_dev *hdev = data;
495 *val = hdev->rpa_timeout;
496 hci_dev_unlock(hdev);
501 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
502 rpa_timeout_set, "%llu\n");
504 static int sniff_min_interval_set(void *data, u64 val)
506 struct hci_dev *hdev = data;
508 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
512 hdev->sniff_min_interval = val;
513 hci_dev_unlock(hdev);
518 static int sniff_min_interval_get(void *data, u64 *val)
520 struct hci_dev *hdev = data;
523 *val = hdev->sniff_min_interval;
524 hci_dev_unlock(hdev);
529 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
530 sniff_min_interval_set, "%llu\n");
532 static int sniff_max_interval_set(void *data, u64 val)
534 struct hci_dev *hdev = data;
536 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
540 hdev->sniff_max_interval = val;
541 hci_dev_unlock(hdev);
546 static int sniff_max_interval_get(void *data, u64 *val)
548 struct hci_dev *hdev = data;
551 *val = hdev->sniff_max_interval;
552 hci_dev_unlock(hdev);
557 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
558 sniff_max_interval_set, "%llu\n");
560 static int conn_info_min_age_set(void *data, u64 val)
562 struct hci_dev *hdev = data;
564 if (val == 0 || val > hdev->conn_info_max_age)
568 hdev->conn_info_min_age = val;
569 hci_dev_unlock(hdev);
574 static int conn_info_min_age_get(void *data, u64 *val)
576 struct hci_dev *hdev = data;
579 *val = hdev->conn_info_min_age;
580 hci_dev_unlock(hdev);
585 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
586 conn_info_min_age_set, "%llu\n");
588 static int conn_info_max_age_set(void *data, u64 val)
590 struct hci_dev *hdev = data;
592 if (val == 0 || val < hdev->conn_info_min_age)
596 hdev->conn_info_max_age = val;
597 hci_dev_unlock(hdev);
602 static int conn_info_max_age_get(void *data, u64 *val)
604 struct hci_dev *hdev = data;
607 *val = hdev->conn_info_max_age;
608 hci_dev_unlock(hdev);
613 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
614 conn_info_max_age_set, "%llu\n");
616 static int identity_show(struct seq_file *f, void *p)
618 struct hci_dev *hdev = f->private;
624 hci_copy_identity_address(hdev, &addr, &addr_type);
626 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
627 16, hdev->irk, &hdev->rpa);
629 hci_dev_unlock(hdev);
634 static int identity_open(struct inode *inode, struct file *file)
636 return single_open(file, identity_show, inode->i_private);
639 static const struct file_operations identity_fops = {
640 .open = identity_open,
643 .release = single_release,
646 static int random_address_show(struct seq_file *f, void *p)
648 struct hci_dev *hdev = f->private;
651 seq_printf(f, "%pMR\n", &hdev->random_addr);
652 hci_dev_unlock(hdev);
657 static int random_address_open(struct inode *inode, struct file *file)
659 return single_open(file, random_address_show, inode->i_private);
662 static const struct file_operations random_address_fops = {
663 .open = random_address_open,
666 .release = single_release,
669 static int static_address_show(struct seq_file *f, void *p)
671 struct hci_dev *hdev = f->private;
674 seq_printf(f, "%pMR\n", &hdev->static_addr);
675 hci_dev_unlock(hdev);
680 static int static_address_open(struct inode *inode, struct file *file)
682 return single_open(file, static_address_show, inode->i_private);
685 static const struct file_operations static_address_fops = {
686 .open = static_address_open,
689 .release = single_release,
692 static ssize_t force_static_address_read(struct file *file,
693 char __user *user_buf,
694 size_t count, loff_t *ppos)
696 struct hci_dev *hdev = file->private_data;
699 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
702 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
705 static ssize_t force_static_address_write(struct file *file,
706 const char __user *user_buf,
707 size_t count, loff_t *ppos)
709 struct hci_dev *hdev = file->private_data;
711 size_t buf_size = min(count, (sizeof(buf)-1));
714 if (test_bit(HCI_UP, &hdev->flags))
717 if (copy_from_user(buf, user_buf, buf_size))
720 buf[buf_size] = '\0';
721 if (strtobool(buf, &enable))
724 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
727 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
732 static const struct file_operations force_static_address_fops = {
734 .read = force_static_address_read,
735 .write = force_static_address_write,
736 .llseek = default_llseek,
739 static int white_list_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct bdaddr_list *b;
745 list_for_each_entry(b, &hdev->le_white_list, list)
746 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
747 hci_dev_unlock(hdev);
752 static int white_list_open(struct inode *inode, struct file *file)
754 return single_open(file, white_list_show, inode->i_private);
757 static const struct file_operations white_list_fops = {
758 .open = white_list_open,
761 .release = single_release,
764 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct list_head *p, *n;
770 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
771 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
772 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
773 &irk->bdaddr, irk->addr_type,
774 16, irk->val, &irk->rpa);
776 hci_dev_unlock(hdev);
781 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
783 return single_open(file, identity_resolving_keys_show,
787 static const struct file_operations identity_resolving_keys_fops = {
788 .open = identity_resolving_keys_open,
791 .release = single_release,
794 static int long_term_keys_show(struct seq_file *f, void *ptr)
796 struct hci_dev *hdev = f->private;
797 struct list_head *p, *n;
800 list_for_each_safe(p, n, &hdev->long_term_keys) {
801 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
802 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
803 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
804 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
805 __le64_to_cpu(ltk->rand), 16, ltk->val);
807 hci_dev_unlock(hdev);
812 static int long_term_keys_open(struct inode *inode, struct file *file)
814 return single_open(file, long_term_keys_show, inode->i_private);
817 static const struct file_operations long_term_keys_fops = {
818 .open = long_term_keys_open,
821 .release = single_release,
824 static int conn_min_interval_set(void *data, u64 val)
826 struct hci_dev *hdev = data;
828 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
832 hdev->le_conn_min_interval = val;
833 hci_dev_unlock(hdev);
838 static int conn_min_interval_get(void *data, u64 *val)
840 struct hci_dev *hdev = data;
843 *val = hdev->le_conn_min_interval;
844 hci_dev_unlock(hdev);
849 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
850 conn_min_interval_set, "%llu\n");
852 static int conn_max_interval_set(void *data, u64 val)
854 struct hci_dev *hdev = data;
856 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
860 hdev->le_conn_max_interval = val;
861 hci_dev_unlock(hdev);
866 static int conn_max_interval_get(void *data, u64 *val)
868 struct hci_dev *hdev = data;
871 *val = hdev->le_conn_max_interval;
872 hci_dev_unlock(hdev);
877 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
878 conn_max_interval_set, "%llu\n");
880 static int conn_latency_set(void *data, u64 val)
882 struct hci_dev *hdev = data;
888 hdev->le_conn_latency = val;
889 hci_dev_unlock(hdev);
894 static int conn_latency_get(void *data, u64 *val)
896 struct hci_dev *hdev = data;
899 *val = hdev->le_conn_latency;
900 hci_dev_unlock(hdev);
905 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
906 conn_latency_set, "%llu\n");
908 static int supervision_timeout_set(void *data, u64 val)
910 struct hci_dev *hdev = data;
912 if (val < 0x000a || val > 0x0c80)
916 hdev->le_supv_timeout = val;
917 hci_dev_unlock(hdev);
922 static int supervision_timeout_get(void *data, u64 *val)
924 struct hci_dev *hdev = data;
927 *val = hdev->le_supv_timeout;
928 hci_dev_unlock(hdev);
933 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
934 supervision_timeout_set, "%llu\n");
936 static int adv_channel_map_set(void *data, u64 val)
938 struct hci_dev *hdev = data;
940 if (val < 0x01 || val > 0x07)
944 hdev->le_adv_channel_map = val;
945 hci_dev_unlock(hdev);
950 static int adv_channel_map_get(void *data, u64 *val)
952 struct hci_dev *hdev = data;
955 *val = hdev->le_adv_channel_map;
956 hci_dev_unlock(hdev);
961 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
962 adv_channel_map_set, "%llu\n");
964 static int device_list_show(struct seq_file *f, void *ptr)
966 struct hci_dev *hdev = f->private;
967 struct hci_conn_params *p;
970 list_for_each_entry(p, &hdev->le_conn_params, list) {
971 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
974 hci_dev_unlock(hdev);
979 static int device_list_open(struct inode *inode, struct file *file)
981 return single_open(file, device_list_show, inode->i_private);
984 static const struct file_operations device_list_fops = {
985 .open = device_list_open,
988 .release = single_release,
991 /* ---- HCI requests ---- */
993 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
995 BT_DBG("%s result 0x%2.2x", hdev->name, result);
997 if (hdev->req_status == HCI_REQ_PEND) {
998 hdev->req_result = result;
999 hdev->req_status = HCI_REQ_DONE;
1000 wake_up_interruptible(&hdev->req_wait_q);
1004 static void hci_req_cancel(struct hci_dev *hdev, int err)
1006 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1008 if (hdev->req_status == HCI_REQ_PEND) {
1009 hdev->req_result = err;
1010 hdev->req_status = HCI_REQ_CANCELED;
1011 wake_up_interruptible(&hdev->req_wait_q);
1015 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1018 struct hci_ev_cmd_complete *ev;
1019 struct hci_event_hdr *hdr;
1020 struct sk_buff *skb;
1024 skb = hdev->recv_evt;
1025 hdev->recv_evt = NULL;
1027 hci_dev_unlock(hdev);
1030 return ERR_PTR(-ENODATA);
1032 if (skb->len < sizeof(*hdr)) {
1033 BT_ERR("Too short HCI event");
1037 hdr = (void *) skb->data;
1038 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1041 if (hdr->evt != event)
1046 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1047 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1051 if (skb->len < sizeof(*ev)) {
1052 BT_ERR("Too short cmd_complete event");
1056 ev = (void *) skb->data;
1057 skb_pull(skb, sizeof(*ev));
1059 if (opcode == __le16_to_cpu(ev->opcode))
1062 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1063 __le16_to_cpu(ev->opcode));
1067 return ERR_PTR(-ENODATA);
1070 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1071 const void *param, u8 event, u32 timeout)
1073 DECLARE_WAITQUEUE(wait, current);
1074 struct hci_request req;
1077 BT_DBG("%s", hdev->name);
1079 hci_req_init(&req, hdev);
1081 hci_req_add_ev(&req, opcode, plen, param, event);
1083 hdev->req_status = HCI_REQ_PEND;
1085 err = hci_req_run(&req, hci_req_sync_complete);
1087 return ERR_PTR(err);
1089 add_wait_queue(&hdev->req_wait_q, &wait);
1090 set_current_state(TASK_INTERRUPTIBLE);
1092 schedule_timeout(timeout);
1094 remove_wait_queue(&hdev->req_wait_q, &wait);
1096 if (signal_pending(current))
1097 return ERR_PTR(-EINTR);
1099 switch (hdev->req_status) {
1101 err = -bt_to_errno(hdev->req_result);
1104 case HCI_REQ_CANCELED:
1105 err = -hdev->req_result;
1113 hdev->req_status = hdev->req_result = 0;
1115 BT_DBG("%s end: err %d", hdev->name, err);
1118 return ERR_PTR(err);
1120 return hci_get_cmd_complete(hdev, opcode, event);
1122 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1124 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1125 const void *param, u32 timeout)
1127 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1129 EXPORT_SYMBOL(__hci_cmd_sync);
1131 /* Execute request and wait for completion. */
1132 static int __hci_req_sync(struct hci_dev *hdev,
1133 void (*func)(struct hci_request *req,
1135 unsigned long opt, __u32 timeout)
1137 struct hci_request req;
1138 DECLARE_WAITQUEUE(wait, current);
1141 BT_DBG("%s start", hdev->name);
1143 hci_req_init(&req, hdev);
1145 hdev->req_status = HCI_REQ_PEND;
1149 err = hci_req_run(&req, hci_req_sync_complete);
1151 hdev->req_status = 0;
1153 /* ENODATA means the HCI request command queue is empty.
1154 * This can happen when a request with conditionals doesn't
1155 * trigger any commands to be sent. This is normal behavior
1156 * and should not trigger an error return.
1158 if (err == -ENODATA)
1164 add_wait_queue(&hdev->req_wait_q, &wait);
1165 set_current_state(TASK_INTERRUPTIBLE);
1167 schedule_timeout(timeout);
1169 remove_wait_queue(&hdev->req_wait_q, &wait);
1171 if (signal_pending(current))
1174 switch (hdev->req_status) {
1176 err = -bt_to_errno(hdev->req_result);
1179 case HCI_REQ_CANCELED:
1180 err = -hdev->req_result;
1188 hdev->req_status = hdev->req_result = 0;
1190 BT_DBG("%s end: err %d", hdev->name, err);
1195 static int hci_req_sync(struct hci_dev *hdev,
1196 void (*req)(struct hci_request *req,
1198 unsigned long opt, __u32 timeout)
1202 if (!test_bit(HCI_UP, &hdev->flags))
1205 /* Serialize all requests */
1207 ret = __hci_req_sync(hdev, req, opt, timeout);
1208 hci_req_unlock(hdev);
1213 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1215 BT_DBG("%s %ld", req->hdev->name, opt);
1218 set_bit(HCI_RESET, &req->hdev->flags);
1219 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1222 static void bredr_init(struct hci_request *req)
1224 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1226 /* Read Local Supported Features */
1227 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1229 /* Read Local Version */
1230 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1232 /* Read BD Address */
1233 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1236 static void amp_init(struct hci_request *req)
1238 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1240 /* Read Local Version */
1241 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1243 /* Read Local Supported Commands */
1244 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1246 /* Read Local Supported Features */
1247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1249 /* Read Local AMP Info */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1252 /* Read Data Blk size */
1253 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1255 /* Read Flow Control Mode */
1256 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1258 /* Read Location Data */
1259 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1262 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1264 struct hci_dev *hdev = req->hdev;
1266 BT_DBG("%s %ld", hdev->name, opt);
1269 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1270 hci_reset_req(req, 0);
1272 switch (hdev->dev_type) {
1282 BT_ERR("Unknown device type %d", hdev->dev_type);
1287 static void bredr_setup(struct hci_request *req)
1289 struct hci_dev *hdev = req->hdev;
1294 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1295 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1297 /* Read Class of Device */
1298 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1300 /* Read Local Name */
1301 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1303 /* Read Voice Setting */
1304 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1306 /* Read Number of Supported IAC */
1307 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1309 /* Read Current IAC LAP */
1310 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1312 /* Clear Event Filters */
1313 flt_type = HCI_FLT_CLEAR_ALL;
1314 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1316 /* Connection accept timeout ~20 secs */
1317 param = cpu_to_le16(0x7d00);
1318 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1320 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1321 * but it does not support page scan related HCI commands.
1323 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1324 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1325 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1329 static void le_setup(struct hci_request *req)
1331 struct hci_dev *hdev = req->hdev;
1333 /* Read LE Buffer Size */
1334 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1336 /* Read LE Local Supported Features */
1337 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1339 /* Read LE Supported States */
1340 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1342 /* Read LE Advertising Channel TX Power */
1343 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1345 /* Read LE White List Size */
1346 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1348 /* Clear LE White List */
1349 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1351 /* LE-only controllers have LE implicitly enabled */
1352 if (!lmp_bredr_capable(hdev))
1353 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1356 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1358 if (lmp_ext_inq_capable(hdev))
1361 if (lmp_inq_rssi_capable(hdev))
1364 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1365 hdev->lmp_subver == 0x0757)
1368 if (hdev->manufacturer == 15) {
1369 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1371 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1373 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1377 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1378 hdev->lmp_subver == 0x1805)
1384 static void hci_setup_inquiry_mode(struct hci_request *req)
1388 mode = hci_get_inquiry_mode(req->hdev);
1390 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1393 static void hci_setup_event_mask(struct hci_request *req)
1395 struct hci_dev *hdev = req->hdev;
1397 /* The second byte is 0xff instead of 0x9f (two reserved bits
1398 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1399 * command otherwise.
1401 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1403 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1404 * any event mask for pre 1.2 devices.
1406 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1409 if (lmp_bredr_capable(hdev)) {
1410 events[4] |= 0x01; /* Flow Specification Complete */
1411 events[4] |= 0x02; /* Inquiry Result with RSSI */
1412 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1413 events[5] |= 0x08; /* Synchronous Connection Complete */
1414 events[5] |= 0x10; /* Synchronous Connection Changed */
1416 /* Use a different default for LE-only devices */
1417 memset(events, 0, sizeof(events));
1418 events[0] |= 0x10; /* Disconnection Complete */
1419 events[0] |= 0x80; /* Encryption Change */
1420 events[1] |= 0x08; /* Read Remote Version Information Complete */
1421 events[1] |= 0x20; /* Command Complete */
1422 events[1] |= 0x40; /* Command Status */
1423 events[1] |= 0x80; /* Hardware Error */
1424 events[2] |= 0x04; /* Number of Completed Packets */
1425 events[3] |= 0x02; /* Data Buffer Overflow */
1426 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1429 if (lmp_inq_rssi_capable(hdev))
1430 events[4] |= 0x02; /* Inquiry Result with RSSI */
1432 if (lmp_sniffsubr_capable(hdev))
1433 events[5] |= 0x20; /* Sniff Subrating */
1435 if (lmp_pause_enc_capable(hdev))
1436 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_ext_inq_capable(hdev))
1439 events[5] |= 0x40; /* Extended Inquiry Result */
1441 if (lmp_no_flush_capable(hdev))
1442 events[7] |= 0x01; /* Enhanced Flush Complete */
1444 if (lmp_lsto_capable(hdev))
1445 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1447 if (lmp_ssp_capable(hdev)) {
1448 events[6] |= 0x01; /* IO Capability Request */
1449 events[6] |= 0x02; /* IO Capability Response */
1450 events[6] |= 0x04; /* User Confirmation Request */
1451 events[6] |= 0x08; /* User Passkey Request */
1452 events[6] |= 0x10; /* Remote OOB Data Request */
1453 events[6] |= 0x20; /* Simple Pairing Complete */
1454 events[7] |= 0x04; /* User Passkey Notification */
1455 events[7] |= 0x08; /* Keypress Notification */
1456 events[7] |= 0x10; /* Remote Host Supported
1457 * Features Notification
1461 if (lmp_le_capable(hdev))
1462 events[7] |= 0x20; /* LE Meta-Event */
1464 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 controller initialization request: BR/EDR features, event mask,
 * SSP/EIR state, inquiry mode, extended features and link-level security.
 * Runs synchronously from __hci_init().
 * NOTE(review): this extract is missing several original lines (braces,
 * else branches); code is reproduced as-is from the extraction.
 */
1467 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1469 struct hci_dev *hdev = req->hdev;
1471 if (lmp_bredr_capable(hdev))
1474 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1476 if (lmp_le_capable(hdev))
1479 hci_setup_event_mask(req);
1481 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1482 * local supported commands HCI command.
1484 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1485 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1487 if (lmp_ssp_capable(hdev)) {
1488 /* When SSP is available, then the host features page
1489 * should also be available as well. However some
1490 * controllers list the max_page as 0 as long as SSP
1491 * has not been enabled. To achieve proper debugging
1492 * output, force the minimum max_page to 1 at least.
1494 hdev->max_page = 0x01;
1496 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1498 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1499 sizeof(mode), &mode);
/* SSP disabled: clear cached EIR data and send an all-zero Write EIR */
1501 struct hci_cp_write_eir cp;
1503 memset(hdev->eir, 0, sizeof(hdev->eir));
1504 memset(&cp, 0, sizeof(cp));
1506 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1510 if (lmp_inq_rssi_capable(hdev))
1511 hci_setup_inquiry_mode(req);
1513 if (lmp_inq_tx_pwr_capable(hdev))
1514 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1516 if (lmp_ext_feat_capable(hdev)) {
1517 struct hci_cp_read_local_ext_features cp;
/* Read extended features page 1 (host features) */
1520 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1524 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue Write Default Link Policy.
 */
1531 static void hci_setup_link_policy(struct hci_request *req)
1533 struct hci_dev *hdev = req->hdev;
1534 struct hci_cp_write_def_link_policy cp;
1535 u16 link_policy = 0;
1537 if (lmp_rswitch_capable(hdev))
1538 link_policy |= HCI_LP_RSWITCH;
1539 if (lmp_hold_capable(hdev))
1540 link_policy |= HCI_LP_HOLD;
1541 if (lmp_sniff_capable(hdev))
1542 link_policy |= HCI_LP_SNIFF;
1543 if (lmp_park_capable(hdev))
1544 link_policy |= HCI_LP_PARK;
/* Link policy is little-endian on the wire */
1546 cp.policy = cpu_to_le16(link_policy);
1547 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host-side LE setting differs
 * from what the controller currently reports. Skipped entirely for
 * LE-only controllers, which do not take explicit enablement.
 */
1550 static void hci_set_le_support(struct hci_request *req)
1552 struct hci_dev *hdev = req->hdev;
1553 struct hci_cp_write_le_host_supported cp;
1555 /* LE-only devices do not support explicit enablement */
1556 if (!lmp_bredr_capable(hdev))
1559 memset(&cp, 0, sizeof(cp));
1561 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1563 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command if the host LE setting actually changes */
1566 if (cp.le != lmp_host_le_capable(hdev))
1567 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue HCI event mask page 2: CSB master/slave events and the
 * Authenticated Payload Timeout Expired event, depending on support.
 */
1571 static void hci_set_event_mask_page_2(struct hci_request *req)
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1579 if (lmp_csb_master_capable(hdev)) {
1580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1586 /* If Connectionless Slave Broadcast slave role is supported
1587 * enable all necessary events for it.
1589 if (lmp_csb_slave_capable(hdev)) {
1590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller initialization: stored-link-key cleanup, default
 * link policy, LE event mask / LE host support, and reading extended
 * feature pages beyond page 1.
 */
1603 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1605 struct hci_dev *hdev = req->hdev;
1608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1612 * Check the supported commands and only if the command is marked
1613 * as supported send it. If not supported assume that the controller
1614 * does not have actual support for stored link keys which makes this
1615 * command redundant anyway.
1617 * Some controllers indicate that they support handling deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
1621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623 struct hci_cp_delete_stored_link_key cp;
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 = Write Default Link Policy Settings supported */
1631 if (hdev->commands[5] & 0x10)
1632 hci_setup_link_policy(req);
1634 if (lmp_le_capable(hdev)) {
1637 memset(events, 0, sizeof(events));
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection
1648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1651 hci_set_le_support(req);
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 (final) controller initialization: event mask page 2,
 * synchronization train parameters, and Secure Connections support.
 */
1664 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1666 struct hci_dev *hdev = req->hdev;
1668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1672 /* Check for Synchronization Train support */
1673 if (lmp_sync_train_capable(hdev))
1674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1676 /* Enable Secure Connections if supported and configured */
1677 if ((lmp_sc_capable(hdev) ||
1678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
/* Run the full synchronous init sequence (stages 1-4) for a controller
 * and, during the initial HCI_SETUP phase only, create the per-device
 * debugfs entries. AMP controllers stop after stage 1.
 * Returns 0 on success or a negative errno from __hci_req_sync().
 */
1686 static int __hci_init(struct hci_dev *hdev)
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1706 if (hdev->dev_type != HCI_BREDR)
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available for every controller type */
1727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
/* BR/EDR-only entries */
1744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
1747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
1749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
1751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
/* SSP-only entries */
1755 if (lmp_ssp_capable(hdev)) {
1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
/* Sniff-mode entries */
1764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
/* LE-only entries */
1773 if (lmp_le_capable(hdev)) {
1774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
1778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
1780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
1794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
1799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
1801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1815 &hdev->discov_interleaved_timeout);
/* Minimal init request for unconfigured controllers: optional reset,
 * Read Local Version, and (when the driver can change it) BD Address.
 */
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1823 struct hci_dev *hdev = req->hdev;
1825 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver resets on close anyway */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Synchronous init path for unconfigured controllers; skipped entirely
 * for raw devices. Returns 0 or a negative errno.
 */
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder for HCISETSCAN: write the scan-enable value from opt. */
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1857 BT_DBG("%s %x", req->hdev->name, scan);
1859 /* Inquiry and Page scans */
1860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder for HCISETAUTH: write the authentication-enable value. */
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1867 BT_DBG("%s %x", req->hdev->name, auth);
1869 /* Authentication */
1870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder for HCISETENCRYPT: write the encryption-mode value. */
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1877 BT_DBG("%s %x", req->hdev->name, encrypt);
1880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder for HCISETLINKPOL: write the default link policy
 * (converted to little-endian for the wire).
 */
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1885 __le16 policy = cpu_to_le16(opt);
1887 BT_DBG("%s %x", req->hdev->name, policy);
1889 /* Default link policy */
1890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1893 /* Get HCI device by index.
1894 * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1897 struct hci_dev *hdev = NULL, *d;
1899 BT_DBG("%d", index);
/* Walk the global device list under the read lock; take a reference
 * on the matching device before returning it. Caller must drop it
 * with hci_dev_put().
 */
1904 read_lock(&hci_dev_list_lock);
1905 list_for_each_entry(d, &hci_dev_list, list) {
1906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1911 read_unlock(&hci_dev_list_lock);
1915 /* ---- Inquiry support ---- */
/* Return true while a discovery procedure is actively finding or
 * resolving devices (other states count as inactive).
 */
1917 bool hci_discovery_active(struct hci_dev *hdev)
1919 struct discovery_state *discov = &hdev->discovery;
1921 switch (discov->state) {
1922 case DISCOVERY_FINDING:
1923 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the corresponding
 * mgmt Discovering events. No-op if the state is unchanged.
 */
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1933 int old_state = hdev->discovery.state;
1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1937 if (old_state == state)
1940 hdev->discovery.state = state;
1943 case DISCOVERY_STOPPED:
1944 hci_update_background_scan(hdev);
/* Only report "stopped" if discovery actually got going */
1946 if (old_state != DISCOVERY_STARTING)
1947 mgmt_discovering(hdev, 0);
1949 case DISCOVERY_STARTING:
1951 case DISCOVERY_FINDING:
1952 mgmt_discovering(hdev, 1);
1954 case DISCOVERY_RESOLVING:
1956 case DISCOVERY_STOPPING:
/* Free every inquiry cache entry and reset the unknown/resolve lists. */
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1963 struct discovery_state *cache = &hdev->discovery;
1964 struct inquiry_entry *p, *n;
1966 list_for_each_entry_safe(p, n, &cache->all, all) {
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
/* Look up an inquiry cache entry by bdaddr in the "all" list.
 * Returns the entry or NULL. Caller must hold hdev->lock.
 */
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1978 struct discovery_state *cache = &hdev->discovery;
1979 struct inquiry_entry *e;
1981 BT_DBG("cache %p, %pMR", cache, bdaddr);
1983 list_for_each_entry(e, &cache->all, all) {
1984 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up a cache entry by bdaddr among devices whose name is still
 * unknown. Returns the entry or NULL.
 */
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1994 struct discovery_state *cache = &hdev->discovery;
1995 struct inquiry_entry *e;
1997 BT_DBG("cache %p, %pMR", cache, bdaddr);
1999 list_for_each_entry(e, &cache->unknown, list) {
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up a cache entry on the name-resolve list. With BDADDR_ANY the
 * first entry in the given name_state is returned; otherwise the entry
 * matching bdaddr.
 */
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list keeping it sorted by RSSI
 * magnitude, so stronger (closer) devices get their names resolved
 * first; entries already pending resolution are skipped over.
 */
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027 struct inquiry_entry *ie)
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
2033 list_del(&ie->list);
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
2037 abs(p->data.rssi) >= abs(ie->data.rssi))
2042 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache and compute
 * the MGMT_DEV_FOUND_* flags (legacy pairing, confirm-name) to report.
 * Caller must hold hdev->lock.
 */
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *ie;
2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data */
2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* Updated RSSI may change resolve-list ordering for pending names */
2064 if (ie->name_state == NAME_NEEDED &&
2065 data->rssi != ie->data.rssi) {
2066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2080 list_add(&ie->all, &cache->all);
2083 ie->name_state = NAME_KNOWN;
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
/* Name just became known: drop from the unknown/resolve list */
2090 if (name_known && ie->name_state != NAME_KNOWN &&
2091 ie->name_state != NAME_PENDING) {
2092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
2096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
2098 cache->timestamp = jiffies;
2100 if (ie->name_state == NAME_NOT_KNOWN)
2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to num cached inquiry results into buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 */
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2114 list_for_each_entry(e, &cache->all, all) {
2115 struct inquiry_data *data = &e->data;
2120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
2131 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for HCIINQUIRY: queue an Inquiry command from the
 * user-supplied hci_inquiry_req, unless an inquiry is already running.
 */
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138 struct hci_dev *hdev = req->hdev;
2139 struct hci_cp_inquiry cp;
2141 BT_DBG("%s", hdev->name);
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
2150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: sleep interruptibly; non-zero aborts the wait
 * when a signal is pending.
 */
2153 static int wait_inquiry(void *word)
2156 return signal_pending(current);
/* HCIINQUIRY ioctl: optionally flush a stale cache, run an inquiry,
 * wait for it to finish, then copy cached results back to userspace.
 * Returns 0 on success, -EINTR if interrupted, or another negative errno.
 */
2159 int hci_inquiry(void __user *arg)
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2171 hdev = hci_dev_get(ir.dev_id);
/* Reject devices that are user-channel, unconfigured, non-BR/EDR,
 * or with BR/EDR disabled */
2175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2185 if (hdev->dev_type != HCI_BREDR) {
2190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush cache if too old, empty, or explicitly requested */
2196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198 hci_inquiry_cache_flush(hdev);
2201 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units; 2000 ms is a conservative bound */
2203 timeo = ir.length * msecs_to_jiffies(2000);
2206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2219 /* for unlimited number of responses we will use buffer with
2222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2224 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2225 * copy it to the user space.
2227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235 hci_dev_unlock(hdev);
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: validate preconditions (rfkill, valid address),
 * call the driver open callback, run setup/unconf/config handling and
 * the init sequence, then mark the device HCI_UP. On init failure all
 * work is flushed and the transport is torn down again.
 * Returns 0 on success or a negative errno.
 */
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2258 BT_DBG("%s %p", hdev->name, hdev);
2262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2277 /* Check for valid public address or a configured static
2278 * random address, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
2291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2298 if (test_bit(HCI_UP, &hdev->flags)) {
/* Driver transport open */
2303 if (hdev->open(hdev)) {
2308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2313 ret = hdev->setup(hdev);
2315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2347 ret = -EADDRNOTAVAIL;
/* Full init sequence only for configured, non-user-channel devices */
2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353 ret = __hci_init(hdev);
2356 clear_bit(HCI_INIT, &hdev->flags);
/* Success: device is now up */
2360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367 hdev->dev_type == HCI_BREDR) {
2369 mgmt_powered(hdev, 1);
2370 hci_dev_unlock(hdev);
2373 /* Init failed, cleanup */
2374 flush_work(&hdev->tx_work);
2375 flush_work(&hdev->cmd_work);
2376 flush_work(&hdev->rx_work);
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW flag across the failed open */
2390 hdev->flags &= BIT(HCI_RAW);
2394 hci_req_unlock(hdev);
2398 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device, reject unconfigured
 * devices outside the user channel, cancel pending auto-off work, wait
 * for setup to finish, then perform the actual open.
 */
2400 int hci_dev_open(__u16 dev)
2402 struct hci_dev *hdev;
2405 hdev = hci_dev_get(dev);
2409 /* Devices that are marked as unconfigured can only be powered
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result into a failure. Only user channel operation is
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2436 flush_workqueue(hdev->req_workqueue);
2438 err = hci_dev_do_open(hdev);
2445 /* This function requires the caller holds hdev->lock */
/* Detach every LE connection parameter entry from its pending-action
 * list (auto-connect / report lists) without freeing the entries.
 */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2448 struct hci_conn_params *p;
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2453 BT_DBG("All LE pending actions cleared");
/* Core power-off path: cancel timers and work, flush caches and
 * connections, optionally reset the controller, drain all queues,
 * drop the last sent command and clear volatile state. Safe to call
 * when the device is already down.
 */
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2458 BT_DBG("%s %p", hdev->name, hdev);
2460 cancel_delayed_work(&hdev->power_off);
2462 hci_req_cancel(hdev, ENODEV);
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466 cancel_delayed_work_sync(&hdev->cmd_timer);
2467 hci_req_unlock(hdev);
2471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
2473 flush_work(&hdev->rx_work);
2475 if (hdev->discov_timeout > 0) {
2476 cancel_delayed_work(&hdev->discov_off);
2477 hdev->discov_timeout = 0;
2478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483 cancel_delayed_work(&hdev->service_cache);
2485 cancel_delayed_work_sync(&hdev->le_scan_disable);
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Drop cached discovery results and tear down all connections */
2491 hci_inquiry_cache_flush(hdev);
2492 hci_conn_hash_flush(hdev);
2493 hci_pend_le_actions_clear(hdev);
2494 hci_dev_unlock(hdev);
2496 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller on close unless auto-off/unconfigured, and
 * only when the driver's quirk asks for it */
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507 set_bit(HCI_INIT, &hdev->flags);
2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509 clear_bit(HCI_INIT, &hdev->flags);
2512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
2522 cancel_delayed_work_sync(&hdev->cmd_timer);
2523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
/* Keep HCI_RAW; clear all non-persistent dev_flags */
2535 hdev->flags &= BIT(HCI_RAW);
2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2546 /* Controller radio is available but is currently powered down */
2547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2549 memset(hdev->eir, 0, sizeof(hdev->eir));
2550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551 bacpy(&hdev->random_addr, BDADDR_ANY);
2553 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: reject user-channel devices, cancel any pending
 * auto power-off, then perform the actual close.
 */
2559 int hci_dev_close(__u16 dev)
2561 struct hci_dev *hdev;
2564 hdev = hci_dev_get(dev);
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2576 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic, flush the inquiry cache and
 * connections, reset counters, then issue an HCI Reset to the
 * controller. Only valid for devices that are up, configured and not
 * in user-channel mode.
 */
2583 int hci_dev_reset(__u16 dev)
2585 struct hci_dev *hdev;
2588 hdev = hci_dev_get(dev);
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues and cached state */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2614 hci_inquiry_cache_flush(hdev);
2615 hci_conn_hash_flush(hdev);
2616 hci_dev_unlock(hdev);
/* Reset the flow-control counters before the HCI Reset */
2621 atomic_set(&hdev->cmd_cnt, 1);
2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2627 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics. Rejected for
 * user-channel and unconfigured devices.
 */
2632 int hci_dev_reset_stat(__u16 dev)
2634 struct hci_dev *hdev;
2637 hdev = hci_dev_get(dev);
2641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw scan-enable change (HCISETSCAN) into the CONNECTABLE /
 * DISCOVERABLE dev_flags and, when something changed and mgmt is in
 * use, re-enable BR/EDR, refresh advertising data and emit New Settings.
 */
2658 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2660 bool conn_changed, discov_changed;
2662 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2664 if ((scan & SCAN_PAGE))
2665 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2668 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2671 if ((scan & SCAN_INQUIRY)) {
2672 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2675 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Only notify mgmt users; raw-only users get nothing */
2680 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683 if (conn_changed || discov_changed) {
2684 /* In case this was disabled through mgmt */
2685 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2687 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688 mgmt_update_adv_data(hdev);
2690 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). Rejected for user-channel,
 * unconfigured, non-BR/EDR or BR/EDR-disabled devices.
 */
2694 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2696 struct hci_dev *hdev;
2697 struct hci_dev_req dr;
2700 if (copy_from_user(&dr, arg, sizeof(dr)))
2703 hdev = hci_dev_get(dr.dev_id);
2707 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2712 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2717 if (hdev->dev_type != HCI_BREDR) {
2722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2729 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2734 if (!lmp_encrypt_capable(hdev)) {
2739 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740 /* Auth must be enabled first */
2741 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2747 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2752 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2755 /* Ensure that the connectable and discoverable states
2756 * get correctly modified as this was a non-mgmt change.
2759 hci_update_scan_state(hdev, dr.dev_opt);
2763 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2767 case HCISETLINKMODE:
2768 hdev->link_mode = ((__u16) dr.dev_opt) &
2769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2773 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low */
2777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the id/flags of up to dev_num registered
 * devices to userspace. Returns 0 or -EFAULT/-EINVAL/-ENOMEM.
 */
2796 int hci_get_dev_list(void __user *arg)
2798 struct hci_dev *hdev;
2799 struct hci_dev_list_req *dl;
2800 struct hci_dev_req *dr;
2801 int n = 0, size, err;
2804 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kernel buffer stays within two pages */
2807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2810 size = sizeof(*dl) + dev_num * sizeof(*dr);
2812 dl = kzalloc(size, GFP_KERNEL);
2818 read_lock(&hci_dev_list_lock);
2819 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Enumeration by legacy tools cancels pending auto power-off */
2820 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2821 cancel_delayed_work(&hdev->power_off);
2823 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2824 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2826 (dr + n)->dev_id = hdev->id;
2827 (dr + n)->dev_opt = hdev->flags;
2832 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2835 size = sizeof(*dl) + n * sizeof(*dr);
2837 err = copy_to_user(arg, dl, size);
2840 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one device
 * (name, address, type, flags, MTUs, link policy/mode, stats,
 * features) and copy it to userspace.
 */
2843 int hci_get_dev_info(void __user *arg)
2845 struct hci_dev *hdev;
2846 struct hci_dev_info di;
2849 if (copy_from_user(&di, arg, sizeof(di)))
2852 hdev = hci_dev_get(di.dev_id);
2856 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2857 cancel_delayed_work_sync(&hdev->power_off);
2859 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2860 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2862 strcpy(di.name, hdev->name);
2863 di.bdaddr = hdev->bdaddr;
/* Low nibble: bus type; bits 4-5: device type */
2864 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2865 di.flags = hdev->flags;
2866 di.pkt_type = hdev->pkt_type;
2867 if (lmp_bredr_capable(hdev)) {
2868 di.acl_mtu = hdev->acl_mtu;
2869 di.acl_pkts = hdev->acl_pkts;
2870 di.sco_mtu = hdev->sco_mtu;
2871 di.sco_pkts = hdev->sco_pkts;
/* LE-only controllers report the LE buffer sizes in the ACL fields */
2873 di.acl_mtu = hdev->le_mtu;
2874 di.acl_pkts = hdev->le_pkts;
2878 di.link_policy = hdev->link_policy;
2879 di.link_mode = hdev->link_mode;
2881 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2882 memcpy(&di.features, &hdev->features, sizeof(di.features));
2884 if (copy_to_user(arg, &di, sizeof(di)))
2892 /* ---- Interface to HCI drivers ---- */
/* rfkill set_block callback: record the RF-killed state and close the
 * device on block (unless it is mid-setup/config). User-channel
 * devices are left alone.
 */
2894 static int hci_rfkill_set_block(void *data, bool blocked)
2896 struct hci_dev *hdev = data;
2898 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2900 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2904 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2905 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2906 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2907 hci_dev_do_close(hdev);
2909 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered for each HCI device */
2915 static const struct rfkill_ops hci_rfkill_ops = {
2916 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check conditions that
 * were deliberately ignored during setup (rfkill, missing address),
 * schedule auto power-off, and send the appropriate mgmt Index Added
 * event when leaving the SETUP or CONFIG phase.
 */
2919 static void hci_power_on(struct work_struct *work)
2921 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2924 BT_DBG("%s", hdev->name);
2926 err = hci_dev_do_open(hdev);
2928 mgmt_set_powered_failed(hdev, err);
2932 /* During the HCI setup phase, a few error conditions are
2933 * ignored and they need to be checked now. If they are still
2934 * valid, it is important to turn the device back off.
2936 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2937 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2938 (hdev->dev_type == HCI_BREDR &&
2939 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2940 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2941 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2942 hci_dev_do_close(hdev);
2943 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2944 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2945 HCI_AUTO_OFF_TIMEOUT);
2948 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2949 /* For unconfigured devices, set the HCI_RAW flag
2950 * so that userspace can easily identify them.
2952 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2953 set_bit(HCI_RAW, &hdev->flags);
2955 /* For fully configured devices, this will send
2956 * the Index Added event. For unconfigured devices,
2957 * it will send Unconfigured Index Added event.
2959 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2960 * and no event will be send.
2962 mgmt_index_added(hdev);
2963 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2964 /* When the controller is now configured, then it
2965 * is important to clear the HCI_RAW flag.
2967 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2968 clear_bit(HCI_RAW, &hdev->flags);
2970 /* Powering on the controller with HCI_CONFIG set only
2971 * happens with the transition from unconfigured to
2972 * configured. This will send the Index Added event.
2974 mgmt_index_added(hdev);
/* Deferred power-off worker (hdev->power_off delayed work): just
 * closes the device.
 */
2978 static void hci_power_off(struct work_struct *work)
2980 struct hci_dev *hdev = container_of(work, struct hci_dev,
2983 BT_DBG("%s", hdev->name);
2985 hci_dev_do_close(hdev);
/* Discoverable-mode timeout worker: lets mgmt turn discoverability
 * back off when the timer fires.
 */
2988 static void hci_discov_off(struct work_struct *work)
2990 struct hci_dev *hdev;
2992 hdev = container_of(work, struct hci_dev, discov_off.work);
2994 BT_DBG("%s", hdev->name);
2996 mgmt_discoverable_timeout(hdev);
/* Remove (and free — freeing line not visible in this excerpt) every
 * entry on hdev->uuids.
 */
2999 void hci_uuids_clear(struct hci_dev *hdev)
3001 struct bt_uuid *uuid, *tmp;
3003 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3004 list_del(&uuid->list);
/* Drop all stored BR/EDR link keys from hdev->link_keys. */
3009 void hci_link_keys_clear(struct hci_dev *hdev)
3011 struct list_head *p, *n;
3013 list_for_each_safe(p, n, &hdev->link_keys) {
3014 struct link_key *key;
3016 key = list_entry(p, struct link_key, list);
/* Drop all stored SMP long term keys. */
3023 void hci_smp_ltks_clear(struct hci_dev *hdev)
3025 struct smp_ltk *k, *tmp;
3027 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Drop all stored SMP identity resolving keys. */
3033 void hci_smp_irks_clear(struct hci_dev *hdev)
3035 struct smp_irk *k, *tmp;
3037 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up the stored link key for @bdaddr; NULL-return path is not
 * visible in this excerpt.
 */
3043 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3047 list_for_each_entry(k, &hdev->link_keys, list)
3048 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements. The individual return statements are missing from
 * this excerpt; the comments describe the visible conditions only.
 */
3054 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3055 u8 key_type, u8 old_key_type)
3058 if (key_type < 0x03)
3061 /* Debug keys are insecure so don't store them persistently */
3062 if (key_type == HCI_LK_DEBUG_COMBINATION)
3065 /* Changed combination key and there's no previous one */
3066 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3069 /* Security mode 3 case */
3073 /* Neither local nor remote side had no-bonding as requirement */
3074 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3077 /* Local side had dedicated bonding as requirement */
3078 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3081 /* Remote side had dedicated bonding as requirement */
3082 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3085 /* If none of the above criteria match, then don't store the key
/* True when the LTK type denotes the master (SMP_LTK) variant. */
3090 static bool ltk_type_master(u8 type)
3092 return (type == SMP_LTK);
/* Find an LTK by EDIV/Rand pair and master role; mismatching entries
 * are skipped (continue/return lines not visible here).
 */
3095 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3100 list_for_each_entry(k, &hdev->long_term_keys, list) {
3101 if (k->ediv != ediv || k->rand != rand)
3104 if (ltk_type_master(k->type) != master)
/* Find an LTK by peer address, address type and master role. */
3113 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3114 u8 addr_type, bool master)
3118 list_for_each_entry(k, &hdev->long_term_keys, list)
3119 if (addr_type == k->bdaddr_type &&
3120 bacmp(bdaddr, &k->bdaddr) == 0 &&
3121 ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to its IRK. First pass matches
 * the cached RPA directly; second pass runs the crypto match via
 * smp_irk_matches() and caches the RPA on success.
 */
3127 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3129 struct smp_irk *irk;
3131 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3132 if (!bacmp(&irk->rpa, rpa))
3136 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3137 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3138 bacpy(&irk->rpa, rpa);
/* Find the IRK stored for an identity address. Random addresses must
 * be static (top two bits 11) to qualify as identity addresses.
 */
3146 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3149 struct smp_irk *irk;
3151 /* Identity Address must be public or static random */
3152 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3155 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3156 if (addr_type == irk->addr_type &&
3157 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr. Reuses an existing
 * entry when present, otherwise allocates a new one. Works around
 * buggy controllers that report a changed-combination key for a fresh
 * legacy pairing. When @persistent is non-NULL it is set from
 * hci_persistent_key(). Error/NULL paths are not visible here.
 */
3164 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3165 bdaddr_t *bdaddr, u8 *val, u8 type,
3166 u8 pin_len, bool *persistent)
3168 struct link_key *key, *old_key;
3171 old_key = hci_find_link_key(hdev, bdaddr);
3173 old_key_type = old_key->type;
3176 old_key_type = conn ? conn->key_type : 0xff;
3177 key = kzalloc(sizeof(*key), GFP_KERNEL);
3180 list_add(&key->list, &hdev->link_keys);
3183 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3185 /* Some buggy controller combinations generate a changed
3186 * combination key for legacy pairing even when there's no
3188 if (type == HCI_LK_CHANGED_COMBINATION &&
3189 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3190 type = HCI_LK_COMBINATION;
3192 conn->key_type = type;
3195 bacpy(&key->bdaddr, bdaddr);
3196 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3197 key->pin_len = pin_len;
3199 if (type == HCI_LK_CHANGED_COMBINATION)
3200 key->type = old_key_type;
3205 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key keyed by address, address
 * type and master role. Allocation-failure and return lines are not
 * visible in this excerpt.
 */
3211 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3212 u8 addr_type, u8 type, u8 authenticated,
3213 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3215 struct smp_ltk *key, *old_key;
3216 bool master = ltk_type_master(type);
3218 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3222 key = kzalloc(sizeof(*key), GFP_KERNEL);
3225 list_add(&key->list, &hdev->long_term_keys);
3228 bacpy(&key->bdaddr, bdaddr);
3229 key->bdaddr_type = addr_type;
3230 memcpy(key->val, tk, sizeof(key->val));
3231 key->authenticated = authenticated;
3234 key->enc_size = enc_size;
/* Store (or update) an identity resolving key; also caches the last
 * seen RPA for the identity.
 */
3240 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3241 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3243 struct smp_irk *irk;
3245 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3247 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3251 bacpy(&irk->bdaddr, bdaddr);
3252 irk->addr_type = addr_type;
3254 list_add(&irk->list, &hdev->identity_resolving_keys);
3257 memcpy(irk->val, val, 16);
3258 bacpy(&irk->rpa, rpa);
/* Delete the stored link key for @bdaddr, if any. */
3263 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3265 struct link_key *key;
3267 key = hci_find_link_key(hdev, bdaddr);
3271 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3273 list_del(&key->list);
/* Delete all LTKs matching @bdaddr/@bdaddr_type; returns -ENOENT when
 * nothing was removed.
 */
3279 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3281 struct smp_ltk *k, *tmp;
3284 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3285 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3288 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3295 return removed ? 0 : -ENOENT;
/* Delete all IRKs matching @bdaddr/@addr_type. */
3298 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3300 struct smp_irk *k, *tmp;
3302 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3303 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3306 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3313 /* HCI command timer function: fires when the controller failed to
 * answer a command in time. Logs the stuck opcode (when a sent
 * command is still held), then resets cmd_cnt to 1 so the command
 * queue can make progress again and kicks the cmd worker.
 */
3314 static void hci_cmd_timeout(struct work_struct *work)
3316 struct hci_dev *hdev = container_of(work, struct hci_dev,
3319 if (hdev->sent_cmd) {
3320 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3321 u16 opcode = __le16_to_cpu(sent->opcode);
3323 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3325 BT_ERR("%s command tx timeout", hdev->name);
3328 atomic_set(&hdev->cmd_cnt, 1);
3329 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for @bdaddr. */
3332 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3335 struct oob_data *data;
3337 list_for_each_entry(data, &hdev->remote_oob_data, list)
3338 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete stored remote OOB data for @bdaddr, if any. */
3344 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3346 struct oob_data *data;
3348 data = hci_find_remote_oob_data(hdev, bdaddr);
3352 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3354 list_del(&data->list);
/* Drop all stored remote OOB data entries. */
3360 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3362 struct oob_data *data, *n;
3364 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3365 list_del(&data->list);
/* Store legacy (192-bit only) remote OOB data; the 256-bit fields are
 * zeroed to mark them as absent. Reuses an existing entry for the
 * same address when present.
 */
3370 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3371 u8 *hash, u8 *randomizer)
3373 struct oob_data *data;
3375 data = hci_find_remote_oob_data(hdev, bdaddr);
3377 data = kmalloc(sizeof(*data), GFP_KERNEL);
3381 bacpy(&data->bdaddr, bdaddr);
3382 list_add(&data->list, &hdev->remote_oob_data);
3385 memcpy(data->hash192, hash, sizeof(data->hash192));
3386 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3388 memset(data->hash256, 0, sizeof(data->hash256));
3389 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3391 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended (Secure Connections) remote OOB data with both the
 * 192-bit and 256-bit hash/randomizer values.
 */
3396 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3397 u8 *hash192, u8 *randomizer192,
3398 u8 *hash256, u8 *randomizer256)
3400 struct oob_data *data;
3402 data = hci_find_remote_oob_data(hdev, bdaddr);
3404 data = kmalloc(sizeof(*data), GFP_KERNEL);
3408 bacpy(&data->bdaddr, bdaddr);
3409 list_add(&data->list, &hdev->remote_oob_data);
3412 memcpy(data->hash192, hash192, sizeof(data->hash192));
3413 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3415 memcpy(data->hash256, hash256, sizeof(data->hash256));
3416 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3418 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Generic address-list lookup by address and address type (used for
 * blacklist, whitelist and LE white list).
 */
3423 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3424 bdaddr_t *bdaddr, u8 type)
3426 struct bdaddr_list *b;
3428 list_for_each_entry(b, bdaddr_list, list) {
3429 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove every entry of a generic address list. */
3436 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3438 struct list_head *p, *n;
3440 list_for_each_safe(p, n, bdaddr_list) {
3441 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add an address to a generic address list. BDADDR_ANY is rejected
 * and duplicates are refused (error return lines not visible here).
 */
3448 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3450 struct bdaddr_list *entry;
3452 if (!bacmp(bdaddr, BDADDR_ANY))
3455 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3458 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3462 bacpy(&entry->bdaddr, bdaddr);
3463 entry->bdaddr_type = type;
3465 list_add(&entry->list, list);
/* Remove an address from a generic address list; BDADDR_ANY clears
 * the whole list.
 */
3470 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3472 struct bdaddr_list *entry;
3474 if (!bacmp(bdaddr, BDADDR_ANY)) {
3475 hci_bdaddr_list_clear(list);
3479 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3483 list_del(&entry->list);
3489 /* This function requires the caller holds hdev->lock */
/* Look up LE connection parameters by identity address and type. */
3490 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3491 bdaddr_t *addr, u8 addr_type)
3493 struct hci_conn_params *params;
3495 /* The conn params list only contains identity addresses */
3496 if (!hci_is_identity_address(addr, addr_type))
3499 list_for_each_entry(params, &hdev->le_conn_params, list) {
3500 if (bacmp(¶ms->addr, addr) == 0 &&
3501 params->addr_type == addr_type) {
/* True when an LE connection to @addr/@type exists and is in the
 * BT_CONNECTED state.
 */
3509 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3511 struct hci_conn *conn;
3513 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3517 if (conn->dst_type != type)
3520 if (conn->state != BT_CONNECTED)
3526 /* This function requires the caller holds hdev->lock */
/* Search a pending-action list (pend_le_conns / pend_le_reports,
 * linked via the ->action member) for an identity address.
 */
3527 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3528 bdaddr_t *addr, u8 addr_type)
3530 struct hci_conn_params *param;
3532 /* The list only contains identity addresses */
3533 if (!hci_is_identity_address(addr, addr_type))
3536 list_for_each_entry(param, list, action) {
3537 if (bacmp(¶m->addr, addr) == 0 &&
3538 param->addr_type == addr_type)
3545 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address.
 * A new entry inherits the adapter-wide defaults and starts with
 * auto-connect disabled.
 */
3546 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3547 bdaddr_t *addr, u8 addr_type)
3549 struct hci_conn_params *params;
3551 if (!hci_is_identity_address(addr, addr_type))
3554 params = hci_conn_params_lookup(hdev, addr, addr_type);
3558 params = kzalloc(sizeof(*params), GFP_KERNEL);
3560 BT_ERR("Out of memory");
3564 bacpy(¶ms->addr, addr);
3565 params->addr_type = addr_type;
3567 list_add(¶ms->list, &hdev->le_conn_params);
3568 INIT_LIST_HEAD(¶ms->action);
3570 params->conn_min_interval = hdev->le_conn_min_interval;
3571 params->conn_max_interval = hdev->le_conn_max_interval;
3572 params->conn_latency = hdev->le_conn_latency;
3573 params->supervision_timeout = hdev->le_supv_timeout;
3574 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3576 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3581 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an address, re-linking the entry
 * onto the matching pending list (reports or conns) and refreshing
 * the background scan. Break statements between the switch cases are
 * not visible in this excerpt.
 */
3582 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3585 struct hci_conn_params *params;
3587 params = hci_conn_params_add(hdev, addr, addr_type);
3591 if (params->auto_connect == auto_connect)
3594 list_del_init(¶ms->action);
3596 switch (auto_connect) {
3597 case HCI_AUTO_CONN_DISABLED:
3598 case HCI_AUTO_CONN_LINK_LOSS:
3599 hci_update_background_scan(hdev);
3601 case HCI_AUTO_CONN_REPORT:
3602 list_add(¶ms->action, &hdev->pend_le_reports);
3603 hci_update_background_scan(hdev);
3605 case HCI_AUTO_CONN_ALWAYS:
3606 if (!is_connected(hdev, addr, addr_type)) {
3607 list_add(¶ms->action, &hdev->pend_le_conns);
3608 hci_update_background_scan(hdev);
3613 params->auto_connect = auto_connect;
3615 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3621 /* This function requires the caller holds hdev->lock */
/* Delete the connection parameters for one address and refresh the
 * background scan state.
 */
3622 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3624 struct hci_conn_params *params;
3626 params = hci_conn_params_lookup(hdev, addr, addr_type);
3630 list_del(¶ms->action);
3631 list_del(¶ms->list);
3634 hci_update_background_scan(hdev);
3636 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3639 /* This function requires the caller holds hdev->lock */
/* Remove only the entries whose auto-connect policy is DISABLED. */
3640 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3642 struct hci_conn_params *params, *tmp;
3644 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3645 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3647 list_del(¶ms->list);
3651 BT_DBG("All LE disabled connection parameters were removed");
3654 /* This function requires the caller holds hdev->lock */
/* Remove every LE connection-parameter entry and refresh the
 * background scan.
 */
3655 void hci_conn_params_clear_all(struct hci_dev *hdev)
3657 struct hci_conn_params *params, *tmp;
3659 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3660 list_del(¶ms->action);
3661 list_del(¶ms->list);
3665 hci_update_background_scan(hdev);
3667 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the interleaved inquiry: on error,
 * log it and fall back to DISCOVERY_STOPPED (under hdev lock).
 */
3670 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3673 BT_ERR("Failed to start inquiry: status %d", status);
3676 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3677 hci_dev_unlock(hdev);
/* Completion handler for the LE-scan-disable request. For pure LE
 * discovery the session simply stops; for interleaved discovery a
 * BR/EDR inquiry (GIAC) is started next.
 */
3682 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3684 /* General inquiry access code (GIAC) */
3685 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3686 struct hci_request req;
3687 struct hci_cp_inquiry cp;
3691 BT_ERR("Failed to disable LE scanning: status %d", status);
3695 switch (hdev->discovery.type) {
3696 case DISCOV_TYPE_LE:
3698 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3699 hci_dev_unlock(hdev);
3702 case DISCOV_TYPE_INTERLEAVED:
3703 hci_req_init(&req, hdev);
3705 memset(&cp, 0, sizeof(cp));
3706 memcpy(&cp.lap, lap, sizeof(cp.lap));
3707 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3708 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3712 hci_inquiry_cache_flush(hdev);
3714 err = hci_req_run(&req, inquiry_complete);
3716 BT_ERR("Inquiry request failed: err %d", err);
3717 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3720 hci_dev_unlock(hdev);
/* Delayed worker that issues the LE Set Scan Enable (disable) command
 * when the scan timeout expires.
 */
3725 static void le_scan_disable_work(struct work_struct *work)
3727 struct hci_dev *hdev = container_of(work, struct hci_dev,
3728 le_scan_disable.work);
3729 struct hci_request req;
3732 BT_DBG("%s", hdev->name);
3734 hci_req_init(&req, hdev);
3736 hci_req_add_le_scan_disable(&req);
3738 err = hci_req_run(&req, le_scan_disable_work_complete);
3740 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command unless we are advertising or
 * have an LE connection attempt in flight, in which case the update
 * is deferred to the next cycle (see comment below).
 */
3743 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3745 struct hci_dev *hdev = req->hdev;
3747 /* If we're advertising or initiating an LE connection we can't
3748 * go ahead and change the random address at this time. This is
3749 * because the eventual initiator address used for the
3750 * subsequently created connection will be undefined (some
3751 * controllers use the new address and others the one we had
3752 * when the operation started).
3754 * In this kind of scenario skip the update and let the random
3755 * address be updated at the next cycle.
3757 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3758 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3759 BT_DBG("Deferring random address update");
3763 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Pick and program our own LE address for the next operation, in
 * priority order: RPA (privacy enabled, regenerated on expiry),
 * non-resolvable private address (privacy required but no RPA),
 * static random address (forced or no public address), or the public
 * address. Sets *own_addr_type accordingly.
 */
3766 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3769 struct hci_dev *hdev = req->hdev;
3772 /* If privacy is enabled use a resolvable private address. If
3773 * current RPA has expired or there is something else than
3774 * the current RPA in use, then generate a new one.
3776 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3779 *own_addr_type = ADDR_LE_DEV_RANDOM;
3781 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3782 !bacmp(&hdev->random_addr, &hdev->rpa))
3785 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3787 BT_ERR("%s failed to generate new RPA", hdev->name);
3791 set_random_addr(req, &hdev->rpa);
3793 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3794 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3799 /* In case of required privacy without resolvable private address,
3800 * use an unresolvable private address. This is useful for active
3801 * scanning and non-connectable advertising.
3803 if (require_privacy) {
3806 get_random_bytes(&urpa, 6);
3807 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3809 *own_addr_type = ADDR_LE_DEV_RANDOM;
3810 set_random_addr(req, &urpa);
3814 /* If forcing static address is in use or there is no public
3815 * address use the static address as random address (but skip
3816 * the HCI command if the current random address is already the
3819 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3820 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3821 *own_addr_type = ADDR_LE_DEV_RANDOM;
3822 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3823 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3824 &hdev->static_addr);
3828 /* Neither privacy nor static address is being used so use a
3831 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3836 /* Copy the Identity Address of the controller.
3838 * If the controller has a public BD_ADDR, then by default use that one.
3839 * If this is a LE only controller without a public address, default to
3840 * the static random address.
3842 * For debugging purposes it is possible to force controllers with a
3843 * public address to use the static random address instead.
3845 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3848 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3849 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3850 bacpy(bdaddr, &hdev->static_addr);
3851 *bdaddr_type = ADDR_LE_DEV_RANDOM;
/* else-branch: controller has a usable public address */
3853 bacpy(bdaddr, &hdev->bdaddr);
3854 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3858 /* Alloc HCI device: allocate a zeroed hci_dev and initialize default
 * parameters, locks, lists, work items and queues. Values such as the
 * sniff/scan/connection intervals are controller defaults in HCI
 * units — TODO(review) confirm exact units against hci_core.h.
 */
3859 struct hci_dev *hci_alloc_dev(void)
3861 struct hci_dev *hdev;
3863 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3867 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3868 hdev->esco_type = (ESCO_HV1);
3869 hdev->link_mode = (HCI_LM_ACCEPT);
3870 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3871 hdev->io_capability = 0x03; /* No Input No Output */
3872 hdev->manufacturer = 0xffff; /* Default to internal use */
3873 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3874 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3876 hdev->sniff_max_interval = 800;
3877 hdev->sniff_min_interval = 80;
3879 hdev->le_adv_channel_map = 0x07;
3880 hdev->le_scan_interval = 0x0060;
3881 hdev->le_scan_window = 0x0030;
3882 hdev->le_conn_min_interval = 0x0028;
3883 hdev->le_conn_max_interval = 0x0038;
3884 hdev->le_conn_latency = 0x0000;
3885 hdev->le_supv_timeout = 0x002a;
3887 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3888 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3889 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3890 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3892 mutex_init(&hdev->lock);
3893 mutex_init(&hdev->req_lock);
3895 INIT_LIST_HEAD(&hdev->mgmt_pending);
3896 INIT_LIST_HEAD(&hdev->blacklist);
3897 INIT_LIST_HEAD(&hdev->whitelist);
3898 INIT_LIST_HEAD(&hdev->uuids);
3899 INIT_LIST_HEAD(&hdev->link_keys);
3900 INIT_LIST_HEAD(&hdev->long_term_keys);
3901 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3902 INIT_LIST_HEAD(&hdev->remote_oob_data);
3903 INIT_LIST_HEAD(&hdev->le_white_list);
3904 INIT_LIST_HEAD(&hdev->le_conn_params);
3905 INIT_LIST_HEAD(&hdev->pend_le_conns);
3906 INIT_LIST_HEAD(&hdev->pend_le_reports);
3907 INIT_LIST_HEAD(&hdev->conn_hash.list);
3909 INIT_WORK(&hdev->rx_work, hci_rx_work);
3910 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3911 INIT_WORK(&hdev->tx_work, hci_tx_work);
3912 INIT_WORK(&hdev->power_on, hci_power_on);
3914 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3915 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3916 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3918 skb_queue_head_init(&hdev->rx_q);
3919 skb_queue_head_init(&hdev->cmd_q);
3920 skb_queue_head_init(&hdev->raw_q);
3922 init_waitqueue_head(&hdev->req_wait_q);
3924 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3926 hci_init_sysfs(hdev);
3927 discovery_init(hdev);
3931 EXPORT_SYMBOL(hci_alloc_dev);
3933 /* Free HCI device: drop the device reference; the struct is freed by
 * the device release callback once the last reference is gone.
 */
3934 void hci_free_dev(struct hci_dev *hdev)
3936 /* will free via device release */
3937 put_device(&hdev->dev);
3939 EXPORT_SYMBOL(hci_free_dev);
3941 /* Register HCI device: validate driver callbacks, allocate an index
 * (AMP devices start at 1 so index 0 can serve as AMP controller ID),
 * create workqueues, debugfs dir, crypto context, sysfs device and
 * rfkill, publish the device and kick the initial power-on. Error
 * unwinding labels are at the bottom; several goto/return lines are
 * not visible in this excerpt.
 */
3942 int hci_register_dev(struct hci_dev *hdev)
3946 if (!hdev->open || !hdev->close || !hdev->send)
3949 /* Do not allow HCI_AMP devices to register at index 0,
3950 * so the index can be used as the AMP controller ID.
3952 switch (hdev->dev_type) {
3954 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3957 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3966 sprintf(hdev->name, "hci%d", id);
3969 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3971 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3972 WQ_MEM_RECLAIM, 1, hdev->name);
3973 if (!hdev->workqueue) {
3978 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3979 WQ_MEM_RECLAIM, 1, hdev->name);
3980 if (!hdev->req_workqueue) {
3981 destroy_workqueue(hdev->workqueue);
3986 if (!IS_ERR_OR_NULL(bt_debugfs))
3987 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3989 dev_set_name(&hdev->dev, "%s", hdev->name);
3991 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3993 if (IS_ERR(hdev->tfm_aes)) {
3994 BT_ERR("Unable to create crypto context");
3995 error = PTR_ERR(hdev->tfm_aes);
3996 hdev->tfm_aes = NULL;
4000 error = device_add(&hdev->dev);
4004 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4005 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4008 if (rfkill_register(hdev->rfkill) < 0) {
4009 rfkill_destroy(hdev->rfkill);
4010 hdev->rfkill = NULL;
4014 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4015 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4017 set_bit(HCI_SETUP, &hdev->dev_flags);
4018 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4020 if (hdev->dev_type == HCI_BREDR) {
4021 /* Assume BR/EDR support until proven otherwise (such as
4022 * through reading supported features during init.
4024 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4027 write_lock(&hci_dev_list_lock);
4028 list_add(&hdev->list, &hci_dev_list);
4029 write_unlock(&hci_dev_list_lock);
4031 /* Devices that are marked for raw-only usage are unconfigured
4032 * and should not be included in normal operation.
4034 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4035 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4037 hci_notify(hdev, HCI_DEV_REG);
4040 queue_work(hdev->req_workqueue, &hdev->power_on);
/* error unwinding path */
4045 crypto_free_blkcipher(hdev->tfm_aes);
4047 destroy_workqueue(hdev->workqueue);
4048 destroy_workqueue(hdev->req_workqueue);
4050 ida_simple_remove(&hci_index_ida, hdev->id);
4054 EXPORT_SYMBOL(hci_register_dev);
4056 /* Unregister HCI device: mark as unregistering, unlink from the
 * global list, close the device, free reassembly buffers, tear down
 * mgmt state, rfkill, crypto, sysfs/debugfs, workqueues and all
 * stored key/parameter lists, then release the index.
 */
4057 void hci_unregister_dev(struct hci_dev *hdev)
4061 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4063 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4067 write_lock(&hci_dev_list_lock);
4068 list_del(&hdev->list);
4069 write_unlock(&hci_dev_list_lock);
4071 hci_dev_do_close(hdev);
4073 for (i = 0; i < NUM_REASSEMBLY; i++)
4074 kfree_skb(hdev->reassembly[i]);
4076 cancel_work_sync(&hdev->power_on);
4078 if (!test_bit(HCI_INIT, &hdev->flags) &&
4079 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4080 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4082 mgmt_index_removed(hdev);
4083 hci_dev_unlock(hdev);
4086 /* mgmt_index_removed should take care of emptying the
4088 BUG_ON(!list_empty(&hdev->mgmt_pending));
4090 hci_notify(hdev, HCI_DEV_UNREG);
4093 rfkill_unregister(hdev->rfkill);
4094 rfkill_destroy(hdev->rfkill);
4098 crypto_free_blkcipher(hdev->tfm_aes);
4100 device_del(&hdev->dev);
4102 debugfs_remove_recursive(hdev->debugfs);
4104 destroy_workqueue(hdev->workqueue);
4105 destroy_workqueue(hdev->req_workqueue);
4108 hci_bdaddr_list_clear(&hdev->blacklist);
4109 hci_bdaddr_list_clear(&hdev->whitelist);
4110 hci_uuids_clear(hdev);
4111 hci_link_keys_clear(hdev);
4112 hci_smp_ltks_clear(hdev);
4113 hci_smp_irks_clear(hdev);
4114 hci_remote_oob_data_clear(hdev);
4115 hci_bdaddr_list_clear(&hdev->le_white_list);
4116 hci_conn_params_clear_all(hdev);
4117 hci_dev_unlock(hdev);
4121 ida_simple_remove(&hci_index_ida, id);
4123 EXPORT_SYMBOL(hci_unregister_dev);
4125 /* Suspend HCI device: just notify the HCI socket layer. */
4126 int hci_suspend_dev(struct hci_dev *hdev)
4128 hci_notify(hdev, HCI_DEV_SUSPEND);
4131 EXPORT_SYMBOL(hci_suspend_dev);
4133 /* Resume HCI device: just notify the HCI socket layer. */
4134 int hci_resume_dev(struct hci_dev *hdev)
4136 hci_notify(hdev, HCI_DEV_RESUME);
4139 EXPORT_SYMBOL(hci_resume_dev);
4141 /* Receive frame from HCI drivers: reject frames unless the device is
 * up or initializing, tag the skb as incoming, timestamp it, queue it
 * on rx_q and schedule the rx worker.
 */
4142 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4144 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4145 && !test_bit(HCI_INIT, &hdev->flags))) {
4151 bt_cb(skb)->incoming = 1;
4154 __net_timestamp(skb);
4156 skb_queue_tail(&hdev->rx_q, skb);
4157 queue_work(hdev->workqueue, &hdev->rx_work);
4161 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of @type from driver data into
 * hdev->reassembly[index]. Allocates the skb on first fragment (sized
 * for the packet type's maximum), accumulates bytes, derives the
 * expected remaining length from the packet header once it is
 * complete, and hands the finished frame to hci_recv_frame(). Return
 * statements and parts of the per-fragment while-loop are not visible
 * in this excerpt.
 */
4163 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4164 int count, __u8 index)
4169 struct sk_buff *skb;
4170 struct bt_skb_cb *scb;
4172 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4173 index >= NUM_REASSEMBLY)
4176 skb = hdev->reassembly[index];
/* No reassembly in progress: size a fresh skb per packet type */
4180 case HCI_ACLDATA_PKT:
4181 len = HCI_MAX_FRAME_SIZE;
4182 hlen = HCI_ACL_HDR_SIZE;
4185 len = HCI_MAX_EVENT_SIZE;
4186 hlen = HCI_EVENT_HDR_SIZE;
4188 case HCI_SCODATA_PKT:
4189 len = HCI_MAX_SCO_SIZE;
4190 hlen = HCI_SCO_HDR_SIZE;
4194 skb = bt_skb_alloc(len, GFP_ATOMIC);
4198 scb = (void *) skb->cb;
4200 scb->pkt_type = type;
4202 hdev->reassembly[index] = skb;
/* Copy as much as is expected for the current header/payload stage */
4206 scb = (void *) skb->cb;
4207 len = min_t(uint, scb->expect, count);
4209 memcpy(skb_put(skb, len), data, len);
/* Once a full header has arrived, learn the payload length */
4218 if (skb->len == HCI_EVENT_HDR_SIZE) {
4219 struct hci_event_hdr *h = hci_event_hdr(skb);
4220 scb->expect = h->plen;
4222 if (skb_tailroom(skb) < scb->expect) {
4224 hdev->reassembly[index] = NULL;
4230 case HCI_ACLDATA_PKT:
4231 if (skb->len == HCI_ACL_HDR_SIZE) {
4232 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4233 scb->expect = __le16_to_cpu(h->dlen);
4235 if (skb_tailroom(skb) < scb->expect) {
4237 hdev->reassembly[index] = NULL;
4243 case HCI_SCODATA_PKT:
4244 if (skb->len == HCI_SCO_HDR_SIZE) {
4245 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4246 scb->expect = h->dlen;
4248 if (skb_tailroom(skb) < scb->expect) {
4250 hdev->reassembly[index] = NULL;
4257 if (scb->expect == 0) {
4258 /* Complete frame */
4260 bt_cb(skb)->pkt_type = type;
4261 hci_recv_frame(hdev, skb);
4263 hdev->reassembly[index] = NULL;
/* Driver entry point for typed fragments: loops hci_reassembly() on
 * slot (type - 1) until all @count bytes are consumed.
 */
4271 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4275 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4279 rem = hci_reassembly(hdev, type, data, count, type - 1);
4283 data += (count - rem);
4289 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for byte-stream drivers (e.g. UART). */
4291 #define STREAM_REASSEMBLY 0
/* Driver entry point for an untyped byte stream: the first byte of a
 * frame carries the packet type, then reassembly continues in the
 * STREAM_REASSEMBLY slot until @count is consumed.
 */
4293 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4299 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4302 struct { char type; } *pkt;
4304 /* Start of the frame */
4311 type = bt_cb(skb)->pkt_type;
4313 rem = hci_reassembly(hdev, type, data, count,
4318 data += (count - rem);
4324 EXPORT_SYMBOL(hci_recv_stream_fragment);
4326 /* ---- Interface to upper protocols ---- */
4328 int hci_register_cb(struct hci_cb *cb)
4330 BT_DBG("%p name %s", cb, cb->name);
4332 write_lock(&hci_cb_list_lock);
4333 list_add(&cb->list, &hci_cb_list);
4334 write_unlock(&hci_cb_list_lock);
4338 EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback set. */
4340 int hci_unregister_cb(struct hci_cb *cb)
4342 BT_DBG("%p name %s", cb, cb->name);
4344 write_lock(&hci_cb_list_lock);
4345 list_del(&cb->list);
4346 write_unlock(&hci_cb_list_lock);
4350 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, mirror it to
 * the monitor channel (and to raw sockets when in promiscuous mode),
 * then call the driver's ->send().
 */
4352 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4356 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4359 __net_timestamp(skb);
4361 /* Send copy to monitor */
4362 hci_send_to_monitor(hdev, skb);
4364 if (atomic_read(&hdev->promisc)) {
4365 /* Send copy to the sockets */
4366 hci_send_to_sock(hdev, skb);
4369 /* Get rid of skb owner, prior to sending to the driver. */
4372 err = hdev->send(hdev, skb);
4374 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialize an HCI request: empty command queue bound to hdev. */
4379 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4381 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * queued command, splice the request's commands onto the device's
 * command queue under its lock, and kick the command worker. A build
 * error purges the queue; empty requests are rejected.
 */
4386 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4388 struct hci_dev *hdev = req->hdev;
4389 struct sk_buff *skb;
4390 unsigned long flags;
4392 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4394 /* If an error occurred during request building, remove all HCI
4395 * commands queued on the HCI request queue.
4398 skb_queue_purge(&req->cmd_q);
4402 /* Do not allow empty requests */
4403 if (skb_queue_empty(&req->cmd_q))
4406 skb = skb_peek_tail(&req->cmd_q);
4407 bt_cb(skb)->req.complete = complete;
4409 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4410 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4411 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4413 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: header (little-endian
 * opcode) followed by @plen parameter bytes, tagged HCI_COMMAND_PKT.
 */
4418 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4419 u32 plen, const void *param)
4421 int len = HCI_COMMAND_HDR_SIZE + plen;
4422 struct hci_command_hdr *hdr;
4423 struct sk_buff *skb;
4425 skb = bt_skb_alloc(len, GFP_ATOMIC);
4429 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4430 hdr->opcode = cpu_to_le16(opcode);
4434 memcpy(skb_put(skb, plen), param, plen);
4436 BT_DBG("skb len %d", skb->len);
4438 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4443 /* Send HCI command: build a stand-alone command, mark it as the
 * start of a single-command request, queue it and wake the command
 * worker.
 */
4444 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4447 struct sk_buff *skb;
4449 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4451 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4453 BT_ERR("%s no memory for command", hdev->name);
4457 /* Stand-alone HCI commands must be flagged as
4458 * single-command requests.
4460 bt_cb(skb)->req.start = true;
4462 skb_queue_tail(&hdev->cmd_q, skb);
4463 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* NOTE(review): req.event appears to select the event that
	 * completes this command - confirm against the event handler.
	 */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper around hci_req_add_ev() with no special
 * completion event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
	hci_req_add_ev(req, opcode, plen, param, 0);
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of hdev->sent_cmd when its
 * opcode matches @opcode.  (The NULL-return lines for the no-match /
 * no-sent-command cases are elided from this excerpt.)
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)

	hdr = (void *) hdev->sent_cmd->data;

	/* Compare in wire (little endian) byte order */
	if (hdr->opcode != cpu_to_le16(opcode))

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header to @skb: handle+flags packed into one LE16,
 * then the data length.
 * NOTE(review): `len` is declared/assigned on a line elided from this
 * excerpt - presumably skb->len before the push; confirm upstream.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
	struct hci_acl_hdr *hdr;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
/* Queue an outgoing ACL skb (plus any frag_list fragments) onto @queue
 * with ACL headers applied.  The first fragment keeps @flags;
 * continuation fragments are queued with ACL_START cleared.
 * NOTE(review): the switch case labels and loop structure are partly
 * elided in this excerpt; chan->handle vs conn->handle selection
 * depends on hdev->dev_type - confirm upstream.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Head skb length excludes the frag_list fragments */
	skb->len = skb_headlen(skb);

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
		hci_add_acl_hdr(skb, conn->handle, flags);
		hci_add_acl_hdr(skb, chan->handle, flags);
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);

	list = skb_shinfo(skb)->frag_list;
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach fragments so they can be queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry ACL_START */
		flags &= ~ACL_START;

			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);

		spin_unlock(&queue->lock);
/* Queue an ACL skb on the channel's data queue and schedule TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (connection handle + length) to @skb, queue it
 * on the connection's data queue and schedule TX work.
 * (Length validation lines, if any, are elided from this excerpt.)
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the prepared header into the newly pushed headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
4626 /* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
/* Pick the connection of link @type that has queued data and the
 * fewest in-flight packets, and derive its send budget (*quote) from
 * the matching controller buffer count.  (Out parameters, `continue`
 * statements and quota arithmetic are elided from this excerpt.)
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)

		/* Track the least-busy eligible connection */
		if (c->sent < min) {

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)

	switch (conn->type) {
		cnt = hdev->acl_cnt;
		cnt = hdev->sco_cnt;
		/* LE shares the ACL pool when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		BT_ERR("Unknown link type");

	BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handling: forcibly disconnect every connection of link
 * @type that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
	struct hci_conn_hash *h = &hdev->conn_hash;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: choose the best channel of link @type to service
 * next.  The highest head-of-queue skb priority wins; among equals the
 * channel on the least-busy connection wins.  The send budget is
 * returned through the out parameter (elided from this excerpt along
 * with `continue`s, braces and the quota arithmetic).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority channels never preempt */
			if (skb->priority < cur_prio)

			/* A new highest priority resets the selection */
			if (skb->priority > cur_prio) {
				cur_prio = skb->priority;

			/* Tie-break on the least-busy connection */
			if (conn->sent < min) {

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)

	switch (chan->conn->type) {
		cnt = hdev->acl_cnt;
		cnt = hdev->block_cnt;
		cnt = hdev->sco_cnt;
		/* LE shares the ACL pool when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		BT_ERR("Unknown link type");

	BT_DBG("chan %p quote %d", chan, *quote);
/* Starvation avoidance: after a TX round, promote the head skb of
 * still-queued channels to HCI_PRIO_MAX - 1 so they eventually get
 * serviced.  (Channel-selection lines, e.g. the chan->sent check, are
 * elided from this excerpt.)
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&chan->data_q))

			skb = skb_peek(&chan->data_q);
			/* Already at the promotion ceiling */
			if (skb->priority >= HCI_PRIO_MAX - 1)

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by @skb's ACL payload
 * (header bytes excluded), for block-based flow control accounting.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If no buffer credits remain (@cnt == 0) and nothing has been sent
 * for longer than HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled.
 * Skipped entirely for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
4859 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4861 unsigned int cnt = hdev->acl_cnt;
4862 struct hci_chan *chan;
4863 struct sk_buff *skb;
4866 __check_timeout(hdev, cnt);
4868 while (hdev->acl_cnt &&
4869 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4870 u32 priority = (skb_peek(&chan->data_q))->priority;
4871 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4872 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4873 skb->len, skb->priority);
4875 /* Stop if priority has changed */
4876 if (skb->priority < priority)
4879 skb = skb_dequeue(&chan->data_q);
4881 hci_conn_enter_active_mode(chan->conn,
4882 bt_cb(skb)->force_active);
4884 hci_send_frame(hdev, skb);
4885 hdev->acl_last_tx = jiffies;
4893 if (cnt != hdev->acl_cnt)
4894 hci_prio_recalculate(hdev, ACL_LINK);
4897 static void hci_sched_acl_blk(struct hci_dev *hdev)
4899 unsigned int cnt = hdev->block_cnt;
4900 struct hci_chan *chan;
4901 struct sk_buff *skb;
4905 __check_timeout(hdev, cnt);
4907 BT_DBG("%s", hdev->name);
4909 if (hdev->dev_type == HCI_AMP)
4914 while (hdev->block_cnt > 0 &&
4915 (chan = hci_chan_sent(hdev, type, "e))) {
4916 u32 priority = (skb_peek(&chan->data_q))->priority;
4917 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4920 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4921 skb->len, skb->priority);
4923 /* Stop if priority has changed */
4924 if (skb->priority < priority)
4927 skb = skb_dequeue(&chan->data_q);
4929 blocks = __get_blocks(hdev, skb);
4930 if (blocks > hdev->block_cnt)
4933 hci_conn_enter_active_mode(chan->conn,
4934 bt_cb(skb)->force_active);
4936 hci_send_frame(hdev, skb);
4937 hdev->acl_last_tx = jiffies;
4939 hdev->block_cnt -= blocks;
4942 chan->sent += blocks;
4943 chan->conn->sent += blocks;
4947 if (cnt != hdev->block_cnt)
4948 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow control mode.  (Early-return
 * statements after the guard checks are elided from this excerpt.)
 */
static void hci_sched_acl(struct hci_dev *hdev)
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
4975 static void hci_sched_sco(struct hci_dev *hdev)
4977 struct hci_conn *conn;
4978 struct sk_buff *skb;
4981 BT_DBG("%s", hdev->name);
4983 if (!hci_conn_num(hdev, SCO_LINK))
4986 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4987 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4988 BT_DBG("skb %p len %d", skb, skb->len);
4989 hci_send_frame(hdev, skb);
4992 if (conn->sent == ~0)
/* eSCO scheduler: mirror of the SCO variant but for ESCO_LINK
 * connections; both draw on the shared sco_cnt buffer credits.
 * (The quota out-argument, declarations and counter reset are elided
 * from this excerpt.)
 */
static void hci_sched_esco(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection sent counter at ~0 */
			if (conn->sent == ~0)
5022 static void hci_sched_le(struct hci_dev *hdev)
5024 struct hci_chan *chan;
5025 struct sk_buff *skb;
5026 int quote, cnt, tmp;
5028 BT_DBG("%s", hdev->name);
5030 if (!hci_conn_num(hdev, LE_LINK))
5033 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5034 /* LE tx timeout must be longer than maximum
5035 * link supervision timeout (40.9 seconds) */
5036 if (!hdev->le_cnt && hdev->le_pkts &&
5037 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5038 hci_link_tx_to(hdev, LE_LINK);
5041 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5043 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5044 u32 priority = (skb_peek(&chan->data_q))->priority;
5045 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5046 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5047 skb->len, skb->priority);
5049 /* Stop if priority has changed */
5050 if (skb->priority < priority)
5053 skb = skb_dequeue(&chan->data_q);
5055 hci_send_frame(hdev, skb);
5056 hdev->le_last_tx = jiffies;
5067 hdev->acl_cnt = cnt;
5070 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run the per-link-type schedulers (skipped while a user
 * channel owns the device) and then flush any raw-queued packets
 * straight to the driver.  (The hci_sched_le() call and closing braces
 * are elided from this excerpt.)
 */
static void hci_tx_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* A user channel owns the device; the kernel must not schedule */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
5094 /* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
/* RX path for ACL data: strip the ACL header, decode handle/flags,
 * look up the owning connection and hand the payload to L2CAP.
 * Unknown handles are logged.  (The hci_dev_lock() call, the
 * conn NULL-check branch and the skb free path are elided from this
 * excerpt.)
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle and flags are packed into one little-endian 16-bit word */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,

	hdev->stat.acl_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);

		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
/* SCO data packet */
/* RX path for SCO data: strip the SCO header, look up the connection
 * by handle and hand the payload to the SCO layer.  Unknown handles
 * are logged.  (The `handle` declaration, hci_dev_lock() call and skb
 * free path are elided from this excerpt.)
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);

		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
/* The running request is complete when the next queued command starts
 * a new request.  (The empty-queue return is elided from this
 * excerpt.)
 */
static bool hci_req_is_complete(struct hci_dev *hdev)
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);

	return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * kick cmd_work.  Used when a controller spuriously completes a reset
 * during init (see hci_req_cmd_complete).  HCI_OP_RESET itself is
 * never resent.  (The `opcode` declaration, early returns and the
 * clone NULL-check are elided from this excerpt.)
 */
static void hci_resend_last(struct hci_dev *hdev)
	struct hci_command_hdr *sent;
	struct sk_buff *skb;

	if (!hdev->sent_cmd)

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never resend an HCI Reset */
	if (opcode == HCI_OP_RESET)

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Resolve a completed command against the currently running request:
 * handle spurious CSR resets during init, detect end-of-request, flush
 * the request's remaining queued commands on error, and invoke its
 * completion callback exactly once.  (Several returns, braces and the
 * kfree_skb of flushed commands are elided from this excerpt.)
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		/* We must set the complete callback to NULL to
		 * avoid calling the callback more than once if
		 * this function gets called again.
		 */
		bt_cb(hdev->sent_cmd)->req.complete = NULL;

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The next request starts here: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);

		req_complete = bt_cb(skb)->req.complete;
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	/* Fire the request's completion callback, if one was found */
		req_complete(hdev, status);
/* RX work item: drain hdev->rx_q, mirroring each packet to the monitor
 * socket (and raw sockets in promiscuous mode), then dispatch by
 * packet type.  Data packets are dropped while HCI_INIT is set, and
 * everything is dropped while a user channel owns the device.
 * (kfree_skb drop paths, `break`s and case labels are partly elided
 * from this excerpt.)
 */
static void hci_rx_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);

		/* A user channel owns the device; kernel stack stays out */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:

		/* Dispatch by packet type */
		switch (bt_cb(skb)->pkt_type) {
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
/* Command work item: when the controller has command credits, take the
 * next queued command, keep a clone in hdev->sent_cmd for completion
 * matching, transmit it and (re)arm the command timeout.  On clone
 * failure the command is put back and the work requeued.  (The else
 * branch marker, timer argument and braces are elided from this
 * excerpt.)
 */
static void hci_cmd_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
				schedule_delayed_work(&hdev->cmd_timer,
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
/* Queue an LE Set Scan Enable command that disables scanning on @req. */
void hci_req_add_le_scan_disable(struct hci_request *req)
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5358 void hci_req_add_le_passive_scan(struct hci_request *req)
5360 struct hci_cp_le_set_scan_param param_cp;
5361 struct hci_cp_le_set_scan_enable enable_cp;
5362 struct hci_dev *hdev = req->hdev;
5365 /* Set require_privacy to false since no SCAN_REQ are send
5366 * during passive scanning. Not using an unresolvable address
5367 * here is important so that peer devices using direct
5368 * advertising with our address will be correctly reported
5369 * by the controller.
5371 if (hci_update_random_address(req, false, &own_addr_type))
5374 memset(¶m_cp, 0, sizeof(param_cp));
5375 param_cp.type = LE_SCAN_PASSIVE;
5376 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5377 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5378 param_cp.own_address_type = own_addr_type;
5379 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5382 memset(&enable_cp, 0, sizeof(enable_cp));
5383 enable_cp.enable = LE_SCAN_ENABLE;
5384 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5385 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: failures are
 * only logged.  (The success early-return is elided from this
 * excerpt.)
 */
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
	BT_DBG("HCI request failed to update background scanning: "
	       "status 0x%2.2x", status);
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 * (The `err` declaration, early-return statements and braces are
 * elided from this excerpt.)
 */
void hci_update_background_scan(struct hci_dev *hdev)
	struct hci_request req;
	struct hci_conn *conn;

	/* Bail out while the device is down, still initialising,
	 * configuring, auto-off pending or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)

	hci_req_init(&req, hdev);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);

	err = hci_req_run(&req, update_background_scan_complete);
		BT_ERR("Failed to run HCI request: err %d", err);