Bluetooth: Add support for changing the public device address
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
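
/* Usage note (an illustration, not part of the upstream file): the
 * attribute above is reachable from userspace through debugfs, e.g.
 * (assuming the default mount point and a controller named hci0):
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The file itself is created in __hci_init() below during the
 * HCI_SETUP phase.
 */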

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}
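
/* A minimal sketch of the byte-order transform performed by uuids_show()
 * above; "example_uuid_to_big_endian" is illustrative only and not used
 * elsewhere in this file.
 */
static void example_uuid_to_big_endian(const u8 stored[16], u8 out[16])
{
        int i;

        /* Byte 15 of the stored value becomes byte 0 of the canonical
         * big-endian representation that the %pUb modifier expects.
         */
        for (i = 0; i < 16; i++)
                out[i] = stored[15 - i];
}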

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
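
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate
 * for a u64 attribute backed by the get/set callbacks. Roughly (a
 * simplified sketch of the expansion from <linux/fs.h>), the macro
 * above produces an open handler like:
 *
 *      static int auto_accept_delay_fops_open(struct inode *inode,
 *                                             struct file *file)
 *      {
 *              return simple_attr_open(inode, file,
 *                                      auto_accept_delay_get,
 *                                      auto_accept_delay_set, "%llu\n");
 *      }
 *
 * with .read and .write wired to simple_attr_read() and
 * simple_attr_write(), which format and parse the value using the given
 * format string.
 */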

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
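
/* The sniff interval values above are expressed in baseband slots of
 * 0.625 ms, which is why the setters reject odd values. A sketch of the
 * unit conversion ("example_slots_to_ms" is hypothetical and not used
 * by the code above):
 */
static u32 example_slots_to_ms(u16 slots)
{
        /* Each slot is 0.625 ms, i.e. 5/8 of a millisecond. */
        return (slots * 5) / 8;
}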

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
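
/* The LE connection interval values above are in units of 1.25 ms, so
 * the accepted range 0x0006-0x0c80 corresponds to 7.5 ms - 4 s; the
 * supervision timeout handled below uses 10 ms units instead. A sketch
 * of the conversion ("example_conn_interval_to_ms" is hypothetical and
 * not used by the code above):
 */
static u32 example_conn_interval_to_ms(u16 interval)
{
        /* Each unit is 1.25 ms, i.e. 5/4 of a millisecond. */
        return (interval * 5) / 4;
}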

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
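
/* A hedged sketch of how a caller can use __hci_cmd_sync(): issue the
 * Read BD_ADDR command synchronously and parse the command complete
 * parameters. As in dut_mode_write() above, the request lock must be
 * held around the call. "example_read_bd_addr" is illustrative only.
 */
static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct hci_rp_read_bd_addr *rp;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        if (skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return -EIO;
        }

        rp = (struct hci_rp_read_bd_addr *) skb->data;
        if (rp->status) {
                kfree_skb(skb);
                return -bt_to_errno(rp->status);
        }

        bacpy(bdaddr, &rp->bdaddr);
        kfree_skb(skb);

        return 0;
}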

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
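
/* A sketch of the builder/runner split used throughout this file: a
 * request function only queues commands, and hci_req_sync() runs the
 * queue and sleeps until the final command completes. The builder below
 * mirrors the scan enable requests used elsewhere in the core;
 * "example_scan_req" is illustrative only.
 */
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* Typical invocation:
 *
 *      hci_req_sync(hdev, example_scan_req, SCAN_PAGE, HCI_CMD_TIMEOUT);
 */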

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
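
/* The event mask is a 64-bit little-endian bit field where, for the
 * events handled above, the bit position is the HCI event code minus
 * one: e.g. Disconnection Complete (0x05) is byte 0 bit 4, hence
 * events[0] |= 0x10. A sketch of that mapping ("example_set_event_bit"
 * is hypothetical and not used by the code above):
 */
static void example_set_event_bit(u8 events[8], u8 event_code)
{
        u8 bit = event_code - 1;

        events[bit / 8] |= 1 << (bit % 8);
}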
1441
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1443 {
1444         struct hci_dev *hdev = req->hdev;
1445
1446         if (lmp_bredr_capable(hdev))
1447                 bredr_setup(req);
1448         else
1449                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1450
1451         if (lmp_le_capable(hdev))
1452                 le_setup(req);
1453
1454         hci_setup_event_mask(req);
1455
1456         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457          * local supported commands HCI command.
1458          */
1459         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1461
1462         if (lmp_ssp_capable(hdev)) {
1463                 /* When SSP is available, then the host features page
1464                  * should also be available as well. However some
1465                  * controllers list the max_page as 0 as long as SSP
1466                  * has not been enabled. To achieve proper debugging
1467                  * output, force the minimum max_page to 1 at least.
1468                  */
1469                 hdev->max_page = 0x01;
1470
1471                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472                         u8 mode = 0x01;
1473                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474                                     sizeof(mode), &mode);
1475                 } else {
1476                         struct hci_cp_write_eir cp;
1477
1478                         memset(hdev->eir, 0, sizeof(hdev->eir));
1479                         memset(&cp, 0, sizeof(cp));
1480
1481                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1482                 }
1483         }
1484
1485         if (lmp_inq_rssi_capable(hdev))
1486                 hci_setup_inquiry_mode(req);
1487
1488         if (lmp_inq_tx_pwr_capable(hdev))
1489                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1490
1491         if (lmp_ext_feat_capable(hdev)) {
1492                 struct hci_cp_read_local_ext_features cp;
1493
1494                 cp.page = 0x01;
1495                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496                             sizeof(cp), &cp);
1497         }
1498
1499         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500                 u8 enable = 1;
1501                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502                             &enable);
1503         }
1504 }
1505
1506 static void hci_setup_link_policy(struct hci_request *req)
1507 {
1508         struct hci_dev *hdev = req->hdev;
1509         struct hci_cp_write_def_link_policy cp;
1510         u16 link_policy = 0;
1511
1512         if (lmp_rswitch_capable(hdev))
1513                 link_policy |= HCI_LP_RSWITCH;
1514         if (lmp_hold_capable(hdev))
1515                 link_policy |= HCI_LP_HOLD;
1516         if (lmp_sniff_capable(hdev))
1517                 link_policy |= HCI_LP_SNIFF;
1518         if (lmp_park_capable(hdev))
1519                 link_policy |= HCI_LP_PARK;
1520
1521         cp.policy = cpu_to_le16(link_policy);
1522         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1523 }
1524
1525 static void hci_set_le_support(struct hci_request *req)
1526 {
1527         struct hci_dev *hdev = req->hdev;
1528         struct hci_cp_write_le_host_supported cp;
1529
1530         /* LE-only devices do not support explicit enablement */
1531         if (!lmp_bredr_capable(hdev))
1532                 return;
1533
1534         memset(&cp, 0, sizeof(cp));
1535
1536         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537                 cp.le = 0x01;
1538                 cp.simul = lmp_le_br_capable(hdev);
1539         }
1540
1541         if (cp.le != lmp_host_le_capable(hdev))
1542                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543                             &cp);
1544 }
1545
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1547 {
1548         struct hci_dev *hdev = req->hdev;
1549         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551         /* If Connectionless Slave Broadcast master role is supported
1552          * enable all necessary events for it.
1553          */
1554         if (lmp_csb_master_capable(hdev)) {
1555                 events[1] |= 0x40;      /* Triggered Clock Capture */
1556                 events[1] |= 0x80;      /* Synchronization Train Complete */
1557                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1558                 events[2] |= 0x20;      /* CSB Channel Map Change */
1559         }
1560
1561         /* If Connectionless Slave Broadcast slave role is supported
1562          * enable all necessary events for it.
1563          */
1564         if (lmp_csb_slave_capable(hdev)) {
1565                 events[2] |= 0x01;      /* Synchronization Train Received */
1566                 events[2] |= 0x02;      /* CSB Receive */
1567                 events[2] |= 0x04;      /* CSB Timeout */
1568                 events[2] |= 0x08;      /* Truncated Page Complete */
1569         }
1570
1571         /* Enable Authenticated Payload Timeout Expired event if supported */
1572         if (lmp_ping_capable(hdev))
1573                 events[2] |= 0x80;
1574
1575         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576 }
1577
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1579 {
1580         struct hci_dev *hdev = req->hdev;
1581         u8 p;
1582
1583         /* Some Broadcom based Bluetooth controllers do not support the
1584          * Delete Stored Link Key command. They are clearly indicating its
1585          * absence in the bit mask of supported commands.
1586          *
1587          * Check the supported commands and only if the the command is marked
1588          * as supported send it. If not supported assume that the controller
1589          * does not have actual support for stored link keys which makes this
1590          * command redundant anyway.
1591          *
1592          * Some controllers indicate that they support handling deleting
1593          * stored link keys, but they don't. The quirk lets a driver
1594          * just disable this command.
1595          */
1596         if (hdev->commands[6] & 0x80 &&
1597             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598                 struct hci_cp_delete_stored_link_key cp;
1599
1600                 bacpy(&cp.bdaddr, BDADDR_ANY);
1601                 cp.delete_all = 0x01;
1602                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603                             sizeof(cp), &cp);
1604         }
1605
1606         if (hdev->commands[5] & 0x10)
1607                 hci_setup_link_policy(req);
1608
1609         if (lmp_le_capable(hdev)) {
1610                 u8 events[8];
1611
1612                 memset(events, 0, sizeof(events));
1613                 events[0] = 0x1f;
1614
1615                 /* If controller supports the Connection Parameters Request
1616                  * Link Layer Procedure, enable the corresponding event.
1617                  */
1618                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619                         events[0] |= 0x20;      /* LE Remote Connection
1620                                                  * Parameter Request
1621                                                  */
1622
1623                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624                             events);
1625
1626                 hci_set_le_support(req);
1627         }
1628
1629         /* Read features beyond page 1 if available */
1630         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631                 struct hci_cp_read_local_ext_features cp;
1632
1633                 cp.page = p;
1634                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635                             sizeof(cp), &cp);
1636         }
1637 }
1638
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640 {
1641         struct hci_dev *hdev = req->hdev;
1642
1643         /* Set event mask page 2 if the HCI command for it is supported */
1644         if (hdev->commands[22] & 0x04)
1645                 hci_set_event_mask_page_2(req);
1646
1647         /* Check for Synchronization Train support */
1648         if (lmp_sync_train_capable(hdev))
1649                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1650
1651         /* Enable Secure Connections if supported and configured */
1652         if ((lmp_sc_capable(hdev) ||
1653              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655                 u8 support = 0x01;
1656                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657                             sizeof(support), &support);
1658         }
1659 }
1660
1661 static int __hci_init(struct hci_dev *hdev)
1662 {
1663         int err;
1664
1665         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666         if (err < 0)
1667                 return err;
1668
1669         /* The Device Under Test (DUT) mode is special and available for
1670          * all controller types. So just create it early on.
1671          */
1672         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674                                     &dut_mode_fops);
1675         }
1676
1677         /* The HCI_BREDR device type covers single-mode LE, single-mode
1678          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1679          * only need the first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
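	/* All of the entries below land in the per-controller debugfs
	 * directory, e.g. /sys/kernel/debug/bluetooth/hci0/ when debugfs
	 * is mounted in its usual location (path shown for illustration).
	 */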
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795 {
1796         struct hci_dev *hdev = req->hdev;
1797
1798         BT_DBG("%s %ld", hdev->name, opt);
1799
1800         /* Reset */
1801         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802                 hci_reset_req(req, 0);
1803
1804         /* Read Local Version */
1805         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
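	/* The original public address is only of interest when the driver
	 * provides a set_bdaddr callback; see hci_dev_do_open() for the
	 * rationale behind this check.
	 */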
1807         /* Read BD Address */
1808         if (hdev->set_bdaddr)
1809                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810 }
1811
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1813 {
1814         int err;
1815
1816         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1817         if (err < 0)
1818                 return err;
1819
1820         return 0;
1821 }
1822
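/* opt carries the Write Scan Enable parameter: 0x00 disables both scans,
 * 0x01 enables inquiry scan only, 0x02 enables page scan only and 0x03
 * enables both inquiry and page scan.
 */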
1823 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1824 {
1825         __u8 scan = opt;
1826
1827         BT_DBG("%s %x", req->hdev->name, scan);
1828
1829         /* Inquiry and Page scans */
1830         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1831 }
1832
1833 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1834 {
1835         __u8 auth = opt;
1836
1837         BT_DBG("%s %x", req->hdev->name, auth);
1838
1839         /* Authentication */
1840         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1841 }
1842
1843 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1844 {
1845         __u8 encrypt = opt;
1846
1847         BT_DBG("%s %x", req->hdev->name, encrypt);
1848
1849         /* Encryption */
1850         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1851 }
1852
1853 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __le16 policy = cpu_to_le16(opt);
1856
1857         BT_DBG("%s %x", req->hdev->name, policy);
1858
1859         /* Default link policy */
1860         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1861 }
1862
1863 /* Get HCI device by index.
1864  * Device is held on return. */
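/* The reference must be dropped again with hci_dev_put() once the
 * caller is done with the device.
 */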
1865 struct hci_dev *hci_dev_get(int index)
1866 {
1867         struct hci_dev *hdev = NULL, *d;
1868
1869         BT_DBG("%d", index);
1870
1871         if (index < 0)
1872                 return NULL;
1873
1874         read_lock(&hci_dev_list_lock);
1875         list_for_each_entry(d, &hci_dev_list, list) {
1876                 if (d->id == index) {
1877                         hdev = hci_dev_hold(d);
1878                         break;
1879                 }
1880         }
1881         read_unlock(&hci_dev_list_lock);
1882         return hdev;
1883 }
1884
1885 /* ---- Inquiry support ---- */
1886
1887 bool hci_discovery_active(struct hci_dev *hdev)
1888 {
1889         struct discovery_state *discov = &hdev->discovery;
1890
1891         switch (discov->state) {
1892         case DISCOVERY_FINDING:
1893         case DISCOVERY_RESOLVING:
1894                 return true;
1895
1896         default:
1897                 return false;
1898         }
1899 }
1900
1901 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1902 {
1903         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1904
1905         if (hdev->discovery.state == state)
1906                 return;
1907
1908         switch (state) {
1909         case DISCOVERY_STOPPED:
1910                 hci_update_background_scan(hdev);
1911
1912                 if (hdev->discovery.state != DISCOVERY_STARTING)
1913                         mgmt_discovering(hdev, 0);
1914                 break;
1915         case DISCOVERY_STARTING:
1916                 break;
1917         case DISCOVERY_FINDING:
1918                 mgmt_discovering(hdev, 1);
1919                 break;
1920         case DISCOVERY_RESOLVING:
1921                 break;
1922         case DISCOVERY_STOPPING:
1923                 break;
1924         }
1925
1926         hdev->discovery.state = state;
1927 }
1928
1929 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1930 {
1931         struct discovery_state *cache = &hdev->discovery;
1932         struct inquiry_entry *p, *n;
1933
1934         list_for_each_entry_safe(p, n, &cache->all, all) {
1935                 list_del(&p->all);
1936                 kfree(p);
1937         }
1938
1939         INIT_LIST_HEAD(&cache->unknown);
1940         INIT_LIST_HEAD(&cache->resolve);
1941 }
1942
1943 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1944                                                bdaddr_t *bdaddr)
1945 {
1946         struct discovery_state *cache = &hdev->discovery;
1947         struct inquiry_entry *e;
1948
1949         BT_DBG("cache %p, %pMR", cache, bdaddr);
1950
1951         list_for_each_entry(e, &cache->all, all) {
1952                 if (!bacmp(&e->data.bdaddr, bdaddr))
1953                         return e;
1954         }
1955
1956         return NULL;
1957 }
1958
1959 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1960                                                        bdaddr_t *bdaddr)
1961 {
1962         struct discovery_state *cache = &hdev->discovery;
1963         struct inquiry_entry *e;
1964
1965         BT_DBG("cache %p, %pMR", cache, bdaddr);
1966
1967         list_for_each_entry(e, &cache->unknown, list) {
1968                 if (!bacmp(&e->data.bdaddr, bdaddr))
1969                         return e;
1970         }
1971
1972         return NULL;
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1976                                                        bdaddr_t *bdaddr,
1977                                                        int state)
1978 {
1979         struct discovery_state *cache = &hdev->discovery;
1980         struct inquiry_entry *e;
1981
1982         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1983
1984         list_for_each_entry(e, &cache->resolve, list) {
1985                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1986                         return e;
1987                 if (!bacmp(&e->data.bdaddr, bdaddr))
1988                         return e;
1989         }
1990
1991         return NULL;
1992 }
1993
1994 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1995                                       struct inquiry_entry *ie)
1996 {
1997         struct discovery_state *cache = &hdev->discovery;
1998         struct list_head *pos = &cache->resolve;
1999         struct inquiry_entry *p;
2000
2001         list_del(&ie->list);
2002
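	/* Keep the resolve list ordered by signal strength: entries with
	 * a smaller absolute RSSI (i.e. a stronger signal) stay near the
	 * head so their names get resolved first. Entries whose name
	 * request is already pending are stepped over.
	 */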
2003         list_for_each_entry(p, &cache->resolve, list) {
2004                 if (p->name_state != NAME_PENDING &&
2005                     abs(p->data.rssi) >= abs(ie->data.rssi))
2006                         break;
2007                 pos = &p->list;
2008         }
2009
2010         list_add(&ie->list, pos);
2011 }
2012
2013 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2014                              bool name_known)
2015 {
2016         struct discovery_state *cache = &hdev->discovery;
2017         struct inquiry_entry *ie;
2018         u32 flags = 0;
2019
2020         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2021
2022         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2023
2024         if (!data->ssp_mode)
2025                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2026
2027         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2028         if (ie) {
2029                 if (!ie->data.ssp_mode)
2030                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2031
2032                 if (ie->name_state == NAME_NEEDED &&
2033                     data->rssi != ie->data.rssi) {
2034                         ie->data.rssi = data->rssi;
2035                         hci_inquiry_cache_update_resolve(hdev, ie);
2036                 }
2037
2038                 goto update;
2039         }
2040
2041         /* Entry not in the cache. Add new one. */
2042         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2043         if (!ie) {
2044                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2045                 goto done;
2046         }
2047
2048         list_add(&ie->all, &cache->all);
2049
2050         if (name_known) {
2051                 ie->name_state = NAME_KNOWN;
2052         } else {
2053                 ie->name_state = NAME_NOT_KNOWN;
2054                 list_add(&ie->list, &cache->unknown);
2055         }
2056
2057 update:
2058         if (name_known && ie->name_state != NAME_KNOWN &&
2059             ie->name_state != NAME_PENDING) {
2060                 ie->name_state = NAME_KNOWN;
2061                 list_del(&ie->list);
2062         }
2063
2064         memcpy(&ie->data, data, sizeof(*data));
2065         ie->timestamp = jiffies;
2066         cache->timestamp = jiffies;
2067
2068         if (ie->name_state == NAME_NOT_KNOWN)
2069                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2070
2071 done:
2072         return flags;
2073 }
2074
2075 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2076 {
2077         struct discovery_state *cache = &hdev->discovery;
2078         struct inquiry_info *info = (struct inquiry_info *) buf;
2079         struct inquiry_entry *e;
2080         int copied = 0;
2081
2082         list_for_each_entry(e, &cache->all, all) {
2083                 struct inquiry_data *data = &e->data;
2084
2085                 if (copied >= num)
2086                         break;
2087
2088                 bacpy(&info->bdaddr, &data->bdaddr);
2089                 info->pscan_rep_mode    = data->pscan_rep_mode;
2090                 info->pscan_period_mode = data->pscan_period_mode;
2091                 info->pscan_mode        = data->pscan_mode;
2092                 memcpy(info->dev_class, data->dev_class, 3);
2093                 info->clock_offset      = data->clock_offset;
2094
2095                 info++;
2096                 copied++;
2097         }
2098
2099         BT_DBG("cache %p, copied %d", cache, copied);
2100         return copied;
2101 }
2102
2103 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2104 {
2105         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2106         struct hci_dev *hdev = req->hdev;
2107         struct hci_cp_inquiry cp;
2108
2109         BT_DBG("%s", hdev->name);
2110
2111         if (test_bit(HCI_INQUIRY, &hdev->flags))
2112                 return;
2113
2114         /* Start Inquiry */
2115         memcpy(&cp.lap, &ir->lap, 3);
2116         cp.length  = ir->length;
2117         cp.num_rsp = ir->num_rsp;
2118         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2119 }
2120
2121 static int wait_inquiry(void *word)
2122 {
2123         schedule();
2124         return signal_pending(current);
2125 }
2126
2127 int hci_inquiry(void __user *arg)
2128 {
2129         __u8 __user *ptr = arg;
2130         struct hci_inquiry_req ir;
2131         struct hci_dev *hdev;
2132         int err = 0, do_inquiry = 0, max_rsp;
2133         long timeo;
2134         __u8 *buf;
2135
2136         if (copy_from_user(&ir, ptr, sizeof(ir)))
2137                 return -EFAULT;
2138
2139         hdev = hci_dev_get(ir.dev_id);
2140         if (!hdev)
2141                 return -ENODEV;
2142
2143         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2144                 err = -EBUSY;
2145                 goto done;
2146         }
2147
2148         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2149                 err = -EOPNOTSUPP;
2150                 goto done;
2151         }
2152
2153         if (hdev->dev_type != HCI_BREDR) {
2154                 err = -EOPNOTSUPP;
2155                 goto done;
2156         }
2157
2158         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2159                 err = -EOPNOTSUPP;
2160                 goto done;
2161         }
2162
2163         hci_dev_lock(hdev);
2164         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2165             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2166                 hci_inquiry_cache_flush(hdev);
2167                 do_inquiry = 1;
2168         }
2169         hci_dev_unlock(hdev);
2170
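	/* ir.length is expressed in units of 1.28 seconds, as defined for
	 * the HCI Inquiry command; using 2 seconds per unit here leaves
	 * some headroom for the controller to finish.
	 */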
2171         timeo = ir.length * msecs_to_jiffies(2000);
2172
2173         if (do_inquiry) {
2174                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2175                                    timeo);
2176                 if (err < 0)
2177                         goto done;
2178
2179                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2180                  * cleared). If it is interrupted by a signal, return -EINTR.
2181                  */
2182                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2183                                 TASK_INTERRUPTIBLE))
2184                         return -EINTR;
2185         }
2186
2187         /* For an unlimited number of responses, use a buffer with
2188          * 255 entries.
2189          */
2190         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2191
2192         /* inquiry_cache_dump() can't sleep. Therefore, allocate a
2193          * temporary buffer here and copy it to user space afterwards.
2194          */
2195         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2196         if (!buf) {
2197                 err = -ENOMEM;
2198                 goto done;
2199         }
2200
2201         hci_dev_lock(hdev);
2202         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2203         hci_dev_unlock(hdev);
2204
2205         BT_DBG("num_rsp %d", ir.num_rsp);
2206
2207         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2208                 ptr += sizeof(ir);
2209                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2210                                  ir.num_rsp))
2211                         err = -EFAULT;
2212         } else
2213                 err = -EFAULT;
2214
2215         kfree(buf);
2216
2217 done:
2218         hci_dev_put(hdev);
2219         return err;
2220 }
2221
2222 static int hci_dev_do_open(struct hci_dev *hdev)
2223 {
2224         int ret = 0;
2225
2226         BT_DBG("%s %p", hdev->name, hdev);
2227
2228         hci_req_lock(hdev);
2229
2230         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2231                 ret = -ENODEV;
2232                 goto done;
2233         }
2234
2235         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2236             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2237                 /* Check for rfkill but allow the HCI setup stage to
2238                  * proceed (which in itself doesn't cause any RF activity).
2239                  */
2240                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2241                         ret = -ERFKILL;
2242                         goto done;
2243                 }
2244
2245                 /* Check for valid public address or a configured static
2246          * random address, but let the HCI setup proceed to
2247                  * be able to determine if there is a public address
2248                  * or not.
2249                  *
2250          * In case of user channel usage, it does not matter
2251          * whether a public address or a static random address
2252          * is available.
2253                  *
2254                  * This check is only valid for BR/EDR controllers
2255                  * since AMP controllers do not have an address.
2256                  */
2257                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258                     hdev->dev_type == HCI_BREDR &&
2259                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261                         ret = -EADDRNOTAVAIL;
2262                         goto done;
2263                 }
2264         }
2265
2266         if (test_bit(HCI_UP, &hdev->flags)) {
2267                 ret = -EALREADY;
2268                 goto done;
2269         }
2270
2271         if (hdev->open(hdev)) {
2272                 ret = -EIO;
2273                 goto done;
2274         }
2275
2276         atomic_set(&hdev->cmd_cnt, 1);
2277         set_bit(HCI_INIT, &hdev->flags);
2278
2279         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2280                 if (hdev->setup)
2281                         ret = hdev->setup(hdev);
2282
2283                 /* The transport driver can set these quirks before
2284                  * creating the HCI device or in its setup callback.
2285                  *
2286                  * In case any of them is set, the controller has to
2287                  * start up as unconfigured.
2288                  */
2289                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2290                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2291                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2292
2293                 /* For an unconfigured controller it is required to
2294                  * read at least the version information provided by
2295                  * the Read Local Version Information command.
2296                  *
2297                  * If the set_bdaddr driver callback is provided, then
2298                  * also the original Bluetooth public device address
2299                  * will be read using the Read BD Address command.
2300                  */
2301                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2302                         ret = __hci_unconf_init(hdev);
2303         }
2304
2305         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2306                 /* If public address change is configured, ensure that
2307                  * the address gets programmed. If the driver does not
2308                  * support changing the public address, fail the power
2309                  * on procedure.
2310                  */
2311                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2312                     hdev->set_bdaddr)
2313                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2314                 else
2315                         ret = -EADDRNOTAVAIL;
2316         }
2317
2318         if (!ret) {
2319                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2320                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2321                         ret = __hci_init(hdev);
2322         }
2323
2324         clear_bit(HCI_INIT, &hdev->flags);
2325
2326         if (!ret) {
2327                 hci_dev_hold(hdev);
2328                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2329                 set_bit(HCI_UP, &hdev->flags);
2330                 hci_notify(hdev, HCI_DEV_UP);
2331                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2332                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2333                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2334                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2335                     hdev->dev_type == HCI_BREDR) {
2336                         hci_dev_lock(hdev);
2337                         mgmt_powered(hdev, 1);
2338                         hci_dev_unlock(hdev);
2339                 }
2340         } else {
2341                 /* Init failed, cleanup */
2342                 flush_work(&hdev->tx_work);
2343                 flush_work(&hdev->cmd_work);
2344                 flush_work(&hdev->rx_work);
2345
2346                 skb_queue_purge(&hdev->cmd_q);
2347                 skb_queue_purge(&hdev->rx_q);
2348
2349                 if (hdev->flush)
2350                         hdev->flush(hdev);
2351
2352                 if (hdev->sent_cmd) {
2353                         kfree_skb(hdev->sent_cmd);
2354                         hdev->sent_cmd = NULL;
2355                 }
2356
2357                 hdev->close(hdev);
2358                 hdev->flags &= BIT(HCI_RAW);
2359         }
2360
2361 done:
2362         hci_req_unlock(hdev);
2363         return ret;
2364 }
2365
2366 /* ---- HCI ioctl helpers ---- */
2367
2368 int hci_dev_open(__u16 dev)
2369 {
2370         struct hci_dev *hdev;
2371         int err;
2372
2373         hdev = hci_dev_get(dev);
2374         if (!hdev)
2375                 return -ENODEV;
2376
2377         /* Devices that are marked as unconfigured can only be powered
2378          * up as user channel. Trying to bring them up as normal devices
2379          * will result in a failure. Only user channel operation is
2380          * possible.
2381          *
2382          * When this function is called for a user channel, the flag
2383          * HCI_USER_CHANNEL will be set first before attempting to
2384          * open the device.
2385          */
2386         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2387             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2388                 err = -EOPNOTSUPP;
2389                 goto done;
2390         }
2391
2392         /* We need to ensure that no other power on/off work is pending
2393          * before proceeding to call hci_dev_do_open. This is
2394          * particularly important if the setup procedure has not yet
2395          * completed.
2396          */
2397         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2398                 cancel_delayed_work(&hdev->power_off);
2399
2400         /* After this call it is guaranteed that the setup procedure
2401          * has finished. This means that error conditions like RFKILL
2402          * or no valid public or static random address apply.
2403          */
2404         flush_workqueue(hdev->req_workqueue);
2405
2406         err = hci_dev_do_open(hdev);
2407
2408 done:
2409         hci_dev_put(hdev);
2410         return err;
2411 }
2412
2413 /* This function requires the caller holds hdev->lock */
2414 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2415 {
2416         struct hci_conn_params *p;
2417
2418         list_for_each_entry(p, &hdev->le_conn_params, list)
2419                 list_del_init(&p->action);
2420
2421         BT_DBG("All LE pending actions cleared");
2422 }
2423
2424 static int hci_dev_do_close(struct hci_dev *hdev)
2425 {
2426         BT_DBG("%s %p", hdev->name, hdev);
2427
2428         cancel_delayed_work(&hdev->power_off);
2429
2430         hci_req_cancel(hdev, ENODEV);
2431         hci_req_lock(hdev);
2432
2433         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2434                 cancel_delayed_work_sync(&hdev->cmd_timer);
2435                 hci_req_unlock(hdev);
2436                 return 0;
2437         }
2438
2439         /* Flush RX and TX works */
2440         flush_work(&hdev->tx_work);
2441         flush_work(&hdev->rx_work);
2442
2443         if (hdev->discov_timeout > 0) {
2444                 cancel_delayed_work(&hdev->discov_off);
2445                 hdev->discov_timeout = 0;
2446                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2447                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2448         }
2449
2450         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2451                 cancel_delayed_work(&hdev->service_cache);
2452
2453         cancel_delayed_work_sync(&hdev->le_scan_disable);
2454
2455         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2456                 cancel_delayed_work_sync(&hdev->rpa_expired);
2457
2458         hci_dev_lock(hdev);
2459         hci_inquiry_cache_flush(hdev);
2460         hci_conn_hash_flush(hdev);
2461         hci_pend_le_actions_clear(hdev);
2462         hci_dev_unlock(hdev);
2463
2464         hci_notify(hdev, HCI_DEV_DOWN);
2465
2466         if (hdev->flush)
2467                 hdev->flush(hdev);
2468
2469         /* Reset device */
2470         skb_queue_purge(&hdev->cmd_q);
2471         atomic_set(&hdev->cmd_cnt, 1);
2472         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2473             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2474             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2475                 set_bit(HCI_INIT, &hdev->flags);
2476                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2477                 clear_bit(HCI_INIT, &hdev->flags);
2478         }
2479
2480         /* flush cmd  work */
2481         flush_work(&hdev->cmd_work);
2482
2483         /* Drop queues */
2484         skb_queue_purge(&hdev->rx_q);
2485         skb_queue_purge(&hdev->cmd_q);
2486         skb_queue_purge(&hdev->raw_q);
2487
2488         /* Drop last sent command */
2489         if (hdev->sent_cmd) {
2490                 cancel_delayed_work_sync(&hdev->cmd_timer);
2491                 kfree_skb(hdev->sent_cmd);
2492                 hdev->sent_cmd = NULL;
2493         }
2494
2495         kfree_skb(hdev->recv_evt);
2496         hdev->recv_evt = NULL;
2497
2498         /* After this point our queues are empty
2499          * and no tasks are scheduled. */
2500         hdev->close(hdev);
2501
2502         /* Clear flags */
2503         hdev->flags &= BIT(HCI_RAW);
2504         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2505
2506         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2507                 if (hdev->dev_type == HCI_BREDR) {
2508                         hci_dev_lock(hdev);
2509                         mgmt_powered(hdev, 0);
2510                         hci_dev_unlock(hdev);
2511                 }
2512         }
2513
2514         /* Controller radio is available but is currently powered down */
2515         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2516
2517         memset(hdev->eir, 0, sizeof(hdev->eir));
2518         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2519         bacpy(&hdev->random_addr, BDADDR_ANY);
2520
2521         hci_req_unlock(hdev);
2522
2523         hci_dev_put(hdev);
2524         return 0;
2525 }
2526
2527 int hci_dev_close(__u16 dev)
2528 {
2529         struct hci_dev *hdev;
2530         int err;
2531
2532         hdev = hci_dev_get(dev);
2533         if (!hdev)
2534                 return -ENODEV;
2535
2536         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2537                 err = -EBUSY;
2538                 goto done;
2539         }
2540
2541         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2542                 cancel_delayed_work(&hdev->power_off);
2543
2544         err = hci_dev_do_close(hdev);
2545
2546 done:
2547         hci_dev_put(hdev);
2548         return err;
2549 }
2550
2551 int hci_dev_reset(__u16 dev)
2552 {
2553         struct hci_dev *hdev;
2554         int ret = 0;
2555
2556         hdev = hci_dev_get(dev);
2557         if (!hdev)
2558                 return -ENODEV;
2559
2560         hci_req_lock(hdev);
2561
2562         if (!test_bit(HCI_UP, &hdev->flags)) {
2563                 ret = -ENETDOWN;
2564                 goto done;
2565         }
2566
2567         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2568                 ret = -EBUSY;
2569                 goto done;
2570         }
2571
2572         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2573                 ret = -EOPNOTSUPP;
2574                 goto done;
2575         }
2576
2577         /* Drop queues */
2578         skb_queue_purge(&hdev->rx_q);
2579         skb_queue_purge(&hdev->cmd_q);
2580
2581         hci_dev_lock(hdev);
2582         hci_inquiry_cache_flush(hdev);
2583         hci_conn_hash_flush(hdev);
2584         hci_dev_unlock(hdev);
2585
2586         if (hdev->flush)
2587                 hdev->flush(hdev);
2588
2589         atomic_set(&hdev->cmd_cnt, 1);
2590         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2591
2592         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2593
2594 done:
2595         hci_req_unlock(hdev);
2596         hci_dev_put(hdev);
2597         return ret;
2598 }
2599
2600 int hci_dev_reset_stat(__u16 dev)
2601 {
2602         struct hci_dev *hdev;
2603         int ret = 0;
2604
2605         hdev = hci_dev_get(dev);
2606         if (!hdev)
2607                 return -ENODEV;
2608
2609         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2610                 ret = -EBUSY;
2611                 goto done;
2612         }
2613
2614         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2615                 ret = -EOPNOTSUPP;
2616                 goto done;
2617         }
2618
2619         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2620
2621 done:
2622         hci_dev_put(hdev);
2623         return ret;
2624 }
2625
2626 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2627 {
2628         struct hci_dev *hdev;
2629         struct hci_dev_req dr;
2630         int err = 0;
2631
2632         if (copy_from_user(&dr, arg, sizeof(dr)))
2633                 return -EFAULT;
2634
2635         hdev = hci_dev_get(dr.dev_id);
2636         if (!hdev)
2637                 return -ENODEV;
2638
2639         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2640                 err = -EBUSY;
2641                 goto done;
2642         }
2643
2644         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2645                 err = -EOPNOTSUPP;
2646                 goto done;
2647         }
2648
2649         if (hdev->dev_type != HCI_BREDR) {
2650                 err = -EOPNOTSUPP;
2651                 goto done;
2652         }
2653
2654         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2655                 err = -EOPNOTSUPP;
2656                 goto done;
2657         }
2658
2659         switch (cmd) {
2660         case HCISETAUTH:
2661                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2662                                    HCI_INIT_TIMEOUT);
2663                 break;
2664
2665         case HCISETENCRYPT:
2666                 if (!lmp_encrypt_capable(hdev)) {
2667                         err = -EOPNOTSUPP;
2668                         break;
2669                 }
2670
2671                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2672                         /* Auth must be enabled first */
2673                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2674                                            HCI_INIT_TIMEOUT);
2675                         if (err)
2676                                 break;
2677                 }
2678
2679                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2680                                    HCI_INIT_TIMEOUT);
2681                 break;
2682
2683         case HCISETSCAN:
2684                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2685                                    HCI_INIT_TIMEOUT);
2686                 break;
2687
2688         case HCISETLINKPOL:
2689                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2690                                    HCI_INIT_TIMEOUT);
2691                 break;
2692
2693         case HCISETLINKMODE:
2694                 hdev->link_mode = ((__u16) dr.dev_opt) &
2695                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2696                 break;
2697
2698         case HCISETPTYPE:
2699                 hdev->pkt_type = (__u16) dr.dev_opt;
2700                 break;
2701
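	/* For the two MTU ioctls below, dev_opt packs two 16-bit values
	 * into one 32-bit word; the pointer arithmetic picks the packet
	 * count from the first half and the MTU from the second half, in
	 * host memory order.
	 */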
2702         case HCISETACLMTU:
2703                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2704                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2705                 break;
2706
2707         case HCISETSCOMTU:
2708                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2709                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2710                 break;
2711
2712         default:
2713                 err = -EINVAL;
2714                 break;
2715         }
2716
2717 done:
2718         hci_dev_put(hdev);
2719         return err;
2720 }
2721
2722 int hci_get_dev_list(void __user *arg)
2723 {
2724         struct hci_dev *hdev;
2725         struct hci_dev_list_req *dl;
2726         struct hci_dev_req *dr;
2727         int n = 0, size, err;
2728         __u16 dev_num;
2729
2730         if (get_user(dev_num, (__u16 __user *) arg))
2731                 return -EFAULT;
2732
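	/* Clamp the request so that the buffer allocated below stays
	 * within roughly two pages worth of hci_dev_req entries.
	 */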
2733         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2734                 return -EINVAL;
2735
2736         size = sizeof(*dl) + dev_num * sizeof(*dr);
2737
2738         dl = kzalloc(size, GFP_KERNEL);
2739         if (!dl)
2740                 return -ENOMEM;
2741
2742         dr = dl->dev_req;
2743
2744         read_lock(&hci_dev_list_lock);
2745         list_for_each_entry(hdev, &hci_dev_list, list) {
2746                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2747                         cancel_delayed_work(&hdev->power_off);
2748
2749                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2750                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2751
2752                 (dr + n)->dev_id  = hdev->id;
2753                 (dr + n)->dev_opt = hdev->flags;
2754
2755                 if (++n >= dev_num)
2756                         break;
2757         }
2758         read_unlock(&hci_dev_list_lock);
2759
2760         dl->dev_num = n;
2761         size = sizeof(*dl) + n * sizeof(*dr);
2762
2763         err = copy_to_user(arg, dl, size);
2764         kfree(dl);
2765
2766         return err ? -EFAULT : 0;
2767 }
2768
2769 int hci_get_dev_info(void __user *arg)
2770 {
2771         struct hci_dev *hdev;
2772         struct hci_dev_info di;
2773         int err = 0;
2774
2775         if (copy_from_user(&di, arg, sizeof(di)))
2776                 return -EFAULT;
2777
2778         hdev = hci_dev_get(di.dev_id);
2779         if (!hdev)
2780                 return -ENODEV;
2781
2782         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2783                 cancel_delayed_work_sync(&hdev->power_off);
2784
2785         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2786                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2787
2788         strcpy(di.name, hdev->name);
2789         di.bdaddr   = hdev->bdaddr;
2790         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2791         di.flags    = hdev->flags;
2792         di.pkt_type = hdev->pkt_type;
2793         if (lmp_bredr_capable(hdev)) {
2794                 di.acl_mtu  = hdev->acl_mtu;
2795                 di.acl_pkts = hdev->acl_pkts;
2796                 di.sco_mtu  = hdev->sco_mtu;
2797                 di.sco_pkts = hdev->sco_pkts;
2798         } else {
2799                 di.acl_mtu  = hdev->le_mtu;
2800                 di.acl_pkts = hdev->le_pkts;
2801                 di.sco_mtu  = 0;
2802                 di.sco_pkts = 0;
2803         }
2804         di.link_policy = hdev->link_policy;
2805         di.link_mode   = hdev->link_mode;
2806
2807         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2808         memcpy(&di.features, &hdev->features, sizeof(di.features));
2809
2810         if (copy_to_user(arg, &di, sizeof(di)))
2811                 err = -EFAULT;
2812
2813         hci_dev_put(hdev);
2814
2815         return err;
2816 }
2817
2818 /* ---- Interface to HCI drivers ---- */
2819
2820 static int hci_rfkill_set_block(void *data, bool blocked)
2821 {
2822         struct hci_dev *hdev = data;
2823
2824         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2825
2826         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2827                 return -EBUSY;
2828
2829         if (blocked) {
2830                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2831                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2832                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2833                         hci_dev_do_close(hdev);
2834         } else {
2835                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2836         }
2837
2838         return 0;
2839 }
2840
2841 static const struct rfkill_ops hci_rfkill_ops = {
2842         .set_block = hci_rfkill_set_block,
2843 };
2844
2845 static void hci_power_on(struct work_struct *work)
2846 {
2847         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2848         int err;
2849
2850         BT_DBG("%s", hdev->name);
2851
2852         err = hci_dev_do_open(hdev);
2853         if (err < 0) {
2854                 mgmt_set_powered_failed(hdev, err);
2855                 return;
2856         }
2857
2858         /* During the HCI setup phase, a few error conditions are
2859          * ignored and they need to be checked now. If they are still
2860          * valid, it is important to turn the device back off.
2861          */
2862         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2863             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2864             (hdev->dev_type == HCI_BREDR &&
2865              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2866              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2867                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2868                 hci_dev_do_close(hdev);
2869         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2870                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2871                                    HCI_AUTO_OFF_TIMEOUT);
2872         }
2873
2874         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2875                 /* For unconfigured devices, set the HCI_RAW flag
2876                  * so that userspace can easily identify them.
2877                  */
2878                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2879                         set_bit(HCI_RAW, &hdev->flags);
2880
2881                 /* For fully configured devices, this will send
2882                  * the Index Added event. For unconfigured devices,
2883                  * it will send the Unconfigured Index Added event.
2884                  *
2885                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2886                  * and no event will be sent.
2887                  */
2888                 mgmt_index_added(hdev);
2889         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2890                 /* Powering on the controller with HCI_CONFIG set only
2891                  * happens with the transition from unconfigured to
2892                  * configured. This will send the Index Added event.
2893                  */
2894                 mgmt_index_added(hdev);
2895         }
2896 }
2897
2898 static void hci_power_off(struct work_struct *work)
2899 {
2900         struct hci_dev *hdev = container_of(work, struct hci_dev,
2901                                             power_off.work);
2902
2903         BT_DBG("%s", hdev->name);
2904
2905         hci_dev_do_close(hdev);
2906 }
2907
2908 static void hci_discov_off(struct work_struct *work)
2909 {
2910         struct hci_dev *hdev;
2911
2912         hdev = container_of(work, struct hci_dev, discov_off.work);
2913
2914         BT_DBG("%s", hdev->name);
2915
2916         mgmt_discoverable_timeout(hdev);
2917 }
2918
2919 void hci_uuids_clear(struct hci_dev *hdev)
2920 {
2921         struct bt_uuid *uuid, *tmp;
2922
2923         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2924                 list_del(&uuid->list);
2925                 kfree(uuid);
2926         }
2927 }
2928
2929 void hci_link_keys_clear(struct hci_dev *hdev)
2930 {
2931         struct list_head *p, *n;
2932
2933         list_for_each_safe(p, n, &hdev->link_keys) {
2934                 struct link_key *key;
2935
2936                 key = list_entry(p, struct link_key, list);
2937
2938                 list_del(p);
2939                 kfree(key);
2940         }
2941 }
2942
2943 void hci_smp_ltks_clear(struct hci_dev *hdev)
2944 {
2945         struct smp_ltk *k, *tmp;
2946
2947         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2948                 list_del(&k->list);
2949                 kfree(k);
2950         }
2951 }
2952
2953 void hci_smp_irks_clear(struct hci_dev *hdev)
2954 {
2955         struct smp_irk *k, *tmp;
2956
2957         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2958                 list_del(&k->list);
2959                 kfree(k);
2960         }
2961 }
2962
2963 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2964 {
2965         struct link_key *k;
2966
2967         list_for_each_entry(k, &hdev->link_keys, list)
2968                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2969                         return k;
2970
2971         return NULL;
2972 }
2973
2974 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2975                                u8 key_type, u8 old_key_type)
2976 {
2977         /* Legacy key */
2978         if (key_type < 0x03)
2979                 return true;
2980
2981         /* Debug keys are insecure so don't store them persistently */
2982         if (key_type == HCI_LK_DEBUG_COMBINATION)
2983                 return false;
2984
2985         /* Changed combination key and there's no previous one */
2986         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2987                 return false;
2988
2989         /* Security mode 3 case */
2990         if (!conn)
2991                 return true;
2992
2993         /* Neither the local nor the remote side requested no-bonding */
2994         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2995                 return true;
2996
2997         /* Local side had dedicated bonding as requirement */
2998         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2999                 return true;
3000
3001         /* Remote side had dedicated bonding as requirement */
3002         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3003                 return true;
3004
3005         /* If none of the above criteria match, then don't store the key
3006          * persistently */
3007         return false;
3008 }
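/* Example using the HCI encodings: a combination key (type 0x00) counts
 * as a legacy key and is always stored, a debug combination key (0x03)
 * is never stored, and a key created during dedicated bonding
 * (auth_type 0x02 or 0x03 on either side) is stored persistently.
 */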
3009
3010 static bool ltk_type_master(u8 type)
3011 {
3012         return (type == SMP_LTK);
3013 }
3014
3015 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3016                              bool master)
3017 {
3018         struct smp_ltk *k;
3019
3020         list_for_each_entry(k, &hdev->long_term_keys, list) {
3021                 if (k->ediv != ediv || k->rand != rand)
3022                         continue;
3023
3024                 if (ltk_type_master(k->type) != master)
3025                         continue;
3026
3027                 return k;
3028         }
3029
3030         return NULL;
3031 }
3032
3033 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3034                                      u8 addr_type, bool master)
3035 {
3036         struct smp_ltk *k;
3037
3038         list_for_each_entry(k, &hdev->long_term_keys, list)
3039                 if (addr_type == k->bdaddr_type &&
3040                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3041                     ltk_type_master(k->type) == master)
3042                         return k;
3043
3044         return NULL;
3045 }
3046
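/* Resolve a Resolvable Private Address to its IRK. The first pass only
 * compares against the cached RPA of each key; the second pass runs the
 * more expensive AES based smp_irk_matches() check and, on a match,
 * caches the RPA for the next lookup.
 */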
3047 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3048 {
3049         struct smp_irk *irk;
3050
3051         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3052                 if (!bacmp(&irk->rpa, rpa))
3053                         return irk;
3054         }
3055
3056         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3057                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3058                         bacpy(&irk->rpa, rpa);
3059                         return irk;
3060                 }
3061         }
3062
3063         return NULL;
3064 }
3065
3066 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3067                                      u8 addr_type)
3068 {
3069         struct smp_irk *irk;
3070
3071         /* Identity Address must be public or static random */
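	/* A static random address has the two most significant bits of
	 * its most significant byte set to 1, hence the 0xc0 check on
	 * bdaddr->b[5].
	 */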
3072         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3073                 return NULL;
3074
3075         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3076                 if (addr_type == irk->addr_type &&
3077                     bacmp(bdaddr, &irk->bdaddr) == 0)
3078                         return irk;
3079         }
3080
3081         return NULL;
3082 }
3083
3084 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3085                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3086                                   u8 pin_len, bool *persistent)
3087 {
3088         struct link_key *key, *old_key;
3089         u8 old_key_type;
3090
3091         old_key = hci_find_link_key(hdev, bdaddr);
3092         if (old_key) {
3093                 old_key_type = old_key->type;
3094                 key = old_key;
3095         } else {
3096                 old_key_type = conn ? conn->key_type : 0xff;
3097                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3098                 if (!key)
3099                         return NULL;
3100                 list_add(&key->list, &hdev->link_keys);
3101         }
3102
3103         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3104
3105         /* Some buggy controller combinations generate a changed
3106          * combination key for legacy pairing even when there's no
3107          * previous key */
3108         if (type == HCI_LK_CHANGED_COMBINATION &&
3109             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3110                 type = HCI_LK_COMBINATION;
3111                 if (conn)
3112                         conn->key_type = type;
3113         }
3114
3115         bacpy(&key->bdaddr, bdaddr);
3116         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3117         key->pin_len = pin_len;
3118
3119         if (type == HCI_LK_CHANGED_COMBINATION)
3120                 key->type = old_key_type;
3121         else
3122                 key->type = type;
3123
3124         if (persistent)
3125                 *persistent = hci_persistent_key(hdev, conn, type,
3126                                                  old_key_type);
3127
3128         return key;
3129 }
3130
3131 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3132                             u8 addr_type, u8 type, u8 authenticated,
3133                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3134 {
3135         struct smp_ltk *key, *old_key;
3136         bool master = ltk_type_master(type);
3137
3138         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3139         if (old_key)
3140                 key = old_key;
3141         else {
3142                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3143                 if (!key)
3144                         return NULL;
3145                 list_add(&key->list, &hdev->long_term_keys);
3146         }
3147
3148         bacpy(&key->bdaddr, bdaddr);
3149         key->bdaddr_type = addr_type;
3150         memcpy(key->val, tk, sizeof(key->val));
3151         key->authenticated = authenticated;
3152         key->ediv = ediv;
3153         key->rand = rand;
3154         key->enc_size = enc_size;
3155         key->type = type;
3156
3157         return key;
3158 }
3159
3160 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3161                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3162 {
3163         struct smp_irk *irk;
3164
3165         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3166         if (!irk) {
3167                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3168                 if (!irk)
3169                         return NULL;
3170
3171                 bacpy(&irk->bdaddr, bdaddr);
3172                 irk->addr_type = addr_type;
3173
3174                 list_add(&irk->list, &hdev->identity_resolving_keys);
3175         }
3176
3177         memcpy(irk->val, val, 16);
3178         bacpy(&irk->rpa, rpa);
3179
3180         return irk;
3181 }
3182
3183 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3184 {
3185         struct link_key *key;
3186
3187         key = hci_find_link_key(hdev, bdaddr);
3188         if (!key)
3189                 return -ENOENT;
3190
3191         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3192
3193         list_del(&key->list);
3194         kfree(key);
3195
3196         return 0;
3197 }
3198
3199 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3200 {
3201         struct smp_ltk *k, *tmp;
3202         int removed = 0;
3203
3204         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3205                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3206                         continue;
3207
3208                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3209
3210                 list_del(&k->list);
3211                 kfree(k);
3212                 removed++;
3213         }
3214
3215         return removed ? 0 : -ENOENT;
3216 }
3217
3218 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3219 {
3220         struct smp_irk *k, *tmp;
3221
3222         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3223                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3224                         continue;
3225
3226                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3227
3228                 list_del(&k->list);
3229                 kfree(k);
3230         }
3231 }
3232
3233 /* HCI command timer function */
3234 static void hci_cmd_timeout(struct work_struct *work)
3235 {
3236         struct hci_dev *hdev = container_of(work, struct hci_dev,
3237                                             cmd_timer.work);
3238
3239         if (hdev->sent_cmd) {
3240                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3241                 u16 opcode = __le16_to_cpu(sent->opcode);
3242
3243                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3244         } else {
3245                 BT_ERR("%s command tx timeout", hdev->name);
3246         }
3247
3248         atomic_set(&hdev->cmd_cnt, 1);
3249         queue_work(hdev->workqueue, &hdev->cmd_work);
3250 }
3251
3252 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3253                                           bdaddr_t *bdaddr)
3254 {
3255         struct oob_data *data;
3256
3257         list_for_each_entry(data, &hdev->remote_oob_data, list)
3258                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3259                         return data;
3260
3261         return NULL;
3262 }
3263
3264 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3265 {
3266         struct oob_data *data;
3267
3268         data = hci_find_remote_oob_data(hdev, bdaddr);
3269         if (!data)
3270                 return -ENOENT;
3271
3272         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3273
3274         list_del(&data->list);
3275         kfree(data);
3276
3277         return 0;
3278 }
3279
3280 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3281 {
3282         struct oob_data *data, *n;
3283
3284         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3285                 list_del(&data->list);
3286                 kfree(data);
3287         }
3288 }
3289
3290 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3291                             u8 *hash, u8 *randomizer)
3292 {
3293         struct oob_data *data;
3294
3295         data = hci_find_remote_oob_data(hdev, bdaddr);
3296         if (!data) {
3297                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3298                 if (!data)
3299                         return -ENOMEM;
3300
3301                 bacpy(&data->bdaddr, bdaddr);
3302                 list_add(&data->list, &hdev->remote_oob_data);
3303         }
3304
3305         memcpy(data->hash192, hash, sizeof(data->hash192));
3306         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3307
3308         memset(data->hash256, 0, sizeof(data->hash256));
3309         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3310
3311         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3312
3313         return 0;
3314 }
3315
3316 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3317                                 u8 *hash192, u8 *randomizer192,
3318                                 u8 *hash256, u8 *randomizer256)
3319 {
3320         struct oob_data *data;
3321
3322         data = hci_find_remote_oob_data(hdev, bdaddr);
3323         if (!data) {
3324                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3325                 if (!data)
3326                         return -ENOMEM;
3327
3328                 bacpy(&data->bdaddr, bdaddr);
3329                 list_add(&data->list, &hdev->remote_oob_data);
3330         }
3331
3332         memcpy(data->hash192, hash192, sizeof(data->hash192));
3333         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3334
3335         memcpy(data->hash256, hash256, sizeof(data->hash256));
3336         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3337
3338         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3339
3340         return 0;
3341 }
3342
3343 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3344                                          bdaddr_t *bdaddr, u8 type)
3345 {
3346         struct bdaddr_list *b;
3347
3348         list_for_each_entry(b, &hdev->blacklist, list) {
3349                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3350                         return b;
3351         }
3352
3353         return NULL;
3354 }
3355
3356 static void hci_blacklist_clear(struct hci_dev *hdev)
3357 {
3358         struct list_head *p, *n;
3359
3360         list_for_each_safe(p, n, &hdev->blacklist) {
3361                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3362
3363                 list_del(p);
3364                 kfree(b);
3365         }
3366 }
3367
3368 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3369 {
3370         struct bdaddr_list *entry;
3371
3372         if (!bacmp(bdaddr, BDADDR_ANY))
3373                 return -EBADF;
3374
3375         if (hci_blacklist_lookup(hdev, bdaddr, type))
3376                 return -EEXIST;
3377
3378         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3379         if (!entry)
3380                 return -ENOMEM;
3381
3382         bacpy(&entry->bdaddr, bdaddr);
3383         entry->bdaddr_type = type;
3384
3385         list_add(&entry->list, &hdev->blacklist);
3386
3387         return 0;
3388 }
3389
3390 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3391 {
3392         struct bdaddr_list *entry;
3393
3394         if (!bacmp(bdaddr, BDADDR_ANY)) {
3395                 hci_blacklist_clear(hdev);
3396                 return 0;
3397         }
3398
3399         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3400         if (!entry)
3401                 return -ENOENT;
3402
3403         list_del(&entry->list);
3404         kfree(entry);
3405
3406         return 0;
3407 }
3408
3409 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3410                                           bdaddr_t *bdaddr, u8 type)
3411 {
3412         struct bdaddr_list *b;
3413
3414         list_for_each_entry(b, &hdev->le_white_list, list) {
3415                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3416                         return b;
3417         }
3418
3419         return NULL;
3420 }
3421
3422 void hci_white_list_clear(struct hci_dev *hdev)
3423 {
3424         struct list_head *p, *n;
3425
3426         list_for_each_safe(p, n, &hdev->le_white_list) {
3427                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3428
3429                 list_del(p);
3430                 kfree(b);
3431         }
3432 }
3433
3434 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3435 {
3436         struct bdaddr_list *entry;
3437
3438         if (!bacmp(bdaddr, BDADDR_ANY))
3439                 return -EBADF;
3440
3441         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3442         if (!entry)
3443                 return -ENOMEM;
3444
3445         bacpy(&entry->bdaddr, bdaddr);
3446         entry->bdaddr_type = type;
3447
3448         list_add(&entry->list, &hdev->le_white_list);
3449
3450         return 0;
3451 }
3452
3453 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3454 {
3455         struct bdaddr_list *entry;
3456
3457         if (!bacmp(bdaddr, BDADDR_ANY))
3458                 return -EBADF;
3459
3460         entry = hci_white_list_lookup(hdev, bdaddr, type);
3461         if (!entry)
3462                 return -ENOENT;
3463
3464         list_del(&entry->list);
3465         kfree(entry);
3466
3467         return 0;
3468 }
3469
3470 /* This function requires the caller holds hdev->lock */
3471 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3472                                                bdaddr_t *addr, u8 addr_type)
3473 {
3474         struct hci_conn_params *params;
3475
3476         /* The conn params list only contains identity addresses */
3477         if (!hci_is_identity_address(addr, addr_type))
3478                 return NULL;
3479
3480         list_for_each_entry(params, &hdev->le_conn_params, list) {
3481                 if (bacmp(&params->addr, addr) == 0 &&
3482                     params->addr_type == addr_type) {
3483                         return params;
3484                 }
3485         }
3486
3487         return NULL;
3488 }
3489
3490 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3491 {
3492         struct hci_conn *conn;
3493
3494         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3495         if (!conn)
3496                 return false;
3497
3498         if (conn->dst_type != type)
3499                 return false;
3500
3501         if (conn->state != BT_CONNECTED)
3502                 return false;
3503
3504         return true;
3505 }
3506
3507 /* This function requires the caller holds hdev->lock */
3508 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3509                                                   bdaddr_t *addr, u8 addr_type)
3510 {
3511         struct hci_conn_params *param;
3512
3513         /* The list only contains identity addresses */
3514         if (!hci_is_identity_address(addr, addr_type))
3515                 return NULL;
3516
3517         list_for_each_entry(param, list, action) {
3518                 if (bacmp(&param->addr, addr) == 0 &&
3519                     param->addr_type == addr_type)
3520                         return param;
3521         }
3522
3523         return NULL;
3524 }
3525
3526 /* This function requires the caller holds hdev->lock */
3527 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3528                                             bdaddr_t *addr, u8 addr_type)
3529 {
3530         struct hci_conn_params *params;
3531
3532         if (!hci_is_identity_address(addr, addr_type))
3533                 return NULL;
3534
3535         params = hci_conn_params_lookup(hdev, addr, addr_type);
3536         if (params)
3537                 return params;
3538
3539         params = kzalloc(sizeof(*params), GFP_KERNEL);
3540         if (!params) {
3541                 BT_ERR("Out of memory");
3542                 return NULL;
3543         }
3544
3545         bacpy(&params->addr, addr);
3546         params->addr_type = addr_type;
3547
3548         list_add(&params->list, &hdev->le_conn_params);
3549         INIT_LIST_HEAD(&params->action);
3550
3551         params->conn_min_interval = hdev->le_conn_min_interval;
3552         params->conn_max_interval = hdev->le_conn_max_interval;
3553         params->conn_latency = hdev->le_conn_latency;
3554         params->supervision_timeout = hdev->le_supv_timeout;
3555         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3556
3557         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3558
3559         return params;
3560 }
3561
3562 /* This function requires the caller holds hdev->lock */
3563 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3564                         u8 auto_connect)
3565 {
3566         struct hci_conn_params *params;
3567
3568         params = hci_conn_params_add(hdev, addr, addr_type);
3569         if (!params)
3570                 return -EIO;
3571
3572         if (params->auto_connect == auto_connect)
3573                 return 0;
3574
3575         list_del_init(&params->action);
3576
3577         switch (auto_connect) {
3578         case HCI_AUTO_CONN_DISABLED:
3579         case HCI_AUTO_CONN_LINK_LOSS:
3580                 hci_update_background_scan(hdev);
3581                 break;
3582         case HCI_AUTO_CONN_REPORT:
3583                 list_add(&params->action, &hdev->pend_le_reports);
3584                 hci_update_background_scan(hdev);
3585                 break;
3586         case HCI_AUTO_CONN_ALWAYS:
3587                 if (!is_connected(hdev, addr, addr_type)) {
3588                         list_add(&params->action, &hdev->pend_le_conns);
3589                         hci_update_background_scan(hdev);
3590                 }
3591                 break;
3592         }
3593
3594         params->auto_connect = auto_connect;
3595
3596         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3597                auto_connect);
3598
3599         return 0;
3600 }
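
/* Illustrative usage sketch (not part of this file): enabling automatic
 * reconnection for a known device. As noted above, hdev->lock must be
 * held; the bdaddr value here is a placeholder.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */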
3601
3602 /* This function requires the caller holds hdev->lock */
3603 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3604 {
3605         struct hci_conn_params *params;
3606
3607         params = hci_conn_params_lookup(hdev, addr, addr_type);
3608         if (!params)
3609                 return;
3610
3611         list_del(&params->action);
3612         list_del(&params->list);
3613         kfree(params);
3614
3615         hci_update_background_scan(hdev);
3616
3617         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3618 }
3619
3620 /* This function requires the caller holds hdev->lock */
3621 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3622 {
3623         struct hci_conn_params *params, *tmp;
3624
3625         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3626                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3627                         continue;
3628                 list_del(&params->list);
3629                 kfree(params);
3630         }
3631
3632         BT_DBG("All LE disabled connection parameters were removed");
3633 }
3634
3635 /* This function requires the caller holds hdev->lock */
3636 void hci_conn_params_clear_all(struct hci_dev *hdev)
3637 {
3638         struct hci_conn_params *params, *tmp;
3639
3640         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3641                 list_del(&params->action);
3642                 list_del(&params->list);
3643                 kfree(params);
3644         }
3645
3646         hci_update_background_scan(hdev);
3647
3648         BT_DBG("All LE connection parameters were removed");
3649 }
3650
3651 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3652 {
3653         if (status) {
3654                 BT_ERR("Failed to start inquiry: status %d", status);
3655
3656                 hci_dev_lock(hdev);
3657                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3658                 hci_dev_unlock(hdev);
3659                 return;
3660         }
3661 }
3662
3663 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3664 {
3665         /* General inquiry access code (GIAC) */
3666         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3667         struct hci_request req;
3668         struct hci_cp_inquiry cp;
3669         int err;
3670
3671         if (status) {
3672                 BT_ERR("Failed to disable LE scanning: status %d", status);
3673                 return;
3674         }
3675
3676         switch (hdev->discovery.type) {
3677         case DISCOV_TYPE_LE:
3678                 hci_dev_lock(hdev);
3679                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3680                 hci_dev_unlock(hdev);
3681                 break;
3682
3683         case DISCOV_TYPE_INTERLEAVED:
3684                 hci_req_init(&req, hdev);
3685
3686                 memset(&cp, 0, sizeof(cp));
3687                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3688                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3689                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3690
3691                 hci_dev_lock(hdev);
3692
3693                 hci_inquiry_cache_flush(hdev);
3694
3695                 err = hci_req_run(&req, inquiry_complete);
3696                 if (err) {
3697                         BT_ERR("Inquiry request failed: err %d", err);
3698                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3699                 }
3700
3701                 hci_dev_unlock(hdev);
3702                 break;
3703         }
3704 }
3705
3706 static void le_scan_disable_work(struct work_struct *work)
3707 {
3708         struct hci_dev *hdev = container_of(work, struct hci_dev,
3709                                             le_scan_disable.work);
3710         struct hci_request req;
3711         int err;
3712
3713         BT_DBG("%s", hdev->name);
3714
3715         hci_req_init(&req, hdev);
3716
3717         hci_req_add_le_scan_disable(&req);
3718
3719         err = hci_req_run(&req, le_scan_disable_work_complete);
3720         if (err)
3721                 BT_ERR("Disable LE scanning request failed: err %d", err);
3722 }
3723
3724 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3725 {
3726         struct hci_dev *hdev = req->hdev;
3727
3728         /* If we're advertising or initiating an LE connection, we can't
3729          * go ahead and change the random address at this time. This is
3730          * because the eventual initiator address used for the
3731          * subsequently created connection will be undefined (some
3732          * controllers use the new address and others the one we had
3733          * when the operation started).
3734          *
3735          * In this kind of scenario, skip the update and let the random
3736          * address be updated at the next cycle.
3737          */
3738         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3739             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3740                 BT_DBG("Deferring random address update");
3741                 return;
3742         }
3743
3744         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3745 }
3746
3747 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3748                               u8 *own_addr_type)
3749 {
3750         struct hci_dev *hdev = req->hdev;
3751         int err;
3752
3753         /* If privacy is enabled, use a resolvable private address. If
3754          * the current RPA has expired, or something other than the
3755          * current RPA is in use, generate a new one.
3756          */
3757         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3758                 int to;
3759
3760                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3761
3762                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3763                     !bacmp(&hdev->random_addr, &hdev->rpa))
3764                         return 0;
3765
3766                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3767                 if (err < 0) {
3768                         BT_ERR("%s failed to generate new RPA", hdev->name);
3769                         return err;
3770                 }
3771
3772                 set_random_addr(req, &hdev->rpa);
3773
3774                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3775                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3776
3777                 return 0;
3778         }
3779
3780         /* If privacy is required but no resolvable private address is
3781          * available, use a non-resolvable private address. This is useful
3782          * for active scanning and non-connectable advertising.
3783          */
3784         if (require_privacy) {
3785                 bdaddr_t urpa;
3786
3787                 get_random_bytes(&urpa, 6);
3788                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3789
3790                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3791                 set_random_addr(req, &urpa);
3792                 return 0;
3793         }
3794
3795         /* If forcing the static address is in use, or there is no public
3796          * address, use the static address as the random address (but skip
3797          * the HCI command if the current random address is already the
3798          * static one).
3799          */
3800         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3801             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3802                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3803                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3804                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3805                                     &hdev->static_addr);
3806                 return 0;
3807         }
3808
3809         /* Neither privacy nor a static address is being used, so use a
3810          * public address.
3811          */
3812         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3813
3814         return 0;
3815 }
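
/* Illustrative usage sketch (not part of this file): a request builder
 * calls hci_update_random_address() before queueing an LE command that
 * carries an own-address type, for example when setting advertising
 * parameters. Error handling of the surrounding request is elided.
 *
 *	struct hci_cp_le_set_adv_param cp;
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	cp.own_address_type = own_addr_type;
 *	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
 */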
3816
3817 /* Copy the Identity Address of the controller.
3818  *
3819  * If the controller has a public BD_ADDR, then by default use that one.
3820  * If this is a LE only controller without a public address, default to
3821  * the static random address.
3822  *
3823  * For debugging purposes it is possible to force controllers with a
3824  * public address to use the static random address instead.
3825  */
3826 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3827                                u8 *bdaddr_type)
3828 {
3829         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3830             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3831                 bacpy(bdaddr, &hdev->static_addr);
3832                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3833         } else {
3834                 bacpy(bdaddr, &hdev->bdaddr);
3835                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3836         }
3837 }
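
/* Illustrative usage sketch: a caller that needs to report the
 * controller's identity address, for example to the management
 * interface, uses the helper like this; the variable names are
 * placeholders.
 *
 *	bdaddr_t identity;
 *	u8 identity_type;
 *
 *	hci_copy_identity_address(hdev, &identity, &identity_type);
 */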
3838
3839 /* Alloc HCI device */
3840 struct hci_dev *hci_alloc_dev(void)
3841 {
3842         struct hci_dev *hdev;
3843
3844         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3845         if (!hdev)
3846                 return NULL;
3847
3848         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3849         hdev->esco_type = (ESCO_HV1);
3850         hdev->link_mode = (HCI_LM_ACCEPT);
3851         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3852         hdev->io_capability = 0x03;     /* No Input No Output */
3853         hdev->manufacturer = 0xffff;    /* Default to internal use */
3854         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3855         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3856
3857         hdev->sniff_max_interval = 800;
3858         hdev->sniff_min_interval = 80;
3859
3860         hdev->le_adv_channel_map = 0x07;
3861         hdev->le_scan_interval = 0x0060;
3862         hdev->le_scan_window = 0x0030;
3863         hdev->le_conn_min_interval = 0x0028;
3864         hdev->le_conn_max_interval = 0x0038;
3865         hdev->le_conn_latency = 0x0000;
3866         hdev->le_supv_timeout = 0x002a;
3867
3868         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3869         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3870         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3871         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3872
3873         mutex_init(&hdev->lock);
3874         mutex_init(&hdev->req_lock);
3875
3876         INIT_LIST_HEAD(&hdev->mgmt_pending);
3877         INIT_LIST_HEAD(&hdev->blacklist);
3878         INIT_LIST_HEAD(&hdev->uuids);
3879         INIT_LIST_HEAD(&hdev->link_keys);
3880         INIT_LIST_HEAD(&hdev->long_term_keys);
3881         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3882         INIT_LIST_HEAD(&hdev->remote_oob_data);
3883         INIT_LIST_HEAD(&hdev->le_white_list);
3884         INIT_LIST_HEAD(&hdev->le_conn_params);
3885         INIT_LIST_HEAD(&hdev->pend_le_conns);
3886         INIT_LIST_HEAD(&hdev->pend_le_reports);
3887         INIT_LIST_HEAD(&hdev->conn_hash.list);
3888
3889         INIT_WORK(&hdev->rx_work, hci_rx_work);
3890         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3891         INIT_WORK(&hdev->tx_work, hci_tx_work);
3892         INIT_WORK(&hdev->power_on, hci_power_on);
3893
3894         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3895         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3896         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3897
3898         skb_queue_head_init(&hdev->rx_q);
3899         skb_queue_head_init(&hdev->cmd_q);
3900         skb_queue_head_init(&hdev->raw_q);
3901
3902         init_waitqueue_head(&hdev->req_wait_q);
3903
3904         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3905
3906         hci_init_sysfs(hdev);
3907         discovery_init(hdev);
3908
3909         return hdev;
3910 }
3911 EXPORT_SYMBOL(hci_alloc_dev);
3912
3913 /* Free HCI device */
3914 void hci_free_dev(struct hci_dev *hdev)
3915 {
3916         /* will free via device release */
3917         put_device(&hdev->dev);
3918 }
3919 EXPORT_SYMBOL(hci_free_dev);
3920
3921 /* Register HCI device */
3922 int hci_register_dev(struct hci_dev *hdev)
3923 {
3924         int id, error;
3925
3926         if (!hdev->open || !hdev->close)
3927                 return -EINVAL;
3928
3929         /* Do not allow HCI_AMP devices to register at index 0,
3930          * so the index can be used as the AMP controller ID.
3931          */
3932         switch (hdev->dev_type) {
3933         case HCI_BREDR:
3934                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3935                 break;
3936         case HCI_AMP:
3937                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3938                 break;
3939         default:
3940                 return -EINVAL;
3941         }
3942
3943         if (id < 0)
3944                 return id;
3945
3946         sprintf(hdev->name, "hci%d", id);
3947         hdev->id = id;
3948
3949         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3950
3951         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3952                                           WQ_MEM_RECLAIM, 1, hdev->name);
3953         if (!hdev->workqueue) {
3954                 error = -ENOMEM;
3955                 goto err;
3956         }
3957
3958         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3959                                               WQ_MEM_RECLAIM, 1, hdev->name);
3960         if (!hdev->req_workqueue) {
3961                 destroy_workqueue(hdev->workqueue);
3962                 error = -ENOMEM;
3963                 goto err;
3964         }
3965
3966         if (!IS_ERR_OR_NULL(bt_debugfs))
3967                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3968
3969         dev_set_name(&hdev->dev, "%s", hdev->name);
3970
3971         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3972                                                CRYPTO_ALG_ASYNC);
3973         if (IS_ERR(hdev->tfm_aes)) {
3974                 BT_ERR("Unable to create crypto context");
3975                 error = PTR_ERR(hdev->tfm_aes);
3976                 hdev->tfm_aes = NULL;
3977                 goto err_wqueue;
3978         }
3979
3980         error = device_add(&hdev->dev);
3981         if (error < 0)
3982                 goto err_tfm;
3983
3984         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3985                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3986                                     hdev);
3987         if (hdev->rfkill) {
3988                 if (rfkill_register(hdev->rfkill) < 0) {
3989                         rfkill_destroy(hdev->rfkill);
3990                         hdev->rfkill = NULL;
3991                 }
3992         }
3993
3994         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3995                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3996
3997         set_bit(HCI_SETUP, &hdev->dev_flags);
3998         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3999
4000         if (hdev->dev_type == HCI_BREDR) {
4001                 /* Assume BR/EDR support until proven otherwise (such as
4002                  * through reading supported features during init).
4003                  */
4004                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4005         }
4006
4007         write_lock(&hci_dev_list_lock);
4008         list_add(&hdev->list, &hci_dev_list);
4009         write_unlock(&hci_dev_list_lock);
4010
4011         /* Devices that are marked for raw-only usage are unconfigured
4012          * and should not be included in normal operation.
4013          */
4014         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4015                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4016
4017         hci_notify(hdev, HCI_DEV_REG);
4018         hci_dev_hold(hdev);
4019
4020         queue_work(hdev->req_workqueue, &hdev->power_on);
4021
4022         return id;
4023
4024 err_tfm:
4025         crypto_free_blkcipher(hdev->tfm_aes);
4026 err_wqueue:
4027         destroy_workqueue(hdev->workqueue);
4028         destroy_workqueue(hdev->req_workqueue);
4029 err:
4030         ida_simple_remove(&hci_index_ida, hdev->id);
4031
4032         return error;
4033 }
4034 EXPORT_SYMBOL(hci_register_dev);
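
/* Illustrative usage sketch (not part of this file): a transport driver
 * typically pairs hci_alloc_dev()/hci_register_dev() with hci_free_dev()
 * on failure. The my_open/my_close/my_send callbacks are hypothetical;
 * open and close are mandatory, as checked above.
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */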
4035
4036 /* Unregister HCI device */
4037 void hci_unregister_dev(struct hci_dev *hdev)
4038 {
4039         int i, id;
4040
4041         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4042
4043         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4044
4045         id = hdev->id;
4046
4047         write_lock(&hci_dev_list_lock);
4048         list_del(&hdev->list);
4049         write_unlock(&hci_dev_list_lock);
4050
4051         hci_dev_do_close(hdev);
4052
4053         for (i = 0; i < NUM_REASSEMBLY; i++)
4054                 kfree_skb(hdev->reassembly[i]);
4055
4056         cancel_work_sync(&hdev->power_on);
4057
4058         if (!test_bit(HCI_INIT, &hdev->flags) &&
4059             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4060             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4061                 hci_dev_lock(hdev);
4062                 mgmt_index_removed(hdev);
4063                 hci_dev_unlock(hdev);
4064         }
4065
4066         /* mgmt_index_removed should take care of emptying the
4067          * pending list. */
4068         BUG_ON(!list_empty(&hdev->mgmt_pending));
4069
4070         hci_notify(hdev, HCI_DEV_UNREG);
4071
4072         if (hdev->rfkill) {
4073                 rfkill_unregister(hdev->rfkill);
4074                 rfkill_destroy(hdev->rfkill);
4075         }
4076
4077         if (hdev->tfm_aes)
4078                 crypto_free_blkcipher(hdev->tfm_aes);
4079
4080         device_del(&hdev->dev);
4081
4082         debugfs_remove_recursive(hdev->debugfs);
4083
4084         destroy_workqueue(hdev->workqueue);
4085         destroy_workqueue(hdev->req_workqueue);
4086
4087         hci_dev_lock(hdev);
4088         hci_blacklist_clear(hdev);
4089         hci_uuids_clear(hdev);
4090         hci_link_keys_clear(hdev);
4091         hci_smp_ltks_clear(hdev);
4092         hci_smp_irks_clear(hdev);
4093         hci_remote_oob_data_clear(hdev);
4094         hci_white_list_clear(hdev);
4095         hci_conn_params_clear_all(hdev);
4096         hci_dev_unlock(hdev);
4097
4098         hci_dev_put(hdev);
4099
4100         ida_simple_remove(&hci_index_ida, id);
4101 }
4102 EXPORT_SYMBOL(hci_unregister_dev);
4103
4104 /* Suspend HCI device */
4105 int hci_suspend_dev(struct hci_dev *hdev)
4106 {
4107         hci_notify(hdev, HCI_DEV_SUSPEND);
4108         return 0;
4109 }
4110 EXPORT_SYMBOL(hci_suspend_dev);
4111
4112 /* Resume HCI device */
4113 int hci_resume_dev(struct hci_dev *hdev)
4114 {
4115         hci_notify(hdev, HCI_DEV_RESUME);
4116         return 0;
4117 }
4118 EXPORT_SYMBOL(hci_resume_dev);
4119
4120 /* Receive frame from HCI drivers */
4121 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4122 {
4123         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4124                       && !test_bit(HCI_INIT, &hdev->flags))) {
4125                 kfree_skb(skb);
4126                 return -ENXIO;
4127         }
4128
4129         /* Incoming skb */
4130         bt_cb(skb)->incoming = 1;
4131
4132         /* Time stamp */
4133         __net_timestamp(skb);
4134
4135         skb_queue_tail(&hdev->rx_q, skb);
4136         queue_work(hdev->workqueue, &hdev->rx_work);
4137
4138         return 0;
4139 }
4140 EXPORT_SYMBOL(hci_recv_frame);
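
/* Illustrative usage sketch: a driver delivering a complete event frame
 * from its completion handler sets the packet type and hands the skb
 * over; hci_recv_frame() takes ownership of the skb in all cases.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */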
4141
4142 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4143                           int count, __u8 index)
4144 {
4145         int len = 0;
4146         int hlen = 0;
4147         int remain = count;
4148         struct sk_buff *skb;
4149         struct bt_skb_cb *scb;
4150
4151         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4152             index >= NUM_REASSEMBLY)
4153                 return -EILSEQ;
4154
4155         skb = hdev->reassembly[index];
4156
4157         if (!skb) {
4158                 switch (type) {
4159                 case HCI_ACLDATA_PKT:
4160                         len = HCI_MAX_FRAME_SIZE;
4161                         hlen = HCI_ACL_HDR_SIZE;
4162                         break;
4163                 case HCI_EVENT_PKT:
4164                         len = HCI_MAX_EVENT_SIZE;
4165                         hlen = HCI_EVENT_HDR_SIZE;
4166                         break;
4167                 case HCI_SCODATA_PKT:
4168                         len = HCI_MAX_SCO_SIZE;
4169                         hlen = HCI_SCO_HDR_SIZE;
4170                         break;
4171                 }
4172
4173                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4174                 if (!skb)
4175                         return -ENOMEM;
4176
4177                 scb = (void *) skb->cb;
4178                 scb->expect = hlen;
4179                 scb->pkt_type = type;
4180
4181                 hdev->reassembly[index] = skb;
4182         }
4183
4184         while (count) {
4185                 scb = (void *) skb->cb;
4186                 len = min_t(uint, scb->expect, count);
4187
4188                 memcpy(skb_put(skb, len), data, len);
4189
4190                 count -= len;
4191                 data += len;
4192                 scb->expect -= len;
4193                 remain = count;
4194
4195                 switch (type) {
4196                 case HCI_EVENT_PKT:
4197                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4198                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4199                                 scb->expect = h->plen;
4200
4201                                 if (skb_tailroom(skb) < scb->expect) {
4202                                         kfree_skb(skb);
4203                                         hdev->reassembly[index] = NULL;
4204                                         return -ENOMEM;
4205                                 }
4206                         }
4207                         break;
4208
4209                 case HCI_ACLDATA_PKT:
4210                         if (skb->len == HCI_ACL_HDR_SIZE) {
4211                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4212                                 scb->expect = __le16_to_cpu(h->dlen);
4213
4214                                 if (skb_tailroom(skb) < scb->expect) {
4215                                         kfree_skb(skb);
4216                                         hdev->reassembly[index] = NULL;
4217                                         return -ENOMEM;
4218                                 }
4219                         }
4220                         break;
4221
4222                 case HCI_SCODATA_PKT:
4223                         if (skb->len == HCI_SCO_HDR_SIZE) {
4224                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4225                                 scb->expect = h->dlen;
4226
4227                                 if (skb_tailroom(skb) < scb->expect) {
4228                                         kfree_skb(skb);
4229                                         hdev->reassembly[index] = NULL;
4230                                         return -ENOMEM;
4231                                 }
4232                         }
4233                         break;
4234                 }
4235
4236                 if (scb->expect == 0) {
4237                         /* Complete frame */
4238
4239                         bt_cb(skb)->pkt_type = type;
4240                         hci_recv_frame(hdev, skb);
4241
4242                         hdev->reassembly[index] = NULL;
4243                         return remain;
4244                 }
4245         }
4246
4247         return remain;
4248 }
4249
4250 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4251 {
4252         int rem = 0;
4253
4254         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4255                 return -EILSEQ;
4256
4257         while (count) {
4258                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4259                 if (rem < 0)
4260                         return rem;
4261
4262                 data += (count - rem);
4263                 count = rem;
4264         }
4265
4266         return rem;
4267 }
4268 EXPORT_SYMBOL(hci_recv_fragment);
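
/* Illustrative usage sketch: a driver whose transport signals the packet
 * type out of band can feed arbitrarily sized chunks here; complete
 * frames are forwarded to the core via hci_recv_frame() automatically.
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, count);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 */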
4269
4270 #define STREAM_REASSEMBLY 0
4271
4272 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4273 {
4274         int type;
4275         int rem = 0;
4276
4277         while (count) {
4278                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4279
4280                 if (!skb) {
4281                         struct { char type; } *pkt;
4282
4283                         /* Start of the frame */
4284                         pkt = data;
4285                         type = pkt->type;
4286
4287                         data++;
4288                         count--;
4289                 } else
4290                         type = bt_cb(skb)->pkt_type;
4291
4292                 rem = hci_reassembly(hdev, type, data, count,
4293                                      STREAM_REASSEMBLY);
4294                 if (rem < 0)
4295                         return rem;
4296
4297                 data += (count - rem);
4298                 count = rem;
4299         }
4300
4301         return rem;
4302 }
4303 EXPORT_SYMBOL(hci_recv_stream_fragment);
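
/* Illustrative usage sketch: an H:4 style UART driver, where every frame
 * is prefixed in-band with its packet type byte, passes the raw receive
 * buffer straight through and the type byte is consumed here.
 *
 *	hci_recv_stream_fragment(hdev, rx_buf, rx_count);
 */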
4304
4305 /* ---- Interface to upper protocols ---- */
4306
4307 int hci_register_cb(struct hci_cb *cb)
4308 {
4309         BT_DBG("%p name %s", cb, cb->name);
4310
4311         write_lock(&hci_cb_list_lock);
4312         list_add(&cb->list, &hci_cb_list);
4313         write_unlock(&hci_cb_list_lock);
4314
4315         return 0;
4316 }
4317 EXPORT_SYMBOL(hci_register_cb);
4318
4319 int hci_unregister_cb(struct hci_cb *cb)
4320 {
4321         BT_DBG("%p name %s", cb, cb->name);
4322
4323         write_lock(&hci_cb_list_lock);
4324         list_del(&cb->list);
4325         write_unlock(&hci_cb_list_lock);
4326
4327         return 0;
4328 }
4329 EXPORT_SYMBOL(hci_unregister_cb);
4330
4331 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4332 {
4333         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4334
4335         /* Time stamp */
4336         __net_timestamp(skb);
4337
4338         /* Send copy to monitor */
4339         hci_send_to_monitor(hdev, skb);
4340
4341         if (atomic_read(&hdev->promisc)) {
4342                 /* Send copy to the sockets */
4343                 hci_send_to_sock(hdev, skb);
4344         }
4345
4346         /* Get rid of the skb owner prior to sending to the driver. */
4347         skb_orphan(skb);
4348
4349         if (hdev->send(hdev, skb) < 0)
4350                 BT_ERR("%s sending frame failed", hdev->name);
4351 }
4352
4353 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4354 {
4355         skb_queue_head_init(&req->cmd_q);
4356         req->hdev = hdev;
4357         req->err = 0;
4358 }
4359
4360 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4361 {
4362         struct hci_dev *hdev = req->hdev;
4363         struct sk_buff *skb;
4364         unsigned long flags;
4365
4366         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4367
4368         /* If an error occurred during request building, remove all HCI
4369          * commands queued on the HCI request queue.
4370          */
4371         if (req->err) {
4372                 skb_queue_purge(&req->cmd_q);
4373                 return req->err;
4374         }
4375
4376         /* Do not allow empty requests */
4377         if (skb_queue_empty(&req->cmd_q))
4378                 return -ENODATA;
4379
4380         skb = skb_peek_tail(&req->cmd_q);
4381         bt_cb(skb)->req.complete = complete;
4382
4383         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4384         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4385         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4386
4387         queue_work(hdev->workqueue, &hdev->cmd_work);
4388
4389         return 0;
4390 }
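
/* Illustrative usage sketch: the typical request pattern, as also used
 * by le_scan_disable_work() above. The read_name_complete callback is a
 * hypothetical hci_req_complete_t function.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, read_name_complete);
 */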
4391
4392 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4393                                        u32 plen, const void *param)
4394 {
4395         int len = HCI_COMMAND_HDR_SIZE + plen;
4396         struct hci_command_hdr *hdr;
4397         struct sk_buff *skb;
4398
4399         skb = bt_skb_alloc(len, GFP_ATOMIC);
4400         if (!skb)
4401                 return NULL;
4402
4403         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4404         hdr->opcode = cpu_to_le16(opcode);
4405         hdr->plen   = plen;
4406
4407         if (plen)
4408                 memcpy(skb_put(skb, plen), param, plen);
4409
4410         BT_DBG("skb len %d", skb->len);
4411
4412         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4413
4414         return skb;
4415 }
4416
4417 /* Send HCI command */
4418 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4419                  const void *param)
4420 {
4421         struct sk_buff *skb;
4422
4423         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4424
4425         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4426         if (!skb) {
4427                 BT_ERR("%s no memory for command", hdev->name);
4428                 return -ENOMEM;
4429         }
4430
4431         /* Stand-alone HCI commands must be flagged as
4432          * single-command requests.
4433          */
4434         bt_cb(skb)->req.start = true;
4435
4436         skb_queue_tail(&hdev->cmd_q, skb);
4437         queue_work(hdev->workqueue, &hdev->cmd_work);
4438
4439         return 0;
4440 }
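
/* Illustrative usage sketch: a stand-alone command with no parameters,
 * such as an HCI reset, is sent as follows.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */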
4441
4442 /* Queue a command to an asynchronous HCI request */
4443 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4444                     const void *param, u8 event)
4445 {
4446         struct hci_dev *hdev = req->hdev;
4447         struct sk_buff *skb;
4448
4449         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4450
4451         /* If an error occurred during request building, there is no point in
4452          * queueing the HCI command. We can simply return.
4453          */
4454         if (req->err)
4455                 return;
4456
4457         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4458         if (!skb) {
4459                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4460                        hdev->name, opcode);
4461                 req->err = -ENOMEM;
4462                 return;
4463         }
4464
4465         if (skb_queue_empty(&req->cmd_q))
4466                 bt_cb(skb)->req.start = true;
4467
4468         bt_cb(skb)->req.event = event;
4469
4470         skb_queue_tail(&req->cmd_q, skb);
4471 }
4472
4473 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4474                  const void *param)
4475 {
4476         hci_req_add_ev(req, opcode, plen, param, 0);
4477 }
4478
4479 /* Get data from the previously sent command */
4480 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4481 {
4482         struct hci_command_hdr *hdr;
4483
4484         if (!hdev->sent_cmd)
4485                 return NULL;
4486
4487         hdr = (void *) hdev->sent_cmd->data;
4488
4489         if (hdr->opcode != cpu_to_le16(opcode))
4490                 return NULL;
4491
4492         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4493
4494         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4495 }
4496
4497 /* Send ACL data */
4498 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4499 {
4500         struct hci_acl_hdr *hdr;
4501         int len = skb->len;
4502
4503         skb_push(skb, HCI_ACL_HDR_SIZE);
4504         skb_reset_transport_header(skb);
4505         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4506         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4507         hdr->dlen   = cpu_to_le16(len);
4508 }
4509
4510 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4511                           struct sk_buff *skb, __u16 flags)
4512 {
4513         struct hci_conn *conn = chan->conn;
4514         struct hci_dev *hdev = conn->hdev;
4515         struct sk_buff *list;
4516
4517         skb->len = skb_headlen(skb);
4518         skb->data_len = 0;
4519
4520         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4521
4522         switch (hdev->dev_type) {
4523         case HCI_BREDR:
4524                 hci_add_acl_hdr(skb, conn->handle, flags);
4525                 break;
4526         case HCI_AMP:
4527                 hci_add_acl_hdr(skb, chan->handle, flags);
4528                 break;
4529         default:
4530                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4531                 return;
4532         }
4533
4534         list = skb_shinfo(skb)->frag_list;
4535         if (!list) {
4536                 /* Non-fragmented */
4537                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4538
4539                 skb_queue_tail(queue, skb);
4540         } else {
4541                 /* Fragmented */
4542                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4543
4544                 skb_shinfo(skb)->frag_list = NULL;
4545
4546                 /* Queue all fragments atomically */
4547                 spin_lock(&queue->lock);
4548
4549                 __skb_queue_tail(queue, skb);
4550
4551                 flags &= ~ACL_START;
4552                 flags |= ACL_CONT;
4553                 do {
4554                         skb = list; list = list->next;
4555
4556                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4557                         hci_add_acl_hdr(skb, conn->handle, flags);
4558
4559                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4560
4561                         __skb_queue_tail(queue, skb);
4562                 } while (list);
4563
4564                 spin_unlock(&queue->lock);
4565         }
4566 }
4567
4568 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4569 {
4570         struct hci_dev *hdev = chan->conn->hdev;
4571
4572         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4573
4574         hci_queue_acl(chan, &chan->data_q, skb, flags);
4575
4576         queue_work(hdev->workqueue, &hdev->tx_work);
4577 }
4578
4579 /* Send SCO data */
4580 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4581 {
4582         struct hci_dev *hdev = conn->hdev;
4583         struct hci_sco_hdr hdr;
4584
4585         BT_DBG("%s len %d", hdev->name, skb->len);
4586
4587         hdr.handle = cpu_to_le16(conn->handle);
4588         hdr.dlen   = skb->len;
4589
4590         skb_push(skb, HCI_SCO_HDR_SIZE);
4591         skb_reset_transport_header(skb);
4592         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4593
4594         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4595
4596         skb_queue_tail(&conn->data_q, skb);
4597         queue_work(hdev->workqueue, &hdev->tx_work);
4598 }
4599
4600 /* ---- HCI TX task (outgoing data) ---- */
4601
4602 /* HCI Connection scheduler */
4603 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4604                                      int *quote)
4605 {
4606         struct hci_conn_hash *h = &hdev->conn_hash;
4607         struct hci_conn *conn = NULL, *c;
4608         unsigned int num = 0, min = ~0;
4609
4610         /* We don't have to lock the device here. Connections are always
4611          * added and removed with the TX task disabled. */
4612
4613         rcu_read_lock();
4614
4615         list_for_each_entry_rcu(c, &h->list, list) {
4616                 if (c->type != type || skb_queue_empty(&c->data_q))
4617                         continue;
4618
4619                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4620                         continue;
4621
4622                 num++;
4623
4624                 if (c->sent < min) {
4625                         min  = c->sent;
4626                         conn = c;
4627                 }
4628
4629                 if (hci_conn_num(hdev, type) == num)
4630                         break;
4631         }
4632
4633         rcu_read_unlock();
4634
4635         if (conn) {
4636                 int cnt, q;
4637
4638                 switch (conn->type) {
4639                 case ACL_LINK:
4640                         cnt = hdev->acl_cnt;
4641                         break;
4642                 case SCO_LINK:
4643                 case ESCO_LINK:
4644                         cnt = hdev->sco_cnt;
4645                         break;
4646                 case LE_LINK:
4647                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4648                         break;
4649                 default:
4650                         cnt = 0;
4651                         BT_ERR("Unknown link type");
4652                 }
4653
4654                 q = cnt / num;
4655                 *quote = q ? q : 1;
4656         } else
4657                 *quote = 0;
4658
4659         BT_DBG("conn %p quote %d", conn, *quote);
4660         return conn;
4661 }
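
/* Worked example (illustrative numbers): with hdev->sco_cnt == 6 and
 * three SCO connections holding queued data, the connection with the
 * lowest ->sent count is selected and given a quote of 6 / 3 = 2
 * packets; the quote never drops below 1.
 */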
4662
4663 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4664 {
4665         struct hci_conn_hash *h = &hdev->conn_hash;
4666         struct hci_conn *c;
4667
4668         BT_ERR("%s link tx timeout", hdev->name);
4669
4670         rcu_read_lock();
4671
4672         /* Kill stalled connections */
4673         list_for_each_entry_rcu(c, &h->list, list) {
4674                 if (c->type == type && c->sent) {
4675                         BT_ERR("%s killing stalled connection %pMR",
4676                                hdev->name, &c->dst);
4677                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4678                 }
4679         }
4680
4681         rcu_read_unlock();
4682 }
4683
4684 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4685                                       int *quote)
4686 {
4687         struct hci_conn_hash *h = &hdev->conn_hash;
4688         struct hci_chan *chan = NULL;
4689         unsigned int num = 0, min = ~0, cur_prio = 0;
4690         struct hci_conn *conn;
4691         int cnt, q, conn_num = 0;
4692
4693         BT_DBG("%s", hdev->name);
4694
4695         rcu_read_lock();
4696
4697         list_for_each_entry_rcu(conn, &h->list, list) {
4698                 struct hci_chan *tmp;
4699
4700                 if (conn->type != type)
4701                         continue;
4702
4703                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4704                         continue;
4705
4706                 conn_num++;
4707
4708                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4709                         struct sk_buff *skb;
4710
4711                         if (skb_queue_empty(&tmp->data_q))
4712                                 continue;
4713
4714                         skb = skb_peek(&tmp->data_q);
4715                         if (skb->priority < cur_prio)
4716                                 continue;
4717
4718                         if (skb->priority > cur_prio) {
4719                                 num = 0;
4720                                 min = ~0;
4721                                 cur_prio = skb->priority;
4722                         }
4723
4724                         num++;
4725
4726                         if (conn->sent < min) {
4727                                 min  = conn->sent;
4728                                 chan = tmp;
4729                         }
4730                 }
4731
4732                 if (hci_conn_num(hdev, type) == conn_num)
4733                         break;
4734         }
4735
4736         rcu_read_unlock();
4737
4738         if (!chan)
4739                 return NULL;
4740
4741         switch (chan->conn->type) {
4742         case ACL_LINK:
4743                 cnt = hdev->acl_cnt;
4744                 break;
4745         case AMP_LINK:
4746                 cnt = hdev->block_cnt;
4747                 break;
4748         case SCO_LINK:
4749         case ESCO_LINK:
4750                 cnt = hdev->sco_cnt;
4751                 break;
4752         case LE_LINK:
4753                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4754                 break;
4755         default:
4756                 cnt = 0;
4757                 BT_ERR("Unknown link type");
4758         }
4759
4760         q = cnt / num;
4761         *quote = q ? q : 1;
4762         BT_DBG("chan %p quote %d", chan, *quote);
4763         return chan;
4764 }
4765
4766 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4767 {
4768         struct hci_conn_hash *h = &hdev->conn_hash;
4769         struct hci_conn *conn;
4770         int num = 0;
4771
4772         BT_DBG("%s", hdev->name);
4773
4774         rcu_read_lock();
4775
4776         list_for_each_entry_rcu(conn, &h->list, list) {
4777                 struct hci_chan *chan;
4778
4779                 if (conn->type != type)
4780                         continue;
4781
4782                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4783                         continue;
4784
4785                 num++;
4786
4787                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4788                         struct sk_buff *skb;
4789
4790                         if (chan->sent) {
4791                                 chan->sent = 0;
4792                                 continue;
4793                         }
4794
4795                         if (skb_queue_empty(&chan->data_q))
4796                                 continue;
4797
4798                         skb = skb_peek(&chan->data_q);
4799                         if (skb->priority >= HCI_PRIO_MAX - 1)
4800                                 continue;
4801
4802                         skb->priority = HCI_PRIO_MAX - 1;
4803
4804                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4805                                skb->priority);
4806                 }
4807
4808                 if (hci_conn_num(hdev, type) == num)
4809                         break;
4810         }
4811
4812         rcu_read_unlock();
4813
4814 }
4815
4816 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4817 {
4818         /* Calculate count of blocks used by this packet */
4819         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4820 }
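
/* Worked example (illustrative numbers): with hdev->block_len == 64, a
 * 260-byte ACL skb carries 260 - HCI_ACL_HDR_SIZE = 256 payload bytes
 * and therefore occupies DIV_ROUND_UP(256, 64) = 4 data blocks.
 */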
4821
4822 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4823 {
4824         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4825                 /* ACL tx timeout must be longer than the maximum
4826                  * link supervision timeout (40.9 seconds) */
4827                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4828                                        HCI_ACL_TX_TIMEOUT))
4829                         hci_link_tx_to(hdev, ACL_LINK);
4830         }
4831 }
4832
4833 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4834 {
4835         unsigned int cnt = hdev->acl_cnt;
4836         struct hci_chan *chan;
4837         struct sk_buff *skb;
4838         int quote;
4839
4840         __check_timeout(hdev, cnt);
4841
4842         while (hdev->acl_cnt &&
4843                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4844                 u32 priority = (skb_peek(&chan->data_q))->priority;
4845                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4846                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4847                                skb->len, skb->priority);
4848
4849                         /* Stop if priority has changed */
4850                         if (skb->priority < priority)
4851                                 break;
4852
4853                         skb = skb_dequeue(&chan->data_q);
4854
4855                         hci_conn_enter_active_mode(chan->conn,
4856                                                    bt_cb(skb)->force_active);
4857
4858                         hci_send_frame(hdev, skb);
4859                         hdev->acl_last_tx = jiffies;
4860
4861                         hdev->acl_cnt--;
4862                         chan->sent++;
4863                         chan->conn->sent++;
4864                 }
4865         }
4866
4867         if (cnt != hdev->acl_cnt)
4868                 hci_prio_recalculate(hdev, ACL_LINK);
4869 }
4870
4871 static void hci_sched_acl_blk(struct hci_dev *hdev)
4872 {
4873         unsigned int cnt = hdev->block_cnt;
4874         struct hci_chan *chan;
4875         struct sk_buff *skb;
4876         int quote;
4877         u8 type;
4878
4879         __check_timeout(hdev, cnt);
4880
4881         BT_DBG("%s", hdev->name);
4882
4883         if (hdev->dev_type == HCI_AMP)
4884                 type = AMP_LINK;
4885         else
4886                 type = ACL_LINK;
4887
4888         while (hdev->block_cnt > 0 &&
4889                (chan = hci_chan_sent(hdev, type, &quote))) {
4890                 u32 priority = (skb_peek(&chan->data_q))->priority;
4891                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4892                         int blocks;
4893
4894                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4895                                skb->len, skb->priority);
4896
4897                         /* Stop if priority has changed */
4898                         if (skb->priority < priority)
4899                                 break;
4900
4901                         skb = skb_dequeue(&chan->data_q);
4902
4903                         blocks = __get_blocks(hdev, skb);
4904                         if (blocks > hdev->block_cnt) {
                                        /* Out of buffer blocks: put the frame
                                         * back so it is neither lost nor
                                         * leaked.
                                         */
                                        skb_queue_head(&chan->data_q, skb);
                                        return;
                                }
4906
4907                         hci_conn_enter_active_mode(chan->conn,
4908                                                    bt_cb(skb)->force_active);
4909
4910                         hci_send_frame(hdev, skb);
4911                         hdev->acl_last_tx = jiffies;
4912
4913                         hdev->block_cnt -= blocks;
4914                         quote -= blocks;
4915
4916                         chan->sent += blocks;
4917                         chan->conn->sent += blocks;
4918                 }
4919         }
4920
4921         if (cnt != hdev->block_cnt)
4922                 hci_prio_recalculate(hdev, type);
4923 }
4924
4925 static void hci_sched_acl(struct hci_dev *hdev)
4926 {
4927         BT_DBG("%s", hdev->name);
4928
4929         /* A BR/EDR controller with no ACL links has nothing to schedule */
4930         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4931                 return;
4932
4933         /* An AMP controller with no AMP links has nothing to schedule */
4934         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4935                 return;
4936
4937         switch (hdev->flow_ctl_mode) {
4938         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4939                 hci_sched_acl_pkt(hdev);
4940                 break;
4941
4942         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4943                 hci_sched_acl_blk(hdev);
4944                 break;
4945         }
4946 }
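
/* The two branches above mirror the controller's buffer accounting: with
 * packet-based flow control the budget is a number of whole ACL packets
 * (hdev->acl_cnt), while with block-based flow control it is a number of
 * hdev->block_len-sized buffer blocks, of which a single large frame may
 * consume several (see __get_blocks() above).
 */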
4947
4948 /* Schedule SCO */
4949 static void hci_sched_sco(struct hci_dev *hdev)
4950 {
4951         struct hci_conn *conn;
4952         struct sk_buff *skb;
4953         int quote;
4954
4955         BT_DBG("%s", hdev->name);
4956
4957         if (!hci_conn_num(hdev, SCO_LINK))
4958                 return;
4959
4960         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4961                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4962                         BT_DBG("skb %p len %d", skb, skb->len);
4963                         hci_send_frame(hdev, skb);
4964
4965                         conn->sent++;
4966                         if (conn->sent == ~0)
4967                                 conn->sent = 0;
4968                 }
4969         }
4970 }
4971
4972 static void hci_sched_esco(struct hci_dev *hdev)
4973 {
4974         struct hci_conn *conn;
4975         struct sk_buff *skb;
4976         int quote;
4977
4978         BT_DBG("%s", hdev->name);
4979
4980         if (!hci_conn_num(hdev, ESCO_LINK))
4981                 return;
4982
4983         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4984                                                      &quote))) {
4985                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4986                         BT_DBG("skb %p len %d", skb, skb->len);
4987                         hci_send_frame(hdev, skb);
4988
4989                         conn->sent++;
4990                         if (conn->sent == ~0)
4991                                 conn->sent = 0;
4992                 }
4993         }
4994 }
4995
4996 static void hci_sched_le(struct hci_dev *hdev)
4997 {
4998         struct hci_chan *chan;
4999         struct sk_buff *skb;
5000         int quote, cnt, tmp;
5001
5002         BT_DBG("%s", hdev->name);
5003
5004         if (!hci_conn_num(hdev, LE_LINK))
5005                 return;
5006
5007         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5008                 /* LE tx timeout must be longer than the maximum link
5009                  * supervision timeout (40.9 s); hence the 45 s used below */
5010                 if (!hdev->le_cnt && hdev->le_pkts &&
5011                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5012                         hci_link_tx_to(hdev, LE_LINK);
5013         }
5014
5015         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5016         tmp = cnt;
5017         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5018                 u32 priority = (skb_peek(&chan->data_q))->priority;
5019                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5020                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5021                                skb->len, skb->priority);
5022
5023                         /* Stop if priority has changed */
5024                         if (skb->priority < priority)
5025                                 break;
5026
5027                         skb = skb_dequeue(&chan->data_q);
5028
5029                         hci_send_frame(hdev, skb);
5030                         hdev->le_last_tx = jiffies;
5031
5032                         cnt--;
5033                         chan->sent++;
5034                         chan->conn->sent++;
5035                 }
5036         }
5037
5038         if (hdev->le_pkts)
5039                 hdev->le_cnt = cnt;
5040         else
5041                 hdev->acl_cnt = cnt;
5042
5043         if (cnt != tmp)
5044                 hci_prio_recalculate(hdev, LE_LINK);
5045 }
5046
5047 static void hci_tx_work(struct work_struct *work)
5048 {
5049         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5050         struct sk_buff *skb;
5051
5052         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5053                hdev->sco_cnt, hdev->le_cnt);
5054
5055         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5056                 /* Schedule queues and send frames to the HCI driver */
5057                 hci_sched_acl(hdev);
5058                 hci_sched_sco(hdev);
5059                 hci_sched_esco(hdev);
5060                 hci_sched_le(hdev);
5061         }
5062
5063         /* Send next queued raw (unknown type) packet */
5064         while ((skb = skb_dequeue(&hdev->raw_q)))
5065                 hci_send_frame(hdev, skb);
5066 }
5067
5068 /* ----- HCI RX task (incoming data processing) ----- */
5069
5070 /* ACL data packet */
5071 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5072 {
5073         struct hci_acl_hdr *hdr = (void *) skb->data;
5074         struct hci_conn *conn;
5075         __u16 handle, flags;
5076
5077         skb_pull(skb, HCI_ACL_HDR_SIZE);
5078
5079         handle = __le16_to_cpu(hdr->handle);
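        /* The 16-bit handle field packs a 12-bit connection handle and a
         * 4-bit flags nibble (packet boundary and broadcast flags), e.g. a
         * raw value of 0x2001 splits into flags 0x2 and handle 0x001.
         */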
5080         flags  = hci_flags(handle);
5081         handle = hci_handle(handle);
5082
5083         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5084                handle, flags);
5085
5086         hdev->stat.acl_rx++;
5087
5088         hci_dev_lock(hdev);
5089         conn = hci_conn_hash_lookup_handle(hdev, handle);
5090         hci_dev_unlock(hdev);
5091
5092         if (conn) {
5093                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5094
5095                 /* Send to upper protocol */
5096                 l2cap_recv_acldata(conn, skb, flags);
5097                 return;
5098         }
5099
5100         BT_ERR("%s ACL packet for unknown connection handle 0x%4.4x",
5101                hdev->name, handle);
5102
5103         kfree_skb(skb);
5104 }
5105
5106 /* SCO data packet */
5107 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5108 {
5109         struct hci_sco_hdr *hdr = (void *) skb->data;
5110         struct hci_conn *conn;
5111         __u16 handle;
5112
5113         skb_pull(skb, HCI_SCO_HDR_SIZE);
5114
5115         handle = __le16_to_cpu(hdr->handle);
5116
5117         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5118
5119         hdev->stat.sco_rx++;
5120
5121         hci_dev_lock(hdev);
5122         conn = hci_conn_hash_lookup_handle(hdev, handle);
5123         hci_dev_unlock(hdev);
5124
5125         if (conn) {
5126                 /* Send to upper protocol */
5127                 sco_recv_scodata(conn, skb);
5128                 return;
5129         }
5130
5131         BT_ERR("%s SCO packet for unknown connection handle 0x%4.4x",
5132                hdev->name, handle);
5133
5134         kfree_skb(skb);
5135 }
5136
5137 static bool hci_req_is_complete(struct hci_dev *hdev)
5138 {
5139         struct sk_buff *skb;
5140
5141         skb = skb_peek(&hdev->cmd_q);
5142         if (!skb)
5143                 return true;
5144
5145         return bt_cb(skb)->req.start;
5146 }
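
/* Background note (assumed framework behaviour): the first command queued
 * for a request is marked with bt_cb(skb)->req.start by hci_req_add(), so
 * an empty command queue, or a head skb carrying the start marker, means
 * no request has been left half-processed.
 */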
5147
5148 static void hci_resend_last(struct hci_dev *hdev)
5149 {
5150         struct hci_command_hdr *sent;
5151         struct sk_buff *skb;
5152         u16 opcode;
5153
5154         if (!hdev->sent_cmd)
5155                 return;
5156
5157         sent = (void *) hdev->sent_cmd->data;
5158         opcode = __le16_to_cpu(sent->opcode);
5159         if (opcode == HCI_OP_RESET)
5160                 return;
5161
5162         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5163         if (!skb)
5164                 return;
5165
5166         skb_queue_head(&hdev->cmd_q, skb);
5167         queue_work(hdev->workqueue, &hdev->cmd_work);
5168 }
5169
5170 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5171 {
5172         hci_req_complete_t req_complete = NULL;
5173         struct sk_buff *skb;
5174         unsigned long flags;
5175
5176         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5177
5178         /* If the completed command doesn't match the last one that was
5179          * sent, we need to handle it specially.
5180          */
5181         if (!hci_sent_cmd_data(hdev, opcode)) {
5182                 /* Some CSR-based controllers generate a spontaneous
5183                  * Reset Complete event during init, after which any
5184                  * pending command will never be completed. In that
5185                  * case we need to resend whatever the last sent
5186                  * command was.
5187                  */
5188                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5189                         hci_resend_last(hdev);
5190
5191                 return;
5192         }
5193
5194         /* If the command succeeded and there are still more commands in
5195          * this request, the request is not yet complete.
5196          */
5197         if (!status && !hci_req_is_complete(hdev))
5198                 return;
5199
5200         /* If this was the last command in a request, the complete
5201          * callback is found in hdev->sent_cmd rather than in the
5202          * command queue (hdev->cmd_q).
5203          */
5204         if (hdev->sent_cmd) {
5205                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5206
5207                 if (req_complete) {
5208                         /* We must set the complete callback to NULL to
5209                          * avoid calling the callback more than once if
5210                          * this function gets called again.
5211                          */
5212                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5213
5214                         goto call_complete;
5215                 }
5216         }
5217
5218         /* Remove all pending commands belonging to this request */
5219         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5220         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5221                 if (bt_cb(skb)->req.start) {
5222                         __skb_queue_head(&hdev->cmd_q, skb);
5223                         break;
5224                 }
5225
5226                 req_complete = bt_cb(skb)->req.complete;
5227                 kfree_skb(skb);
5228         }
5229         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5230
5231 call_complete:
5232         if (req_complete)
5233                 req_complete(hdev, status);
5234 }
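
/* Usage sketch (illustrative; my_complete and cp are hypothetical): a
 * caller that wants a single notification for a whole request does
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *
 * and hci_req_cmd_complete() above then invokes my_complete() exactly
 * once, whether the request succeeds or fails part-way through.
 */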
5235
5236 static void hci_rx_work(struct work_struct *work)
5237 {
5238         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5239         struct sk_buff *skb;
5240
5241         BT_DBG("%s", hdev->name);
5242
5243         while ((skb = skb_dequeue(&hdev->rx_q))) {
5244                 /* Send copy to monitor */
5245                 hci_send_to_monitor(hdev, skb);
5246
5247                 if (atomic_read(&hdev->promisc)) {
5248                         /* Send copy to the sockets */
5249                         hci_send_to_sock(hdev, skb);
5250                 }
5251
5252                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5253                         kfree_skb(skb);
5254                         continue;
5255                 }
5256
5257                 if (test_bit(HCI_INIT, &hdev->flags)) {
5258                         /* Don't process data packets in this state. */
5259                         switch (bt_cb(skb)->pkt_type) {
5260                         case HCI_ACLDATA_PKT:
5261                         case HCI_SCODATA_PKT:
5262                                 kfree_skb(skb);
5263                                 continue;
5264                         }
5265                 }
5266
5267                 /* Process frame */
5268                 switch (bt_cb(skb)->pkt_type) {
5269                 case HCI_EVENT_PKT:
5270                         BT_DBG("%s Event packet", hdev->name);
5271                         hci_event_packet(hdev, skb);
5272                         break;
5273
5274                 case HCI_ACLDATA_PKT:
5275                         BT_DBG("%s ACL data packet", hdev->name);
5276                         hci_acldata_packet(hdev, skb);
5277                         break;
5278
5279                 case HCI_SCODATA_PKT:
5280                         BT_DBG("%s SCO data packet", hdev->name);
5281                         hci_scodata_packet(hdev, skb);
5282                         break;
5283
5284                 default:
5285                         kfree_skb(skb);
5286                         break;
5287                 }
5288         }
5289 }
5290
5291 static void hci_cmd_work(struct work_struct *work)
5292 {
5293         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5294         struct sk_buff *skb;
5295
5296         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5297                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5298
5299         /* Send queued commands */
5300         if (atomic_read(&hdev->cmd_cnt)) {
5301                 skb = skb_dequeue(&hdev->cmd_q);
5302                 if (!skb)
5303                         return;
5304
5305                 kfree_skb(hdev->sent_cmd);
5306
5307                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5308                 if (hdev->sent_cmd) {
5309                         atomic_dec(&hdev->cmd_cnt);
5310                         hci_send_frame(hdev, skb);
5311                         if (test_bit(HCI_RESET, &hdev->flags))
5312                                 cancel_delayed_work(&hdev->cmd_timer);
5313                         else
5314                                 schedule_delayed_work(&hdev->cmd_timer,
5315                                                       HCI_CMD_TIMEOUT);
5316                 } else {
5317                         skb_queue_head(&hdev->cmd_q, skb);
5318                         queue_work(hdev->workqueue, &hdev->cmd_work);
5319                 }
5320         }
5321 }
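
/* A note on the flow control above: hdev->cmd_cnt is consumed when a
 * command is handed to the driver and is replenished by the event path
 * once the controller acknowledges it with Command Complete or Command
 * Status, so normally at most one HCI command is in flight at a time.
 */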
5322
5323 void hci_req_add_le_scan_disable(struct hci_request *req)
5324 {
5325         struct hci_cp_le_set_scan_enable cp;
5326
5327         memset(&cp, 0, sizeof(cp));
5328         cp.enable = LE_SCAN_DISABLE;
5329         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5330 }
5331
5332 void hci_req_add_le_passive_scan(struct hci_request *req)
5333 {
5334         struct hci_cp_le_set_scan_param param_cp;
5335         struct hci_cp_le_set_scan_enable enable_cp;
5336         struct hci_dev *hdev = req->hdev;
5337         u8 own_addr_type;
5338
5339         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
5340          * during passive scanning. Not using an unresolvable address
5341          * here is important so that peer devices using direct
5342          * advertising with our address will be correctly reported
5343          * by the controller.
5344          */
5345         if (hci_update_random_address(req, false, &own_addr_type))
5346                 return;
5347
5348         memset(&param_cp, 0, sizeof(param_cp));
5349         param_cp.type = LE_SCAN_PASSIVE;
5350         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5351         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5352         param_cp.own_address_type = own_addr_type;
5353         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5354                     &param_cp);
5355
5356         memset(&enable_cp, 0, sizeof(enable_cp));
5357         enable_cp.enable = LE_SCAN_ENABLE;
5358         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5359         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5360                     &enable_cp);
5361 }
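
/* Usage sketch (illustrative): both helpers above only append commands to
 * a request, so nothing reaches the controller until the request is run:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_passive_scan(&req);
 *	err = hci_req_run(&req, NULL);
 *
 * hci_update_background_scan() below follows exactly this pattern.
 */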
5362
5363 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5364 {
5365         if (status)
5366                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5367                        status);
5368 }
5369
5370 /* This function controls background scanning based on hdev->pend_le_conns
5371  * and hdev->pend_le_reports. If either list is non-empty we start the
5372  * background scanning, otherwise we stop it.
5373  *
5374  * This function requires that the caller holds hdev->lock.
5375  */
5376 void hci_update_background_scan(struct hci_dev *hdev)
5377 {
5378         struct hci_request req;
5379         struct hci_conn *conn;
5380         int err;
5381
5382         if (!test_bit(HCI_UP, &hdev->flags) ||
5383             test_bit(HCI_INIT, &hdev->flags) ||
5384             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5385             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5386             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5387             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5388                 return;
5389
5390         hci_req_init(&req, hdev);
5391
5392         if (list_empty(&hdev->pend_le_conns) &&
5393             list_empty(&hdev->pend_le_reports)) {
5394                 /* If there are no pending LE connections and no devices
5395                  * to be scanned for, we should stop the background
5396                  * scanning.
5397                  */
5398
5399                 /* If controller is not scanning we are done. */
5400                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5401                         return;
5402
5403                 hci_req_add_le_scan_disable(&req);
5404
5405                 BT_DBG("%s stopping background scanning", hdev->name);
5406         } else {
5407                 /* If there is at least one pending LE connection, we should
5408                  * keep the background scan running.
5409                  */
5410
5411                 /* If controller is connecting, we should not start scanning
5412                  * since some controllers are not able to scan and connect at
5413                  * the same time.
5414                  */
5415                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5416                 if (conn)
5417                         return;
5418
5419                 /* If controller is currently scanning, we stop it to ensure we
5420                  * don't miss any advertising (due to duplicates filter).
5421                  */
5422                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5423                         hci_req_add_le_scan_disable(&req);
5424
5425                 hci_req_add_le_passive_scan(&req);
5426
5427                 BT_DBG("%s starting background scanning", hdev->name);
5428         }
5429
5430         err = hci_req_run(&req, update_background_scan_complete);
5431         if (err)
5432                 BT_ERR("Failed to run HCI request: err %d", err);
5433 }
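
/* Usage sketch (illustrative): since hci_update_background_scan() must be
 * called with hdev->lock held, a typical caller looks like
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */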