Bluetooth: Run controller setup after external configuration
net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "smp.h"
41
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
45
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
49
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
53
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
56
57 /* ---- HCI notifications ---- */
58
59 static void hci_notify(struct hci_dev *hdev, int event)
60 {
61         hci_sock_dev_event(hdev, event);
62 }
63
64 /* ---- HCI debugfs entries ---- */
65
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67                              size_t count, loff_t *ppos)
68 {
69         struct hci_dev *hdev = file->private_data;
70         char buf[3];
71
72         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
73         buf[1] = '\n';
74         buf[2] = '\0';
75         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 }
77
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79                               size_t count, loff_t *ppos)
80 {
81         struct hci_dev *hdev = file->private_data;
82         struct sk_buff *skb;
83         char buf[32];
84         size_t buf_size = min(count, (sizeof(buf)-1));
85         bool enable;
86         int err;
87
88         if (!test_bit(HCI_UP, &hdev->flags))
89                 return -ENETDOWN;
90
91         if (copy_from_user(buf, user_buf, buf_size))
92                 return -EFAULT;
93
94         buf[buf_size] = '\0';
95         if (strtobool(buf, &enable))
96                 return -EINVAL;
97
98         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
99                 return -EALREADY;
100
101         hci_req_lock(hdev);
102         if (enable)
103                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104                                      HCI_CMD_TIMEOUT);
105         else
106                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107                                      HCI_CMD_TIMEOUT);
108         hci_req_unlock(hdev);
109
110         if (IS_ERR(skb))
111                 return PTR_ERR(skb);
112
113         err = -bt_to_errno(skb->data[0]);
114         kfree_skb(skb);
115
116         if (err < 0)
117                 return err;
118
119         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
120
121         return count;
122 }
123
124 static const struct file_operations dut_mode_fops = {
125         .open           = simple_open,
126         .read           = dut_mode_read,
127         .write          = dut_mode_write,
128         .llseek         = default_llseek,
129 };
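/* Example (sketch): dut_mode is a Y/N toggle. Assuming debugfs is
 * mounted at /sys/kernel/debug and the controller is hci0 and powered
 * up, Device Under Test mode could be driven from userspace roughly
 * like this:
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE; writing 'N' falls back
 * to HCI_OP_RESET, since there is no dedicated disable command.
 */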
130
131 static int features_show(struct seq_file *f, void *ptr)
132 {
133         struct hci_dev *hdev = f->private;
134         u8 p;
135
136         hci_dev_lock(hdev);
137         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140                            hdev->features[p][0], hdev->features[p][1],
141                            hdev->features[p][2], hdev->features[p][3],
142                            hdev->features[p][4], hdev->features[p][5],
143                            hdev->features[p][6], hdev->features[p][7]);
144         }
145         if (lmp_le_capable(hdev))
146                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148                            hdev->le_features[0], hdev->le_features[1],
149                            hdev->le_features[2], hdev->le_features[3],
150                            hdev->le_features[4], hdev->le_features[5],
151                            hdev->le_features[6], hdev->le_features[7]);
152         hci_dev_unlock(hdev);
153
154         return 0;
155 }
156
157 static int features_open(struct inode *inode, struct file *file)
158 {
159         return single_open(file, features_show, inode->i_private);
160 }
161
162 static const struct file_operations features_fops = {
163         .open           = features_open,
164         .read           = seq_read,
165         .llseek         = seq_lseek,
166         .release        = single_release,
167 };
168
169 static int blacklist_show(struct seq_file *f, void *p)
170 {
171         struct hci_dev *hdev = f->private;
172         struct bdaddr_list *b;
173
174         hci_dev_lock(hdev);
175         list_for_each_entry(b, &hdev->blacklist, list)
176                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177         hci_dev_unlock(hdev);
178
179         return 0;
180 }
181
182 static int blacklist_open(struct inode *inode, struct file *file)
183 {
184         return single_open(file, blacklist_show, inode->i_private);
185 }
186
187 static const struct file_operations blacklist_fops = {
188         .open           = blacklist_open,
189         .read           = seq_read,
190         .llseek         = seq_lseek,
191         .release        = single_release,
192 };
193
194 static int uuids_show(struct seq_file *f, void *p)
195 {
196         struct hci_dev *hdev = f->private;
197         struct bt_uuid *uuid;
198
199         hci_dev_lock(hdev);
200         list_for_each_entry(uuid, &hdev->uuids, list) {
201                 u8 i, val[16];
202
203                 /* The Bluetooth UUID values are stored in reversed
204                  * (little-endian) byte order, so swap the bytes into
205                  * big-endian order for the %pUb modifier.
206                  */
207                 for (i = 0; i < 16; i++)
208                         val[i] = uuid->uuid[15 - i];
209
210                 seq_printf(f, "%pUb\n", val);
211         }
212         hci_dev_unlock(hdev);
213
214         return 0;
215 }
216
217 static int uuids_open(struct inode *inode, struct file *file)
218 {
219         return single_open(file, uuids_show, inode->i_private);
220 }
221
222 static const struct file_operations uuids_fops = {
223         .open           = uuids_open,
224         .read           = seq_read,
225         .llseek         = seq_lseek,
226         .release        = single_release,
227 };
228
229 static int inquiry_cache_show(struct seq_file *f, void *p)
230 {
231         struct hci_dev *hdev = f->private;
232         struct discovery_state *cache = &hdev->discovery;
233         struct inquiry_entry *e;
234
235         hci_dev_lock(hdev);
236
237         list_for_each_entry(e, &cache->all, all) {
238                 struct inquiry_data *data = &e->data;
239                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240                            &data->bdaddr,
241                            data->pscan_rep_mode, data->pscan_period_mode,
242                            data->pscan_mode, data->dev_class[2],
243                            data->dev_class[1], data->dev_class[0],
244                            __le16_to_cpu(data->clock_offset),
245                            data->rssi, data->ssp_mode, e->timestamp);
246         }
247
248         hci_dev_unlock(hdev);
249
250         return 0;
251 }
252
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
254 {
255         return single_open(file, inquiry_cache_show, inode->i_private);
256 }
257
258 static const struct file_operations inquiry_cache_fops = {
259         .open           = inquiry_cache_open,
260         .read           = seq_read,
261         .llseek         = seq_lseek,
262         .release        = single_release,
263 };
264
265 static int link_keys_show(struct seq_file *f, void *ptr)
266 {
267         struct hci_dev *hdev = f->private;
268         struct list_head *p, *n;
269
270         hci_dev_lock(hdev);
271         list_for_each_safe(p, n, &hdev->link_keys) {
272                 struct link_key *key = list_entry(p, struct link_key, list);
273                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275         }
276         hci_dev_unlock(hdev);
277
278         return 0;
279 }
280
281 static int link_keys_open(struct inode *inode, struct file *file)
282 {
283         return single_open(file, link_keys_show, inode->i_private);
284 }
285
286 static const struct file_operations link_keys_fops = {
287         .open           = link_keys_open,
288         .read           = seq_read,
289         .llseek         = seq_lseek,
290         .release        = single_release,
291 };
292
293 static int dev_class_show(struct seq_file *f, void *ptr)
294 {
295         struct hci_dev *hdev = f->private;
296
297         hci_dev_lock(hdev);
298         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299                    hdev->dev_class[1], hdev->dev_class[0]);
300         hci_dev_unlock(hdev);
301
302         return 0;
303 }
304
305 static int dev_class_open(struct inode *inode, struct file *file)
306 {
307         return single_open(file, dev_class_show, inode->i_private);
308 }
309
310 static const struct file_operations dev_class_fops = {
311         .open           = dev_class_open,
312         .read           = seq_read,
313         .llseek         = seq_lseek,
314         .release        = single_release,
315 };
316
317 static int voice_setting_get(void *data, u64 *val)
318 {
319         struct hci_dev *hdev = data;
320
321         hci_dev_lock(hdev);
322         *val = hdev->voice_setting;
323         hci_dev_unlock(hdev);
324
325         return 0;
326 }
327
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329                         NULL, "0x%4.4llx\n");
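/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a
 * single u64 value: reads format the value with the given printf
 * string and writes are parsed into the setter. Passing NULL for the
 * setter, as above, makes the attribute read-only. The remaining
 * numeric debugfs entries below follow the same pattern.
 */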
330
331 static int auto_accept_delay_set(void *data, u64 val)
332 {
333         struct hci_dev *hdev = data;
334
335         hci_dev_lock(hdev);
336         hdev->auto_accept_delay = val;
337         hci_dev_unlock(hdev);
338
339         return 0;
340 }
341
342 static int auto_accept_delay_get(void *data, u64 *val)
343 {
344         struct hci_dev *hdev = data;
345
346         hci_dev_lock(hdev);
347         *val = hdev->auto_accept_delay;
348         hci_dev_unlock(hdev);
349
350         return 0;
351 }
352
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354                         auto_accept_delay_set, "%llu\n");
355
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357                                      size_t count, loff_t *ppos)
358 {
359         struct hci_dev *hdev = file->private_data;
360         char buf[3];
361
362         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
363         buf[1] = '\n';
364         buf[2] = '\0';
365         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
366 }
367
368 static ssize_t force_sc_support_write(struct file *file,
369                                       const char __user *user_buf,
370                                       size_t count, loff_t *ppos)
371 {
372         struct hci_dev *hdev = file->private_data;
373         char buf[32];
374         size_t buf_size = min(count, (sizeof(buf)-1));
375         bool enable;
376
377         if (test_bit(HCI_UP, &hdev->flags))
378                 return -EBUSY;
379
380         if (copy_from_user(buf, user_buf, buf_size))
381                 return -EFAULT;
382
383         buf[buf_size] = '\0';
384         if (strtobool(buf, &enable))
385                 return -EINVAL;
386
387         if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
388                 return -EALREADY;
389
390         change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
391
392         return count;
393 }
394
395 static const struct file_operations force_sc_support_fops = {
396         .open           = simple_open,
397         .read           = force_sc_support_read,
398         .write          = force_sc_support_write,
399         .llseek         = default_llseek,
400 };
401
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403                                  size_t count, loff_t *ppos)
404 {
405         struct hci_dev *hdev = file->private_data;
406         char buf[3];
407
408         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
409         buf[1] = '\n';
410         buf[2] = '\0';
411         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
412 }
413
414 static const struct file_operations sc_only_mode_fops = {
415         .open           = simple_open,
416         .read           = sc_only_mode_read,
417         .llseek         = default_llseek,
418 };
419
420 static int idle_timeout_set(void *data, u64 val)
421 {
422         struct hci_dev *hdev = data;
423
424         if (val != 0 && (val < 500 || val > 3600000))
425                 return -EINVAL;
426
427         hci_dev_lock(hdev);
428         hdev->idle_timeout = val;
429         hci_dev_unlock(hdev);
430
431         return 0;
432 }
433
434 static int idle_timeout_get(void *data, u64 *val)
435 {
436         struct hci_dev *hdev = data;
437
438         hci_dev_lock(hdev);
439         *val = hdev->idle_timeout;
440         hci_dev_unlock(hdev);
441
442         return 0;
443 }
444
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446                         idle_timeout_set, "%llu\n");
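/* The idle_timeout value is in milliseconds: 0 disables the idle
 * timer, otherwise the checks above allow 500 ms up to 3600000 ms
 * (one hour).
 */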
447
448 static int rpa_timeout_set(void *data, u64 val)
449 {
450         struct hci_dev *hdev = data;
451
452         /* Require the RPA timeout to be at least 30 seconds and at most
453          * 24 hours.
454          */
455         if (val < 30 || val > (60 * 60 * 24))
456                 return -EINVAL;
457
458         hci_dev_lock(hdev);
459         hdev->rpa_timeout = val;
460         hci_dev_unlock(hdev);
461
462         return 0;
463 }
464
465 static int rpa_timeout_get(void *data, u64 *val)
466 {
467         struct hci_dev *hdev = data;
468
469         hci_dev_lock(hdev);
470         *val = hdev->rpa_timeout;
471         hci_dev_unlock(hdev);
472
473         return 0;
474 }
475
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477                         rpa_timeout_set, "%llu\n");
478
479 static int sniff_min_interval_set(void *data, u64 val)
480 {
481         struct hci_dev *hdev = data;
482
483         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484                 return -EINVAL;
485
486         hci_dev_lock(hdev);
487         hdev->sniff_min_interval = val;
488         hci_dev_unlock(hdev);
489
490         return 0;
491 }
492
493 static int sniff_min_interval_get(void *data, u64 *val)
494 {
495         struct hci_dev *hdev = data;
496
497         hci_dev_lock(hdev);
498         *val = hdev->sniff_min_interval;
499         hci_dev_unlock(hdev);
500
501         return 0;
502 }
503
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505                         sniff_min_interval_set, "%llu\n");
506
507 static int sniff_max_interval_set(void *data, u64 val)
508 {
509         struct hci_dev *hdev = data;
510
511         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512                 return -EINVAL;
513
514         hci_dev_lock(hdev);
515         hdev->sniff_max_interval = val;
516         hci_dev_unlock(hdev);
517
518         return 0;
519 }
520
521 static int sniff_max_interval_get(void *data, u64 *val)
522 {
523         struct hci_dev *hdev = data;
524
525         hci_dev_lock(hdev);
526         *val = hdev->sniff_max_interval;
527         hci_dev_unlock(hdev);
528
529         return 0;
530 }
531
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533                         sniff_max_interval_set, "%llu\n");
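/* Sniff intervals are expressed in baseband slots of 0.625 ms and
 * must be even, which is what the val % 2 checks above enforce.
 */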
534
535 static int conn_info_min_age_set(void *data, u64 val)
536 {
537         struct hci_dev *hdev = data;
538
539         if (val == 0 || val > hdev->conn_info_max_age)
540                 return -EINVAL;
541
542         hci_dev_lock(hdev);
543         hdev->conn_info_min_age = val;
544         hci_dev_unlock(hdev);
545
546         return 0;
547 }
548
549 static int conn_info_min_age_get(void *data, u64 *val)
550 {
551         struct hci_dev *hdev = data;
552
553         hci_dev_lock(hdev);
554         *val = hdev->conn_info_min_age;
555         hci_dev_unlock(hdev);
556
557         return 0;
558 }
559
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561                         conn_info_min_age_set, "%llu\n");
562
563 static int conn_info_max_age_set(void *data, u64 val)
564 {
565         struct hci_dev *hdev = data;
566
567         if (val == 0 || val < hdev->conn_info_min_age)
568                 return -EINVAL;
569
570         hci_dev_lock(hdev);
571         hdev->conn_info_max_age = val;
572         hci_dev_unlock(hdev);
573
574         return 0;
575 }
576
577 static int conn_info_max_age_get(void *data, u64 *val)
578 {
579         struct hci_dev *hdev = data;
580
581         hci_dev_lock(hdev);
582         *val = hdev->conn_info_max_age;
583         hci_dev_unlock(hdev);
584
585         return 0;
586 }
587
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589                         conn_info_max_age_set, "%llu\n");
590
591 static int identity_show(struct seq_file *f, void *p)
592 {
593         struct hci_dev *hdev = f->private;
594         bdaddr_t addr;
595         u8 addr_type;
596
597         hci_dev_lock(hdev);
598
599         hci_copy_identity_address(hdev, &addr, &addr_type);
600
601         seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602                    16, hdev->irk, &hdev->rpa);
603
604         hci_dev_unlock(hdev);
605
606         return 0;
607 }
608
609 static int identity_open(struct inode *inode, struct file *file)
610 {
611         return single_open(file, identity_show, inode->i_private);
612 }
613
614 static const struct file_operations identity_fops = {
615         .open           = identity_open,
616         .read           = seq_read,
617         .llseek         = seq_lseek,
618         .release        = single_release,
619 };
620
621 static int random_address_show(struct seq_file *f, void *p)
622 {
623         struct hci_dev *hdev = f->private;
624
625         hci_dev_lock(hdev);
626         seq_printf(f, "%pMR\n", &hdev->random_addr);
627         hci_dev_unlock(hdev);
628
629         return 0;
630 }
631
632 static int random_address_open(struct inode *inode, struct file *file)
633 {
634         return single_open(file, random_address_show, inode->i_private);
635 }
636
637 static const struct file_operations random_address_fops = {
638         .open           = random_address_open,
639         .read           = seq_read,
640         .llseek         = seq_lseek,
641         .release        = single_release,
642 };
643
644 static int static_address_show(struct seq_file *f, void *p)
645 {
646         struct hci_dev *hdev = f->private;
647
648         hci_dev_lock(hdev);
649         seq_printf(f, "%pMR\n", &hdev->static_addr);
650         hci_dev_unlock(hdev);
651
652         return 0;
653 }
654
655 static int static_address_open(struct inode *inode, struct file *file)
656 {
657         return single_open(file, static_address_show, inode->i_private);
658 }
659
660 static const struct file_operations static_address_fops = {
661         .open           = static_address_open,
662         .read           = seq_read,
663         .llseek         = seq_lseek,
664         .release        = single_release,
665 };
666
667 static ssize_t force_static_address_read(struct file *file,
668                                          char __user *user_buf,
669                                          size_t count, loff_t *ppos)
670 {
671         struct hci_dev *hdev = file->private_data;
672         char buf[3];
673
674         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
675         buf[1] = '\n';
676         buf[2] = '\0';
677         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
678 }
679
680 static ssize_t force_static_address_write(struct file *file,
681                                           const char __user *user_buf,
682                                           size_t count, loff_t *ppos)
683 {
684         struct hci_dev *hdev = file->private_data;
685         char buf[32];
686         size_t buf_size = min(count, (sizeof(buf)-1));
687         bool enable;
688
689         if (test_bit(HCI_UP, &hdev->flags))
690                 return -EBUSY;
691
692         if (copy_from_user(buf, user_buf, buf_size))
693                 return -EFAULT;
694
695         buf[buf_size] = '\0';
696         if (strtobool(buf, &enable))
697                 return -EINVAL;
698
699         if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
700                 return -EALREADY;
701
702         change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
703
704         return count;
705 }
706
707 static const struct file_operations force_static_address_fops = {
708         .open           = simple_open,
709         .read           = force_static_address_read,
710         .write          = force_static_address_write,
711         .llseek         = default_llseek,
712 };
713
714 static int white_list_show(struct seq_file *f, void *ptr)
715 {
716         struct hci_dev *hdev = f->private;
717         struct bdaddr_list *b;
718
719         hci_dev_lock(hdev);
720         list_for_each_entry(b, &hdev->le_white_list, list)
721                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722         hci_dev_unlock(hdev);
723
724         return 0;
725 }
726
727 static int white_list_open(struct inode *inode, struct file *file)
728 {
729         return single_open(file, white_list_show, inode->i_private);
730 }
731
732 static const struct file_operations white_list_fops = {
733         .open           = white_list_open,
734         .read           = seq_read,
735         .llseek         = seq_lseek,
736         .release        = single_release,
737 };
738
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740 {
741         struct hci_dev *hdev = f->private;
742         struct list_head *p, *n;
743
744         hci_dev_lock(hdev);
745         list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746                 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747                 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748                            &irk->bdaddr, irk->addr_type,
749                            16, irk->val, &irk->rpa);
750         }
751         hci_dev_unlock(hdev);
752
753         return 0;
754 }
755
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757 {
758         return single_open(file, identity_resolving_keys_show,
759                            inode->i_private);
760 }
761
762 static const struct file_operations identity_resolving_keys_fops = {
763         .open           = identity_resolving_keys_open,
764         .read           = seq_read,
765         .llseek         = seq_lseek,
766         .release        = single_release,
767 };
768
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
770 {
771         struct hci_dev *hdev = f->private;
772         struct list_head *p, *n;
773
774         hci_dev_lock(hdev);
775         list_for_each_safe(p, n, &hdev->long_term_keys) {
776                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780                            __le64_to_cpu(ltk->rand), 16, ltk->val);
781         }
782         hci_dev_unlock(hdev);
783
784         return 0;
785 }
786
787 static int long_term_keys_open(struct inode *inode, struct file *file)
788 {
789         return single_open(file, long_term_keys_show, inode->i_private);
790 }
791
792 static const struct file_operations long_term_keys_fops = {
793         .open           = long_term_keys_open,
794         .read           = seq_read,
795         .llseek         = seq_lseek,
796         .release        = single_release,
797 };
798
799 static int conn_min_interval_set(void *data, u64 val)
800 {
801         struct hci_dev *hdev = data;
802
803         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804                 return -EINVAL;
805
806         hci_dev_lock(hdev);
807         hdev->le_conn_min_interval = val;
808         hci_dev_unlock(hdev);
809
810         return 0;
811 }
812
813 static int conn_min_interval_get(void *data, u64 *val)
814 {
815         struct hci_dev *hdev = data;
816
817         hci_dev_lock(hdev);
818         *val = hdev->le_conn_min_interval;
819         hci_dev_unlock(hdev);
820
821         return 0;
822 }
823
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825                         conn_min_interval_set, "%llu\n");
826
827 static int conn_max_interval_set(void *data, u64 val)
828 {
829         struct hci_dev *hdev = data;
830
831         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832                 return -EINVAL;
833
834         hci_dev_lock(hdev);
835         hdev->le_conn_max_interval = val;
836         hci_dev_unlock(hdev);
837
838         return 0;
839 }
840
841 static int conn_max_interval_get(void *data, u64 *val)
842 {
843         struct hci_dev *hdev = data;
844
845         hci_dev_lock(hdev);
846         *val = hdev->le_conn_max_interval;
847         hci_dev_unlock(hdev);
848
849         return 0;
850 }
851
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853                         conn_max_interval_set, "%llu\n");
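/* LE connection intervals are in units of 1.25 ms, so the range
 * 0x0006-0x0c80 accepted above corresponds to 7.5 ms - 4 s.
 */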
854
855 static int conn_latency_set(void *data, u64 val)
856 {
857         struct hci_dev *hdev = data;
858
859         if (val > 0x01f3)
860                 return -EINVAL;
861
862         hci_dev_lock(hdev);
863         hdev->le_conn_latency = val;
864         hci_dev_unlock(hdev);
865
866         return 0;
867 }
868
869 static int conn_latency_get(void *data, u64 *val)
870 {
871         struct hci_dev *hdev = data;
872
873         hci_dev_lock(hdev);
874         *val = hdev->le_conn_latency;
875         hci_dev_unlock(hdev);
876
877         return 0;
878 }
879
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881                         conn_latency_set, "%llu\n");
882
883 static int supervision_timeout_set(void *data, u64 val)
884 {
885         struct hci_dev *hdev = data;
886
887         if (val < 0x000a || val > 0x0c80)
888                 return -EINVAL;
889
890         hci_dev_lock(hdev);
891         hdev->le_supv_timeout = val;
892         hci_dev_unlock(hdev);
893
894         return 0;
895 }
896
897 static int supervision_timeout_get(void *data, u64 *val)
898 {
899         struct hci_dev *hdev = data;
900
901         hci_dev_lock(hdev);
902         *val = hdev->le_supv_timeout;
903         hci_dev_unlock(hdev);
904
905         return 0;
906 }
907
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909                         supervision_timeout_set, "%llu\n");
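/* The LE supervision timeout is in units of 10 ms, so the range
 * 0x000a-0x0c80 accepted above corresponds to 100 ms - 32 s.
 */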
910
911 static int adv_channel_map_set(void *data, u64 val)
912 {
913         struct hci_dev *hdev = data;
914
915         if (val < 0x01 || val > 0x07)
916                 return -EINVAL;
917
918         hci_dev_lock(hdev);
919         hdev->le_adv_channel_map = val;
920         hci_dev_unlock(hdev);
921
922         return 0;
923 }
924
925 static int adv_channel_map_get(void *data, u64 *val)
926 {
927         struct hci_dev *hdev = data;
928
929         hci_dev_lock(hdev);
930         *val = hdev->le_adv_channel_map;
931         hci_dev_unlock(hdev);
932
933         return 0;
934 }
935
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937                         adv_channel_map_set, "%llu\n");
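/* The advertising channel map is a bitmask of the three LE
 * advertising channels: 0x01 = channel 37, 0x02 = channel 38 and
 * 0x04 = channel 39, so 0x07 enables all of them.
 */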
938
939 static int device_list_show(struct seq_file *f, void *ptr)
940 {
941         struct hci_dev *hdev = f->private;
942         struct hci_conn_params *p;
943
944         hci_dev_lock(hdev);
945         list_for_each_entry(p, &hdev->le_conn_params, list) {
946                 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
947                            p->auto_connect);
948         }
949         hci_dev_unlock(hdev);
950
951         return 0;
952 }
953
954 static int device_list_open(struct inode *inode, struct file *file)
955 {
956         return single_open(file, device_list_show, inode->i_private);
957 }
958
959 static const struct file_operations device_list_fops = {
960         .open           = device_list_open,
961         .read           = seq_read,
962         .llseek         = seq_lseek,
963         .release        = single_release,
964 };
965
966 /* ---- HCI requests ---- */
967
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
969 {
970         BT_DBG("%s result 0x%2.2x", hdev->name, result);
971
972         if (hdev->req_status == HCI_REQ_PEND) {
973                 hdev->req_result = result;
974                 hdev->req_status = HCI_REQ_DONE;
975                 wake_up_interruptible(&hdev->req_wait_q);
976         }
977 }
978
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
980 {
981         BT_DBG("%s err 0x%2.2x", hdev->name, err);
982
983         if (hdev->req_status == HCI_REQ_PEND) {
984                 hdev->req_result = err;
985                 hdev->req_status = HCI_REQ_CANCELED;
986                 wake_up_interruptible(&hdev->req_wait_q);
987         }
988 }
989
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
991                                             u8 event)
992 {
993         struct hci_ev_cmd_complete *ev;
994         struct hci_event_hdr *hdr;
995         struct sk_buff *skb;
996
997         hci_dev_lock(hdev);
998
999         skb = hdev->recv_evt;
1000         hdev->recv_evt = NULL;
1001
1002         hci_dev_unlock(hdev);
1003
1004         if (!skb)
1005                 return ERR_PTR(-ENODATA);
1006
1007         if (skb->len < sizeof(*hdr)) {
1008                 BT_ERR("Too short HCI event");
1009                 goto failed;
1010         }
1011
1012         hdr = (void *) skb->data;
1013         skb_pull(skb, HCI_EVENT_HDR_SIZE);
1014
1015         if (event) {
1016                 if (hdr->evt != event)
1017                         goto failed;
1018                 return skb;
1019         }
1020
1021         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1023                 goto failed;
1024         }
1025
1026         if (skb->len < sizeof(*ev)) {
1027                 BT_ERR("Too short cmd_complete event");
1028                 goto failed;
1029         }
1030
1031         ev = (void *) skb->data;
1032         skb_pull(skb, sizeof(*ev));
1033
1034         if (opcode == __le16_to_cpu(ev->opcode))
1035                 return skb;
1036
1037         BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
1038                __le16_to_cpu(ev->opcode));
1039
1040 failed:
1041         kfree_skb(skb);
1042         return ERR_PTR(-ENODATA);
1043 }
1044
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046                                   const void *param, u8 event, u32 timeout)
1047 {
1048         DECLARE_WAITQUEUE(wait, current);
1049         struct hci_request req;
1050         int err = 0;
1051
1052         BT_DBG("%s", hdev->name);
1053
1054         hci_req_init(&req, hdev);
1055
1056         hci_req_add_ev(&req, opcode, plen, param, event);
1057
1058         hdev->req_status = HCI_REQ_PEND;
1059
1060         err = hci_req_run(&req, hci_req_sync_complete);
1061         if (err < 0)
1062                 return ERR_PTR(err);
1063
1064         add_wait_queue(&hdev->req_wait_q, &wait);
1065         set_current_state(TASK_INTERRUPTIBLE);
1066
1067         schedule_timeout(timeout);
1068
1069         remove_wait_queue(&hdev->req_wait_q, &wait);
1070
1071         if (signal_pending(current))
1072                 return ERR_PTR(-EINTR);
1073
1074         switch (hdev->req_status) {
1075         case HCI_REQ_DONE:
1076                 err = -bt_to_errno(hdev->req_result);
1077                 break;
1078
1079         case HCI_REQ_CANCELED:
1080                 err = -hdev->req_result;
1081                 break;
1082
1083         default:
1084                 err = -ETIMEDOUT;
1085                 break;
1086         }
1087
1088         hdev->req_status = hdev->req_result = 0;
1089
1090         BT_DBG("%s end: err %d", hdev->name, err);
1091
1092         if (err < 0)
1093                 return ERR_PTR(err);
1094
1095         return hci_get_cmd_complete(hdev, opcode, event);
1096 }
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1098
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100                                const void *param, u32 timeout)
1101 {
1102         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1103 }
1104 EXPORT_SYMBOL(__hci_cmd_sync);
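/* Example (sketch): a caller could use __hci_cmd_sync() to issue a
 * command and block until its Command Complete event arrives. The
 * vendor opcode and parameter below are made up for illustration:
 *
 *      struct sk_buff *skb;
 *      u8 param = 0x01;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      ... skb->data now holds the return parameters ...
 *      kfree_skb(skb);
 *
 * The returned skb carries the Command Complete parameters and must
 * be freed by the caller; dut_mode_write() above uses this exact
 * pattern.
 */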
1105
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108                           void (*func)(struct hci_request *req,
1109                                       unsigned long opt),
1110                           unsigned long opt, __u32 timeout)
1111 {
1112         struct hci_request req;
1113         DECLARE_WAITQUEUE(wait, current);
1114         int err = 0;
1115
1116         BT_DBG("%s start", hdev->name);
1117
1118         hci_req_init(&req, hdev);
1119
1120         hdev->req_status = HCI_REQ_PEND;
1121
1122         func(&req, opt);
1123
1124         err = hci_req_run(&req, hci_req_sync_complete);
1125         if (err < 0) {
1126                 hdev->req_status = 0;
1127
1128                 /* ENODATA means the HCI request command queue is empty.
1129                  * This can happen when a request with conditionals doesn't
1130                  * trigger any commands to be sent. This is normal behavior
1131                  * and should not trigger an error return.
1132                  */
1133                 if (err == -ENODATA)
1134                         return 0;
1135
1136                 return err;
1137         }
1138
1139         add_wait_queue(&hdev->req_wait_q, &wait);
1140         set_current_state(TASK_INTERRUPTIBLE);
1141
1142         schedule_timeout(timeout);
1143
1144         remove_wait_queue(&hdev->req_wait_q, &wait);
1145
1146         if (signal_pending(current))
1147                 return -EINTR;
1148
1149         switch (hdev->req_status) {
1150         case HCI_REQ_DONE:
1151                 err = -bt_to_errno(hdev->req_result);
1152                 break;
1153
1154         case HCI_REQ_CANCELED:
1155                 err = -hdev->req_result;
1156                 break;
1157
1158         default:
1159                 err = -ETIMEDOUT;
1160                 break;
1161         }
1162
1163         hdev->req_status = hdev->req_result = 0;
1164
1165         BT_DBG("%s end: err %d", hdev->name, err);
1166
1167         return err;
1168 }
1169
1170 static int hci_req_sync(struct hci_dev *hdev,
1171                         void (*req)(struct hci_request *req,
1172                                     unsigned long opt),
1173                         unsigned long opt, __u32 timeout)
1174 {
1175         int ret;
1176
1177         if (!test_bit(HCI_UP, &hdev->flags))
1178                 return -ENETDOWN;
1179
1180         /* Serialize all requests */
1181         hci_req_lock(hdev);
1182         ret = __hci_req_sync(hdev, req, opt, timeout);
1183         hci_req_unlock(hdev);
1184
1185         return ret;
1186 }
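/* Requests follow a builder pattern: the req callback queues one or
 * more commands with hci_req_add(), hci_req_run() submits the whole
 * batch to the command work queue, and hci_req_sync_complete() wakes
 * the waiter once the final command in the batch completes.
 */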
1187
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1189 {
1190         BT_DBG("%s %ld", req->hdev->name, opt);
1191
1192         /* Reset device */
1193         set_bit(HCI_RESET, &req->hdev->flags);
1194         hci_req_add(req, HCI_OP_RESET, 0, NULL);
1195 }
1196
1197 static void bredr_init(struct hci_request *req)
1198 {
1199         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1200
1201         /* Read Local Supported Features */
1202         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1203
1204         /* Read Local Version */
1205         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1206
1207         /* Read BD Address */
1208         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1209 }
1210
1211 static void amp_init(struct hci_request *req)
1212 {
1213         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1214
1215         /* Read Local Version */
1216         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1217
1218         /* Read Local Supported Commands */
1219         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1220
1221         /* Read Local Supported Features */
1222         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1223
1224         /* Read Local AMP Info */
1225         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1226
1227         /* Read Data Blk size */
1228         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1229
1230         /* Read Flow Control Mode */
1231         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1232
1233         /* Read Location Data */
1234         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1235 }
1236
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1238 {
1239         struct hci_dev *hdev = req->hdev;
1240
1241         BT_DBG("%s %ld", hdev->name, opt);
1242
1243         /* Reset */
1244         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245                 hci_reset_req(req, 0);
1246
1247         switch (hdev->dev_type) {
1248         case HCI_BREDR:
1249                 bredr_init(req);
1250                 break;
1251
1252         case HCI_AMP:
1253                 amp_init(req);
1254                 break;
1255
1256         default:
1257                 BT_ERR("Unknown device type %d", hdev->dev_type);
1258                 break;
1259         }
1260 }
1261
1262 static void bredr_setup(struct hci_request *req)
1263 {
1264         struct hci_dev *hdev = req->hdev;
1265
1266         __le16 param;
1267         __u8 flt_type;
1268
1269         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1271
1272         /* Read Class of Device */
1273         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1274
1275         /* Read Local Name */
1276         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1277
1278         /* Read Voice Setting */
1279         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1280
1281         /* Read Number of Supported IAC */
1282         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1283
1284         /* Read Current IAC LAP */
1285         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1286
1287         /* Clear Event Filters */
1288         flt_type = HCI_FLT_CLEAR_ALL;
1289         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1290
1291         /* Connection accept timeout ~20 secs */
1292         param = cpu_to_le16(0x7d00);
1293         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1294
1295         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296          * but it does not support page scan related HCI commands.
1297          */
1298         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1301         }
1302 }
1303
1304 static void le_setup(struct hci_request *req)
1305 {
1306         struct hci_dev *hdev = req->hdev;
1307
1308         /* Read LE Buffer Size */
1309         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1310
1311         /* Read LE Local Supported Features */
1312         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1313
1314         /* Read LE Supported States */
1315         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1316
1317         /* Read LE Advertising Channel TX Power */
1318         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1319
1320         /* Read LE White List Size */
1321         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1322
1323         /* Clear LE White List */
1324         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1325
1326         /* LE-only controllers have LE implicitly enabled */
1327         if (!lmp_bredr_capable(hdev))
1328                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1329 }
1330
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1332 {
1333         if (lmp_ext_inq_capable(hdev))
1334                 return 0x02;
1335
1336         if (lmp_inq_rssi_capable(hdev))
1337                 return 0x01;
1338
1339         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340             hdev->lmp_subver == 0x0757)
1341                 return 0x01;
1342
1343         if (hdev->manufacturer == 15) {
1344                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345                         return 0x01;
1346                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347                         return 0x01;
1348                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349                         return 0x01;
1350         }
1351
1352         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353             hdev->lmp_subver == 0x1805)
1354                 return 0x01;
1355
1356         return 0x00;
1357 }
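/* The returned inquiry mode is the value written below: 0x00 for
 * standard results, 0x01 for results with RSSI and 0x02 for the
 * extended format. The manufacturer/revision checks above cover
 * older controllers that handle RSSI results without advertising
 * the corresponding feature bit.
 */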
1358
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1360 {
1361         u8 mode;
1362
1363         mode = hci_get_inquiry_mode(req->hdev);
1364
1365         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1366 }
1367
1368 static void hci_setup_event_mask(struct hci_request *req)
1369 {
1370         struct hci_dev *hdev = req->hdev;
1371
1372         /* The second byte is 0xff instead of 0x9f (two reserved bits
1373          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374          * command otherwise.
1375          */
1376         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1377
1378         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1379          * any event mask for pre-1.2 devices.
1380          */
1381         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382                 return;
1383
1384         if (lmp_bredr_capable(hdev)) {
1385                 events[4] |= 0x01; /* Flow Specification Complete */
1386                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388                 events[5] |= 0x08; /* Synchronous Connection Complete */
1389                 events[5] |= 0x10; /* Synchronous Connection Changed */
1390         } else {
1391                 /* Use a different default for LE-only devices */
1392                 memset(events, 0, sizeof(events));
1393                 events[0] |= 0x10; /* Disconnection Complete */
1394                 events[0] |= 0x80; /* Encryption Change */
1395                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396                 events[1] |= 0x20; /* Command Complete */
1397                 events[1] |= 0x40; /* Command Status */
1398                 events[1] |= 0x80; /* Hardware Error */
1399                 events[2] |= 0x04; /* Number of Completed Packets */
1400                 events[3] |= 0x02; /* Data Buffer Overflow */
1401                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1402         }
1403
1404         if (lmp_inq_rssi_capable(hdev))
1405                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406
1407         if (lmp_sniffsubr_capable(hdev))
1408                 events[5] |= 0x20; /* Sniff Subrating */
1409
1410         if (lmp_pause_enc_capable(hdev))
1411                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1412
1413         if (lmp_ext_inq_capable(hdev))
1414                 events[5] |= 0x40; /* Extended Inquiry Result */
1415
1416         if (lmp_no_flush_capable(hdev))
1417                 events[7] |= 0x01; /* Enhanced Flush Complete */
1418
1419         if (lmp_lsto_capable(hdev))
1420                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1421
1422         if (lmp_ssp_capable(hdev)) {
1423                 events[6] |= 0x01;      /* IO Capability Request */
1424                 events[6] |= 0x02;      /* IO Capability Response */
1425                 events[6] |= 0x04;      /* User Confirmation Request */
1426                 events[6] |= 0x08;      /* User Passkey Request */
1427                 events[6] |= 0x10;      /* Remote OOB Data Request */
1428                 events[6] |= 0x20;      /* Simple Pairing Complete */
1429                 events[7] |= 0x04;      /* User Passkey Notification */
1430                 events[7] |= 0x08;      /* Keypress Notification */
1431                 events[7] |= 0x10;      /* Remote Host Supported
1432                                          * Features Notification
1433                                          */
1434         }
1435
1436         if (lmp_le_capable(hdev))
1437                 events[7] |= 0x20;      /* LE Meta-Event */
1438
1439         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1440 }
1441
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1443 {
1444         struct hci_dev *hdev = req->hdev;
1445
1446         if (lmp_bredr_capable(hdev))
1447                 bredr_setup(req);
1448         else
1449                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1450
1451         if (lmp_le_capable(hdev))
1452                 le_setup(req);
1453
1454         hci_setup_event_mask(req);
1455
1456         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457          * local supported commands HCI command.
1458          */
1459         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1461
1462         if (lmp_ssp_capable(hdev)) {
1463                 /* When SSP is available, the host features page
1464                  * should be available as well. However, some
1465                  * controllers report max_page as 0 as long as SSP
1466                  * has not been enabled. To get proper debugging
1467                  * output, force max_page to at least 1.
1468                  */
1469                 hdev->max_page = 0x01;
1470
1471                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472                         u8 mode = 0x01;
1473                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474                                     sizeof(mode), &mode);
1475                 } else {
1476                         struct hci_cp_write_eir cp;
1477
1478                         memset(hdev->eir, 0, sizeof(hdev->eir));
1479                         memset(&cp, 0, sizeof(cp));
1480
1481                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1482                 }
1483         }
1484
1485         if (lmp_inq_rssi_capable(hdev))
1486                 hci_setup_inquiry_mode(req);
1487
1488         if (lmp_inq_tx_pwr_capable(hdev))
1489                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1490
1491         if (lmp_ext_feat_capable(hdev)) {
1492                 struct hci_cp_read_local_ext_features cp;
1493
1494                 cp.page = 0x01;
1495                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496                             sizeof(cp), &cp);
1497         }
1498
1499         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500                 u8 enable = 1;
1501                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502                             &enable);
1503         }
1504 }
1505
1506 static void hci_setup_link_policy(struct hci_request *req)
1507 {
1508         struct hci_dev *hdev = req->hdev;
1509         struct hci_cp_write_def_link_policy cp;
1510         u16 link_policy = 0;
1511
1512         if (lmp_rswitch_capable(hdev))
1513                 link_policy |= HCI_LP_RSWITCH;
1514         if (lmp_hold_capable(hdev))
1515                 link_policy |= HCI_LP_HOLD;
1516         if (lmp_sniff_capable(hdev))
1517                 link_policy |= HCI_LP_SNIFF;
1518         if (lmp_park_capable(hdev))
1519                 link_policy |= HCI_LP_PARK;
1520
1521         cp.policy = cpu_to_le16(link_policy);
1522         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1523 }
1524
1525 static void hci_set_le_support(struct hci_request *req)
1526 {
1527         struct hci_dev *hdev = req->hdev;
1528         struct hci_cp_write_le_host_supported cp;
1529
1530         /* LE-only devices do not support explicit enablement */
1531         if (!lmp_bredr_capable(hdev))
1532                 return;
1533
1534         memset(&cp, 0, sizeof(cp));
1535
1536         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537                 cp.le = 0x01;
1538                 cp.simul = lmp_le_br_capable(hdev);
1539         }
1540
1541         if (cp.le != lmp_host_le_capable(hdev))
1542                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543                             &cp);
1544 }
1545
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1547 {
1548         struct hci_dev *hdev = req->hdev;
1549         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551         /* If the Connectionless Slave Broadcast master role is supported,
1552          * enable all necessary events for it.
1553          */
1554         if (lmp_csb_master_capable(hdev)) {
1555                 events[1] |= 0x40;      /* Triggered Clock Capture */
1556                 events[1] |= 0x80;      /* Synchronization Train Complete */
1557                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1558                 events[2] |= 0x20;      /* CSB Channel Map Change */
1559         }
1560
1561         /* If the Connectionless Slave Broadcast slave role is supported,
1562          * enable all necessary events for it.
1563          */
1564         if (lmp_csb_slave_capable(hdev)) {
1565                 events[2] |= 0x01;      /* Synchronization Train Received */
1566                 events[2] |= 0x02;      /* CSB Receive */
1567                 events[2] |= 0x04;      /* CSB Timeout */
1568                 events[2] |= 0x08;      /* Truncated Page Complete */
1569         }
1570
1571         /* Enable Authenticated Payload Timeout Expired event if supported */
1572         if (lmp_ping_capable(hdev))
1573                 events[2] |= 0x80;
1574
1575         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576 }
1577
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1579 {
1580         struct hci_dev *hdev = req->hdev;
1581         u8 p;
1582
1583         /* Some Broadcom based Bluetooth controllers do not support the
1584          * Delete Stored Link Key command. They are clearly indicating its
1585          * absence in the bit mask of supported commands.
1586          *
1587          * Check the supported commands and only send the command if it is
1588          * marked as supported. If it is not supported, assume that the
1589          * controller has no actual support for stored link keys, which
1590          * makes this command redundant anyway.
1591          *
1592          * Some controllers indicate that they support handling deleting
1593          * stored link keys, but they don't. The quirk lets a driver
1594          * just disable this command.
1595          */
1596         if (hdev->commands[6] & 0x80 &&
1597             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598                 struct hci_cp_delete_stored_link_key cp;
1599
1600                 bacpy(&cp.bdaddr, BDADDR_ANY);
1601                 cp.delete_all = 0x01;
1602                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603                             sizeof(cp), &cp);
1604         }
1605
1606         if (hdev->commands[5] & 0x10)
1607                 hci_setup_link_policy(req);
1608
1609         if (lmp_le_capable(hdev)) {
1610                 u8 events[8];
1611
1612                 memset(events, 0, sizeof(events));
1613                 events[0] = 0x1f;
1614
1615                 /* If controller supports the Connection Parameters Request
1616                  * Link Layer Procedure, enable the corresponding event.
1617                  */
1618                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619                         events[0] |= 0x20;      /* LE Remote Connection
1620                                                  * Parameter Request
1621                                                  */
1622
1623                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624                             events);
1625
1626                 hci_set_le_support(req);
1627         }
1628
1629         /* Read features beyond page 1 if available */
1630         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631                 struct hci_cp_read_local_ext_features cp;
1632
1633                 cp.page = p;
1634                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635                             sizeof(cp), &cp);
1636         }
1637 }
1638
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640 {
1641         struct hci_dev *hdev = req->hdev;
1642
1643         /* Set event mask page 2 if the HCI command for it is supported */
1644         if (hdev->commands[22] & 0x04)
1645                 hci_set_event_mask_page_2(req);
1646
1647         /* Check for Synchronization Train support */
1648         if (lmp_sync_train_capable(hdev))
1649                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1650
1651         /* Enable Secure Connections if supported and configured */
1652         if ((lmp_sc_capable(hdev) ||
1653              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655                 u8 support = 0x01;
1656                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657                             sizeof(support), &support);
1658         }
1659 }
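/* Controller bring-up is staged: hci_init1_req resets the controller
 * and reads basic information, hci_init2_req configures transport
 * basics and the event mask, hci_init3_req applies feature-dependent
 * settings such as link policy and the LE event mask, and
 * hci_init4_req handles late extensions like event mask page 2 and
 * Secure Connections.
 */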
1660
1661 static int __hci_init(struct hci_dev *hdev)
1662 {
1663         int err;
1664
1665         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666         if (err < 0)
1667                 return err;
1668
1669         /* The Device Under Test (DUT) mode is special and available for
1670          * all controller types, so create its debugfs entry early on.
1671          */
1672         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674                                     &dut_mode_fops);
1675         }
1676
1677         /* The HCI_BREDR device type covers single-mode BR/EDR, single-mode
1678          * LE and dual-mode BR/EDR/LE controllers. AMP controllers only
1679          * need the first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795 {
1796         struct hci_dev *hdev = req->hdev;
1797
1798         BT_DBG("%s %ld", hdev->name, opt);
1799
1800         /* Reset */
1801         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802                 hci_reset_req(req, 0);
1803
1804         /* Read Local Version */
1805         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
1807         /* Read BD Address */
1808         if (hdev->set_bdaddr)
1809                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810 }
1811
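/* Minimal init for controllers that come up unconfigured: only the
 * hci_init0_req stage runs, which resets the controller (unless the
 * reset-on-close quirk is set) and reads the local version, plus the
 * BD address when the driver provides a set_bdaddr callback.
 */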
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1813 {
1814         int err;
1815
1816         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1817         if (err < 0)
1818                 return err;
1819
1820         return 0;
1821 }
1822
1823 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1824 {
1825         __u8 scan = opt;
1826
1827         BT_DBG("%s %x", req->hdev->name, scan);
1828
1829         /* Inquiry and Page scans */
1830         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1831 }
1832
1833 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1834 {
1835         __u8 auth = opt;
1836
1837         BT_DBG("%s %x", req->hdev->name, auth);
1838
1839         /* Authentication */
1840         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1841 }
1842
1843 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1844 {
1845         __u8 encrypt = opt;
1846
1847         BT_DBG("%s %x", req->hdev->name, encrypt);
1848
1849         /* Encryption */
1850         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1851 }
1852
1853 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __le16 policy = cpu_to_le16(opt);
1856
1857         BT_DBG("%s %x", req->hdev->name, policy);
1858
1859         /* Default link policy */
1860         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1861 }
1862
1863 /* Get HCI device by index.
1864  * Device is held on return. */
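/* Illustrative use (the reference must be released with hci_dev_put()):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use the device ...
 *		hci_dev_put(hdev);
 *	}
 */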
1865 struct hci_dev *hci_dev_get(int index)
1866 {
1867         struct hci_dev *hdev = NULL, *d;
1868
1869         BT_DBG("%d", index);
1870
1871         if (index < 0)
1872                 return NULL;
1873
1874         read_lock(&hci_dev_list_lock);
1875         list_for_each_entry(d, &hci_dev_list, list) {
1876                 if (d->id == index) {
1877                         hdev = hci_dev_hold(d);
1878                         break;
1879                 }
1880         }
1881         read_unlock(&hci_dev_list_lock);
1882         return hdev;
1883 }
1884
1885 /* ---- Inquiry support ---- */
1886
1887 bool hci_discovery_active(struct hci_dev *hdev)
1888 {
1889         struct discovery_state *discov = &hdev->discovery;
1890
1891         switch (discov->state) {
1892         case DISCOVERY_FINDING:
1893         case DISCOVERY_RESOLVING:
1894                 return true;
1895
1896         default:
1897                 return false;
1898         }
1899 }
1900
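/* Drive the discovery state machine. Transitions into and out of the
 * active states notify userspace via mgmt_discovering(); entering
 * DISCOVERY_STOPPED additionally re-evaluates background scanning.
 */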
1901 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1902 {
1903         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1904
1905         if (hdev->discovery.state == state)
1906                 return;
1907
1908         switch (state) {
1909         case DISCOVERY_STOPPED:
1910                 hci_update_background_scan(hdev);
1911
1912                 if (hdev->discovery.state != DISCOVERY_STARTING)
1913                         mgmt_discovering(hdev, 0);
1914                 break;
1915         case DISCOVERY_STARTING:
1916                 break;
1917         case DISCOVERY_FINDING:
1918                 mgmt_discovering(hdev, 1);
1919                 break;
1920         case DISCOVERY_RESOLVING:
1921                 break;
1922         case DISCOVERY_STOPPING:
1923                 break;
1924         }
1925
1926         hdev->discovery.state = state;
1927 }
1928
1929 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1930 {
1931         struct discovery_state *cache = &hdev->discovery;
1932         struct inquiry_entry *p, *n;
1933
1934         list_for_each_entry_safe(p, n, &cache->all, all) {
1935                 list_del(&p->all);
1936                 kfree(p);
1937         }
1938
1939         INIT_LIST_HEAD(&cache->unknown);
1940         INIT_LIST_HEAD(&cache->resolve);
1941 }
1942
1943 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1944                                                bdaddr_t *bdaddr)
1945 {
1946         struct discovery_state *cache = &hdev->discovery;
1947         struct inquiry_entry *e;
1948
1949         BT_DBG("cache %p, %pMR", cache, bdaddr);
1950
1951         list_for_each_entry(e, &cache->all, all) {
1952                 if (!bacmp(&e->data.bdaddr, bdaddr))
1953                         return e;
1954         }
1955
1956         return NULL;
1957 }
1958
1959 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1960                                                        bdaddr_t *bdaddr)
1961 {
1962         struct discovery_state *cache = &hdev->discovery;
1963         struct inquiry_entry *e;
1964
1965         BT_DBG("cache %p, %pMR", cache, bdaddr);
1966
1967         list_for_each_entry(e, &cache->unknown, list) {
1968                 if (!bacmp(&e->data.bdaddr, bdaddr))
1969                         return e;
1970         }
1971
1972         return NULL;
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1976                                                        bdaddr_t *bdaddr,
1977                                                        int state)
1978 {
1979         struct discovery_state *cache = &hdev->discovery;
1980         struct inquiry_entry *e;
1981
1982         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1983
1984         list_for_each_entry(e, &cache->resolve, list) {
1985                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1986                         return e;
1987                 if (!bacmp(&e->data.bdaddr, bdaddr))
1988                         return e;
1989         }
1990
1991         return NULL;
1992 }
1993
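/* Re-insert the entry so the resolve list stays ordered for name
 * resolution: entries already in NAME_PENDING state keep their position
 * and the rest are sorted by ascending |RSSI|, so devices with the
 * strongest signal get their names resolved first.
 */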
1994 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1995                                       struct inquiry_entry *ie)
1996 {
1997         struct discovery_state *cache = &hdev->discovery;
1998         struct list_head *pos = &cache->resolve;
1999         struct inquiry_entry *p;
2000
2001         list_del(&ie->list);
2002
2003         list_for_each_entry(p, &cache->resolve, list) {
2004                 if (p->name_state != NAME_PENDING &&
2005                     abs(p->data.rssi) >= abs(ie->data.rssi))
2006                         break;
2007                 pos = &p->list;
2008         }
2009
2010         list_add(&ie->list, pos);
2011 }
2012
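/* Merge an inquiry result into the discovery cache and return MGMT flags
 * for the Device Found event: MGMT_DEV_FOUND_LEGACY_PAIRING when either
 * side lacks SSP and MGMT_DEV_FOUND_CONFIRM_NAME when the remote name
 * still needs to be confirmed.
 */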
2013 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2014                              bool name_known)
2015 {
2016         struct discovery_state *cache = &hdev->discovery;
2017         struct inquiry_entry *ie;
2018         u32 flags = 0;
2019
2020         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2021
2022         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2023
2024         if (!data->ssp_mode)
2025                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2026
2027         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2028         if (ie) {
2029                 if (!ie->data.ssp_mode)
2030                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2031
2032                 if (ie->name_state == NAME_NEEDED &&
2033                     data->rssi != ie->data.rssi) {
2034                         ie->data.rssi = data->rssi;
2035                         hci_inquiry_cache_update_resolve(hdev, ie);
2036                 }
2037
2038                 goto update;
2039         }
2040
2041         /* Entry not in the cache. Add a new one. */
2042         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2043         if (!ie) {
2044                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2045                 goto done;
2046         }
2047
2048         list_add(&ie->all, &cache->all);
2049
2050         if (name_known) {
2051                 ie->name_state = NAME_KNOWN;
2052         } else {
2053                 ie->name_state = NAME_NOT_KNOWN;
2054                 list_add(&ie->list, &cache->unknown);
2055         }
2056
2057 update:
2058         if (name_known && ie->name_state != NAME_KNOWN &&
2059             ie->name_state != NAME_PENDING) {
2060                 ie->name_state = NAME_KNOWN;
2061                 list_del(&ie->list);
2062         }
2063
2064         memcpy(&ie->data, data, sizeof(*data));
2065         ie->timestamp = jiffies;
2066         cache->timestamp = jiffies;
2067
2068         if (ie->name_state == NAME_NOT_KNOWN)
2069                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2070
2071 done:
2072         return flags;
2073 }
2074
2075 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2076 {
2077         struct discovery_state *cache = &hdev->discovery;
2078         struct inquiry_info *info = (struct inquiry_info *) buf;
2079         struct inquiry_entry *e;
2080         int copied = 0;
2081
2082         list_for_each_entry(e, &cache->all, all) {
2083                 struct inquiry_data *data = &e->data;
2084
2085                 if (copied >= num)
2086                         break;
2087
2088                 bacpy(&info->bdaddr, &data->bdaddr);
2089                 info->pscan_rep_mode    = data->pscan_rep_mode;
2090                 info->pscan_period_mode = data->pscan_period_mode;
2091                 info->pscan_mode        = data->pscan_mode;
2092                 memcpy(info->dev_class, data->dev_class, 3);
2093                 info->clock_offset      = data->clock_offset;
2094
2095                 info++;
2096                 copied++;
2097         }
2098
2099         BT_DBG("cache %p, copied %d", cache, copied);
2100         return copied;
2101 }
2102
2103 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2104 {
2105         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2106         struct hci_dev *hdev = req->hdev;
2107         struct hci_cp_inquiry cp;
2108
2109         BT_DBG("%s", hdev->name);
2110
2111         if (test_bit(HCI_INQUIRY, &hdev->flags))
2112                 return;
2113
2114         /* Start Inquiry */
2115         memcpy(&cp.lap, &ir->lap, 3);
2116         cp.length  = ir->length;
2117         cp.num_rsp = ir->num_rsp;
2118         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2119 }
2120
2121 static int wait_inquiry(void *word)
2122 {
2123         schedule();
2124         return signal_pending(current);
2125 }
2126
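/* Sketch of the matching userspace call (hypothetical hci_sock raw HCI
 * socket; the request layout comes from the BlueZ userspace headers):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *	__u8 lap[3] = { 0x33, 0x8b, 0x9e };	// GIAC 0x9e8b33
 *
 *	buf.ir.dev_id  = 0;			// hci0
 *	buf.ir.length  = 8;			// 8 * 2000 msec timeout below
 *	buf.ir.num_rsp = 0;			// 0 = unlimited, capped at 255
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, lap, 3);
 *	ioctl(hci_sock, HCIINQUIRY, &buf);
 */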
2127 int hci_inquiry(void __user *arg)
2128 {
2129         __u8 __user *ptr = arg;
2130         struct hci_inquiry_req ir;
2131         struct hci_dev *hdev;
2132         int err = 0, do_inquiry = 0, max_rsp;
2133         long timeo;
2134         __u8 *buf;
2135
2136         if (copy_from_user(&ir, ptr, sizeof(ir)))
2137                 return -EFAULT;
2138
2139         hdev = hci_dev_get(ir.dev_id);
2140         if (!hdev)
2141                 return -ENODEV;
2142
2143         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2144                 err = -EBUSY;
2145                 goto done;
2146         }
2147
2148         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2149                 err = -EOPNOTSUPP;
2150                 goto done;
2151         }
2152
2153         if (hdev->dev_type != HCI_BREDR) {
2154                 err = -EOPNOTSUPP;
2155                 goto done;
2156         }
2157
2158         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2159                 err = -EOPNOTSUPP;
2160                 goto done;
2161         }
2162
2163         hci_dev_lock(hdev);
2164         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2165             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2166                 hci_inquiry_cache_flush(hdev);
2167                 do_inquiry = 1;
2168         }
2169         hci_dev_unlock(hdev);
2170
2171         timeo = ir.length * msecs_to_jiffies(2000);
2172
2173         if (do_inquiry) {
2174                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2175                                    timeo);
2176                 if (err < 0)
2177                         goto done;
2178
2179                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2180                  * cleared). If it is interrupted by a signal, return -EINTR.
2181                  */
2182                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2183                                 TASK_INTERRUPTIBLE))
2184                         return -EINTR;
2185         }
2186
2187         /* For an unlimited number of responses, use a buffer with room
2188          * for 255 entries.
2189          */
2190         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2191
2192         /* inquiry_cache_dump() can't sleep, so dump into a temporary
2193          * buffer first and copy it to user space afterwards.
2194          */
2195         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2196         if (!buf) {
2197                 err = -ENOMEM;
2198                 goto done;
2199         }
2200
2201         hci_dev_lock(hdev);
2202         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2203         hci_dev_unlock(hdev);
2204
2205         BT_DBG("num_rsp %d", ir.num_rsp);
2206
2207         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2208                 ptr += sizeof(ir);
2209                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2210                                  ir.num_rsp))
2211                         err = -EFAULT;
2212         } else
2213                 err = -EFAULT;
2214
2215         kfree(buf);
2216
2217 done:
2218         hci_dev_put(hdev);
2219         return err;
2220 }
2221
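/* Power-on sequence: validate the rfkill and address preconditions, call
 * the driver's open(), run the setup()/unconfigured-init stages while
 * HCI_INIT is set, program a configured public address via set_bdaddr,
 * and finally run __hci_init() before declaring the device HCI_UP. Any
 * failure tears the transport back down.
 */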
2222 static int hci_dev_do_open(struct hci_dev *hdev)
2223 {
2224         int ret = 0;
2225
2226         BT_DBG("%s %p", hdev->name, hdev);
2227
2228         hci_req_lock(hdev);
2229
2230         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2231                 ret = -ENODEV;
2232                 goto done;
2233         }
2234
2235         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2236             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2237                 /* Check for rfkill but allow the HCI setup stage to
2238                  * proceed (which in itself doesn't cause any RF activity).
2239                  */
2240                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2241                         ret = -ERFKILL;
2242                         goto done;
2243                 }
2244
2245                 /* Check for a valid public address or a configured static
2246                  * random address, but let the HCI setup proceed so it can
2247                  * determine whether a public address is available
2248                  * or not.
2249                  *
2250                  * In case of user channel usage, it is not important
2251                  * if a public address or static random address is
2252                  * available.
2253                  *
2254                  * This check is only valid for BR/EDR controllers
2255                  * since AMP controllers do not have an address.
2256                  */
2257                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258                     hdev->dev_type == HCI_BREDR &&
2259                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261                         ret = -EADDRNOTAVAIL;
2262                         goto done;
2263                 }
2264         }
2265
2266         if (test_bit(HCI_UP, &hdev->flags)) {
2267                 ret = -EALREADY;
2268                 goto done;
2269         }
2270
2271         if (hdev->open(hdev)) {
2272                 ret = -EIO;
2273                 goto done;
2274         }
2275
2276         atomic_set(&hdev->cmd_cnt, 1);
2277         set_bit(HCI_INIT, &hdev->flags);
2278
2279         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2280                 if (hdev->setup)
2281                         ret = hdev->setup(hdev);
2282
2283                 /* The transport driver can set these quirks before
2284                  * creating the HCI device or in its setup callback.
2285                  *
2286                  * In case any of them is set, the controller has to
2287                  * start up as unconfigured.
2288                  */
2289                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2290                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2291                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2292
2293                 /* For an unconfigured controller it is required to
2294                  * read at least the version information provided by
2295                  * the Read Local Version Information command.
2296                  *
2297                  * If the set_bdaddr driver callback is provided, then
2298                  * also the original Bluetooth public device address
2299                  * will be read using the Read BD Address command.
2300                  */
2301                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2302                         ret = __hci_unconf_init(hdev);
2303         }
2304
2305         /* If public address change is configured, ensure that the
2306          * address gets programmed. If the driver does not support
2307          * changing the public address, fail the power on procedure.
2308          */
2309         if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2310                 if (hdev->set_bdaddr)
2311                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2312                 else
2313                         ret = -EADDRNOTAVAIL;
2314         }
2315
2316         if (!ret) {
2317                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2318                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2319                         ret = __hci_init(hdev);
2320         }
2321
2322         clear_bit(HCI_INIT, &hdev->flags);
2323
2324         if (!ret) {
2325                 hci_dev_hold(hdev);
2326                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2327                 set_bit(HCI_UP, &hdev->flags);
2328                 hci_notify(hdev, HCI_DEV_UP);
2329                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2330                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2331                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2332                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2333                     hdev->dev_type == HCI_BREDR) {
2334                         hci_dev_lock(hdev);
2335                         mgmt_powered(hdev, 1);
2336                         hci_dev_unlock(hdev);
2337                 }
2338         } else {
2339                 /* Init failed, cleanup */
2340                 flush_work(&hdev->tx_work);
2341                 flush_work(&hdev->cmd_work);
2342                 flush_work(&hdev->rx_work);
2343
2344                 skb_queue_purge(&hdev->cmd_q);
2345                 skb_queue_purge(&hdev->rx_q);
2346
2347                 if (hdev->flush)
2348                         hdev->flush(hdev);
2349
2350                 if (hdev->sent_cmd) {
2351                         kfree_skb(hdev->sent_cmd);
2352                         hdev->sent_cmd = NULL;
2353                 }
2354
2355                 hdev->close(hdev);
2356                 hdev->flags &= BIT(HCI_RAW);
2357         }
2358
2359 done:
2360         hci_req_unlock(hdev);
2361         return ret;
2362 }
2363
2364 /* ---- HCI ioctl helpers ---- */
2365
2366 int hci_dev_open(__u16 dev)
2367 {
2368         struct hci_dev *hdev;
2369         int err;
2370
2371         hdev = hci_dev_get(dev);
2372         if (!hdev)
2373                 return -ENODEV;
2374
2375         /* Devices that are marked as unconfigured can only be powered
2376          * up as user channel. Trying to bring them up as normal devices
2377          * will result in a failure. Only user channel operation is
2378          * possible.
2379          *
2380          * When this function is called for a user channel, the flag
2381          * HCI_USER_CHANNEL will be set first before attempting to
2382          * open the device.
2383          */
2384         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2385             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2386                 err = -EOPNOTSUPP;
2387                 goto done;
2388         }
2389
2390         /* We need to ensure that no other power on/off work is pending
2391          * before proceeding to call hci_dev_do_open. This is
2392          * particularly important if the setup procedure has not yet
2393          * completed.
2394          */
2395         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2396                 cancel_delayed_work(&hdev->power_off);
2397
2398         /* After this call it is guaranteed that the setup procedure
2399          * has finished. This means that error conditions like RFKILL
2400          * or the lack of a valid public or static random address apply.
2401          */
2402         flush_workqueue(hdev->req_workqueue);
2403
2404         err = hci_dev_do_open(hdev);
2405
2406 done:
2407         hci_dev_put(hdev);
2408         return err;
2409 }
2410
2411 /* This function requires the caller holds hdev->lock */
2412 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2413 {
2414         struct hci_conn_params *p;
2415
2416         list_for_each_entry(p, &hdev->le_conn_params, list)
2417                 list_del_init(&p->action);
2418
2419         BT_DBG("All LE pending actions cleared");
2420 }
2421
2422 static int hci_dev_do_close(struct hci_dev *hdev)
2423 {
2424         BT_DBG("%s %p", hdev->name, hdev);
2425
2426         cancel_delayed_work(&hdev->power_off);
2427
2428         hci_req_cancel(hdev, ENODEV);
2429         hci_req_lock(hdev);
2430
2431         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2432                 cancel_delayed_work_sync(&hdev->cmd_timer);
2433                 hci_req_unlock(hdev);
2434                 return 0;
2435         }
2436
2437         /* Flush RX and TX works */
2438         flush_work(&hdev->tx_work);
2439         flush_work(&hdev->rx_work);
2440
2441         if (hdev->discov_timeout > 0) {
2442                 cancel_delayed_work(&hdev->discov_off);
2443                 hdev->discov_timeout = 0;
2444                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2445                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2446         }
2447
2448         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2449                 cancel_delayed_work(&hdev->service_cache);
2450
2451         cancel_delayed_work_sync(&hdev->le_scan_disable);
2452
2453         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2454                 cancel_delayed_work_sync(&hdev->rpa_expired);
2455
2456         hci_dev_lock(hdev);
2457         hci_inquiry_cache_flush(hdev);
2458         hci_conn_hash_flush(hdev);
2459         hci_pend_le_actions_clear(hdev);
2460         hci_dev_unlock(hdev);
2461
2462         hci_notify(hdev, HCI_DEV_DOWN);
2463
2464         if (hdev->flush)
2465                 hdev->flush(hdev);
2466
2467         /* Reset device */
2468         skb_queue_purge(&hdev->cmd_q);
2469         atomic_set(&hdev->cmd_cnt, 1);
2470         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2471             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2472             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2473                 set_bit(HCI_INIT, &hdev->flags);
2474                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2475                 clear_bit(HCI_INIT, &hdev->flags);
2476         }
2477
2478         /* Flush cmd work */
2479         flush_work(&hdev->cmd_work);
2480
2481         /* Drop queues */
2482         skb_queue_purge(&hdev->rx_q);
2483         skb_queue_purge(&hdev->cmd_q);
2484         skb_queue_purge(&hdev->raw_q);
2485
2486         /* Drop last sent command */
2487         if (hdev->sent_cmd) {
2488                 cancel_delayed_work_sync(&hdev->cmd_timer);
2489                 kfree_skb(hdev->sent_cmd);
2490                 hdev->sent_cmd = NULL;
2491         }
2492
2493         kfree_skb(hdev->recv_evt);
2494         hdev->recv_evt = NULL;
2495
2496         /* After this point our queues are empty
2497          * and no tasks are scheduled. */
2498         hdev->close(hdev);
2499
2500         /* Clear flags */
2501         hdev->flags &= BIT(HCI_RAW);
2502         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2503
2504         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2505                 if (hdev->dev_type == HCI_BREDR) {
2506                         hci_dev_lock(hdev);
2507                         mgmt_powered(hdev, 0);
2508                         hci_dev_unlock(hdev);
2509                 }
2510         }
2511
2512         /* Controller radio is available but is currently powered down */
2513         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2514
2515         memset(hdev->eir, 0, sizeof(hdev->eir));
2516         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2517         bacpy(&hdev->random_addr, BDADDR_ANY);
2518
2519         hci_req_unlock(hdev);
2520
2521         hci_dev_put(hdev);
2522         return 0;
2523 }
2524
2525 int hci_dev_close(__u16 dev)
2526 {
2527         struct hci_dev *hdev;
2528         int err;
2529
2530         hdev = hci_dev_get(dev);
2531         if (!hdev)
2532                 return -ENODEV;
2533
2534         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2535                 err = -EBUSY;
2536                 goto done;
2537         }
2538
2539         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2540                 cancel_delayed_work(&hdev->power_off);
2541
2542         err = hci_dev_do_close(hdev);
2543
2544 done:
2545         hci_dev_put(hdev);
2546         return err;
2547 }
2548
2549 int hci_dev_reset(__u16 dev)
2550 {
2551         struct hci_dev *hdev;
2552         int ret = 0;
2553
2554         hdev = hci_dev_get(dev);
2555         if (!hdev)
2556                 return -ENODEV;
2557
2558         hci_req_lock(hdev);
2559
2560         if (!test_bit(HCI_UP, &hdev->flags)) {
2561                 ret = -ENETDOWN;
2562                 goto done;
2563         }
2564
2565         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2566                 ret = -EBUSY;
2567                 goto done;
2568         }
2569
2570         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2571                 ret = -EOPNOTSUPP;
2572                 goto done;
2573         }
2574
2575         /* Drop queues */
2576         skb_queue_purge(&hdev->rx_q);
2577         skb_queue_purge(&hdev->cmd_q);
2578
2579         hci_dev_lock(hdev);
2580         hci_inquiry_cache_flush(hdev);
2581         hci_conn_hash_flush(hdev);
2582         hci_dev_unlock(hdev);
2583
2584         if (hdev->flush)
2585                 hdev->flush(hdev);
2586
2587         atomic_set(&hdev->cmd_cnt, 1);
2588         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2589
2590         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2591
2592 done:
2593         hci_req_unlock(hdev);
2594         hci_dev_put(hdev);
2595         return ret;
2596 }
2597
2598 int hci_dev_reset_stat(__u16 dev)
2599 {
2600         struct hci_dev *hdev;
2601         int ret = 0;
2602
2603         hdev = hci_dev_get(dev);
2604         if (!hdev)
2605                 return -ENODEV;
2606
2607         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2608                 ret = -EBUSY;
2609                 goto done;
2610         }
2611
2612         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2613                 ret = -EOPNOTSUPP;
2614                 goto done;
2615         }
2616
2617         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2618
2619 done:
2620         hci_dev_put(hdev);
2621         return ret;
2622 }
2623
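/* Legacy ioctl interface for per-device settings. Illustrative call
 * (assumes a hypothetical hci_sock raw HCI socket and the BlueZ
 * userspace headers):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	ioctl(hci_sock, HCISETSCAN, &dr);
 */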
2624 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2625 {
2626         struct hci_dev *hdev;
2627         struct hci_dev_req dr;
2628         int err = 0;
2629
2630         if (copy_from_user(&dr, arg, sizeof(dr)))
2631                 return -EFAULT;
2632
2633         hdev = hci_dev_get(dr.dev_id);
2634         if (!hdev)
2635                 return -ENODEV;
2636
2637         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2638                 err = -EBUSY;
2639                 goto done;
2640         }
2641
2642         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2643                 err = -EOPNOTSUPP;
2644                 goto done;
2645         }
2646
2647         if (hdev->dev_type != HCI_BREDR) {
2648                 err = -EOPNOTSUPP;
2649                 goto done;
2650         }
2651
2652         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2653                 err = -EOPNOTSUPP;
2654                 goto done;
2655         }
2656
2657         switch (cmd) {
2658         case HCISETAUTH:
2659                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2660                                    HCI_INIT_TIMEOUT);
2661                 break;
2662
2663         case HCISETENCRYPT:
2664                 if (!lmp_encrypt_capable(hdev)) {
2665                         err = -EOPNOTSUPP;
2666                         break;
2667                 }
2668
2669                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2670                         /* Auth must be enabled first */
2671                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2672                                            HCI_INIT_TIMEOUT);
2673                         if (err)
2674                                 break;
2675                 }
2676
2677                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2678                                    HCI_INIT_TIMEOUT);
2679                 break;
2680
2681         case HCISETSCAN:
2682                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2683                                    HCI_INIT_TIMEOUT);
2684                 break;
2685
2686         case HCISETLINKPOL:
2687                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2688                                    HCI_INIT_TIMEOUT);
2689                 break;
2690
2691         case HCISETLINKMODE:
2692                 hdev->link_mode = ((__u16) dr.dev_opt) &
2693                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2694                 break;
2695
2696         case HCISETPTYPE:
2697                 hdev->pkt_type = (__u16) dr.dev_opt;
2698                 break;
2699
2700         case HCISETACLMTU:
2701                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2702                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2703                 break;
2704
2705         case HCISETSCOMTU:
2706                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2707                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2708                 break;
2709
2710         default:
2711                 err = -EINVAL;
2712                 break;
2713         }
2714
2715 done:
2716         hci_dev_put(hdev);
2717         return err;
2718 }
2719
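/* Enumerate registered controllers for the HCIGETDEVLIST ioctl. The
 * caller states how many hci_dev_req slots it allocated in dev_num; at
 * most that many entries (bounded by 2 * PAGE_SIZE worth) are returned.
 */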
2720 int hci_get_dev_list(void __user *arg)
2721 {
2722         struct hci_dev *hdev;
2723         struct hci_dev_list_req *dl;
2724         struct hci_dev_req *dr;
2725         int n = 0, size, err;
2726         __u16 dev_num;
2727
2728         if (get_user(dev_num, (__u16 __user *) arg))
2729                 return -EFAULT;
2730
2731         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2732                 return -EINVAL;
2733
2734         size = sizeof(*dl) + dev_num * sizeof(*dr);
2735
2736         dl = kzalloc(size, GFP_KERNEL);
2737         if (!dl)
2738                 return -ENOMEM;
2739
2740         dr = dl->dev_req;
2741
2742         read_lock(&hci_dev_list_lock);
2743         list_for_each_entry(hdev, &hci_dev_list, list) {
2744                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2745                         cancel_delayed_work(&hdev->power_off);
2746
2747                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2748                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2749
2750                 (dr + n)->dev_id  = hdev->id;
2751                 (dr + n)->dev_opt = hdev->flags;
2752
2753                 if (++n >= dev_num)
2754                         break;
2755         }
2756         read_unlock(&hci_dev_list_lock);
2757
2758         dl->dev_num = n;
2759         size = sizeof(*dl) + n * sizeof(*dr);
2760
2761         err = copy_to_user(arg, dl, size);
2762         kfree(dl);
2763
2764         return err ? -EFAULT : 0;
2765 }
2766
2767 int hci_get_dev_info(void __user *arg)
2768 {
2769         struct hci_dev *hdev;
2770         struct hci_dev_info di;
2771         int err = 0;
2772
2773         if (copy_from_user(&di, arg, sizeof(di)))
2774                 return -EFAULT;
2775
2776         hdev = hci_dev_get(di.dev_id);
2777         if (!hdev)
2778                 return -ENODEV;
2779
2780         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2781                 cancel_delayed_work_sync(&hdev->power_off);
2782
2783         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2784                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2785
2786         strcpy(di.name, hdev->name);
2787         di.bdaddr   = hdev->bdaddr;
2788         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2789         di.flags    = hdev->flags;
2790         di.pkt_type = hdev->pkt_type;
2791         if (lmp_bredr_capable(hdev)) {
2792                 di.acl_mtu  = hdev->acl_mtu;
2793                 di.acl_pkts = hdev->acl_pkts;
2794                 di.sco_mtu  = hdev->sco_mtu;
2795                 di.sco_pkts = hdev->sco_pkts;
2796         } else {
2797                 di.acl_mtu  = hdev->le_mtu;
2798                 di.acl_pkts = hdev->le_pkts;
2799                 di.sco_mtu  = 0;
2800                 di.sco_pkts = 0;
2801         }
2802         di.link_policy = hdev->link_policy;
2803         di.link_mode   = hdev->link_mode;
2804
2805         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2806         memcpy(&di.features, &hdev->features, sizeof(di.features));
2807
2808         if (copy_to_user(arg, &di, sizeof(di)))
2809                 err = -EFAULT;
2810
2811         hci_dev_put(hdev);
2812
2813         return err;
2814 }
2815
2816 /* ---- Interface to HCI drivers ---- */
2817
2818 static int hci_rfkill_set_block(void *data, bool blocked)
2819 {
2820         struct hci_dev *hdev = data;
2821
2822         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2823
2824         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2825                 return -EBUSY;
2826
2827         if (blocked) {
2828                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2829                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2830                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2831                         hci_dev_do_close(hdev);
2832         } else {
2833                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2834         }
2835
2836         return 0;
2837 }
2838
2839 static const struct rfkill_ops hci_rfkill_ops = {
2840         .set_block = hci_rfkill_set_block,
2841 };
2842
2843 static void hci_power_on(struct work_struct *work)
2844 {
2845         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2846         int err;
2847
2848         BT_DBG("%s", hdev->name);
2849
2850         err = hci_dev_do_open(hdev);
2851         if (err < 0) {
2852                 mgmt_set_powered_failed(hdev, err);
2853                 return;
2854         }
2855
2856         /* During the HCI setup phase, a few error conditions are
2857          * ignored and they need to be checked now. If they are still
2858          * valid, it is important to turn the device back off.
2859          */
2860         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2861             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2862             (hdev->dev_type == HCI_BREDR &&
2863              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2864              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2865                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2866                 hci_dev_do_close(hdev);
2867         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2868                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2869                                    HCI_AUTO_OFF_TIMEOUT);
2870         }
2871
2872         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2873                 /* For unconfigured devices, set the HCI_RAW flag
2874                  * so that userspace can easily identify them.
2875                  */
2876                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2877                         set_bit(HCI_RAW, &hdev->flags);
2878
2879                 /* For fully configured devices, this will send
2880                  * the Index Added event. For unconfigured devices,
2881                  * it will send the Unconfigured Index Added event.
2882                  *
2883                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2884                  * and no event will be sent.
2885                  */
2886                 mgmt_index_added(hdev);
2887         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2888                 /* Powering on the controller with HCI_CONFIG set only
2889                  * happens with the transition from unconfigured to
2890                  * configured. This will send the Index Added event.
2891                  */
2892                 mgmt_index_added(hdev);
2893         }
2894 }
2895
2896 static void hci_power_off(struct work_struct *work)
2897 {
2898         struct hci_dev *hdev = container_of(work, struct hci_dev,
2899                                             power_off.work);
2900
2901         BT_DBG("%s", hdev->name);
2902
2903         hci_dev_do_close(hdev);
2904 }
2905
2906 static void hci_discov_off(struct work_struct *work)
2907 {
2908         struct hci_dev *hdev;
2909
2910         hdev = container_of(work, struct hci_dev, discov_off.work);
2911
2912         BT_DBG("%s", hdev->name);
2913
2914         mgmt_discoverable_timeout(hdev);
2915 }
2916
2917 void hci_uuids_clear(struct hci_dev *hdev)
2918 {
2919         struct bt_uuid *uuid, *tmp;
2920
2921         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2922                 list_del(&uuid->list);
2923                 kfree(uuid);
2924         }
2925 }
2926
2927 void hci_link_keys_clear(struct hci_dev *hdev)
2928 {
2929         struct list_head *p, *n;
2930
2931         list_for_each_safe(p, n, &hdev->link_keys) {
2932                 struct link_key *key;
2933
2934                 key = list_entry(p, struct link_key, list);
2935
2936                 list_del(p);
2937                 kfree(key);
2938         }
2939 }
2940
2941 void hci_smp_ltks_clear(struct hci_dev *hdev)
2942 {
2943         struct smp_ltk *k, *tmp;
2944
2945         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2946                 list_del(&k->list);
2947                 kfree(k);
2948         }
2949 }
2950
2951 void hci_smp_irks_clear(struct hci_dev *hdev)
2952 {
2953         struct smp_irk *k, *tmp;
2954
2955         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2956                 list_del(&k->list);
2957                 kfree(k);
2958         }
2959 }
2960
2961 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2962 {
2963         struct link_key *k;
2964
2965         list_for_each_entry(k, &hdev->link_keys, list)
2966                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2967                         return k;
2968
2969         return NULL;
2970 }
2971
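/* Decide whether a new link key should be stored persistently. In
 * summary:
 *
 *	legacy key (type < 0x03)                 -> store
 *	debug combination key                    -> discard
 *	changed combination key, no previous key -> discard
 *	no connection (security mode 3)          -> store
 *	both sides requested bonding             -> store
 *	either side used dedicated bonding       -> store
 *	anything else                            -> discard
 */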
2972 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2973                                u8 key_type, u8 old_key_type)
2974 {
2975         /* Legacy key */
2976         if (key_type < 0x03)
2977                 return true;
2978
2979         /* Debug keys are insecure so don't store them persistently */
2980         if (key_type == HCI_LK_DEBUG_COMBINATION)
2981                 return false;
2982
2983         /* Changed combination key and there's no previous one */
2984         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2985                 return false;
2986
2987         /* Security mode 3 case */
2988         if (!conn)
2989                 return true;
2990
2991         /* Neither side declared no-bonding, i.e. both requested bonding */
2992         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2993                 return true;
2994
2995         /* Local side had dedicated bonding as requirement */
2996         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2997                 return true;
2998
2999         /* Remote side had dedicated bonding as requirement */
3000         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3001                 return true;
3002
3003         /* If none of the above criteria match, then don't store the key
3004          * persistently */
3005         return false;
3006 }
3007
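/* Only LTKs of type SMP_LTK count as master keys; LTK lookups match on
 * this so that each connection role resolves the key stored for it.
 */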
3008 static bool ltk_type_master(u8 type)
3009 {
3010         return (type == SMP_LTK);
3011 }
3012
3013 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3014                              bool master)
3015 {
3016         struct smp_ltk *k;
3017
3018         list_for_each_entry(k, &hdev->long_term_keys, list) {
3019                 if (k->ediv != ediv || k->rand != rand)
3020                         continue;
3021
3022                 if (ltk_type_master(k->type) != master)
3023                         continue;
3024
3025                 return k;
3026         }
3027
3028         return NULL;
3029 }
3030
3031 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3032                                      u8 addr_type, bool master)
3033 {
3034         struct smp_ltk *k;
3035
3036         list_for_each_entry(k, &hdev->long_term_keys, list)
3037                 if (addr_type == k->bdaddr_type &&
3038                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3039                     ltk_type_master(k->type) == master)
3040                         return k;
3041
3042         return NULL;
3043 }
3044
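/* Resolve a Resolvable Private Address to its IRK. A cached match on
 * irk->rpa is tried first; failing that, every known IRK is run through
 * smp_irk_matches() and the winning entry has its cached RPA updated.
 */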
3045 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3046 {
3047         struct smp_irk *irk;
3048
3049         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3050                 if (!bacmp(&irk->rpa, rpa))
3051                         return irk;
3052         }
3053
3054         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3055                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3056                         bacpy(&irk->rpa, rpa);
3057                         return irk;
3058                 }
3059         }
3060
3061         return NULL;
3062 }
3063
3064 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3065                                      u8 addr_type)
3066 {
3067         struct smp_irk *irk;
3068
3069         /* Identity Address must be public or static random */
3070         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3071                 return NULL;
3072
3073         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3074                 if (addr_type == irk->addr_type &&
3075                     bacmp(bdaddr, &irk->bdaddr) == 0)
3076                         return irk;
3077         }
3078
3079         return NULL;
3080 }
3081
3082 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3083                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3084                                   u8 pin_len, bool *persistent)
3085 {
3086         struct link_key *key, *old_key;
3087         u8 old_key_type;
3088
3089         old_key = hci_find_link_key(hdev, bdaddr);
3090         if (old_key) {
3091                 old_key_type = old_key->type;
3092                 key = old_key;
3093         } else {
3094                 old_key_type = conn ? conn->key_type : 0xff;
3095                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3096                 if (!key)
3097                         return NULL;
3098                 list_add(&key->list, &hdev->link_keys);
3099         }
3100
3101         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3102
3103         /* Some buggy controller combinations generate a changed
3104          * combination key for legacy pairing even when there's no
3105          * previous key */
3106         if (type == HCI_LK_CHANGED_COMBINATION &&
3107             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3108                 type = HCI_LK_COMBINATION;
3109                 if (conn)
3110                         conn->key_type = type;
3111         }
3112
3113         bacpy(&key->bdaddr, bdaddr);
3114         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3115         key->pin_len = pin_len;
3116
3117         if (type == HCI_LK_CHANGED_COMBINATION)
3118                 key->type = old_key_type;
3119         else
3120                 key->type = type;
3121
3122         if (persistent)
3123                 *persistent = hci_persistent_key(hdev, conn, type,
3124                                                  old_key_type);
3125
3126         return key;
3127 }
3128
3129 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3130                             u8 addr_type, u8 type, u8 authenticated,
3131                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3132 {
3133         struct smp_ltk *key, *old_key;
3134         bool master = ltk_type_master(type);
3135
3136         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3137         if (old_key)
3138                 key = old_key;
3139         else {
3140                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3141                 if (!key)
3142                         return NULL;
3143                 list_add(&key->list, &hdev->long_term_keys);
3144         }
3145
3146         bacpy(&key->bdaddr, bdaddr);
3147         key->bdaddr_type = addr_type;
3148         memcpy(key->val, tk, sizeof(key->val));
3149         key->authenticated = authenticated;
3150         key->ediv = ediv;
3151         key->rand = rand;
3152         key->enc_size = enc_size;
3153         key->type = type;
3154
3155         return key;
3156 }
3157
3158 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3159                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3160 {
3161         struct smp_irk *irk;
3162
3163         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3164         if (!irk) {
3165                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3166                 if (!irk)
3167                         return NULL;
3168
3169                 bacpy(&irk->bdaddr, bdaddr);
3170                 irk->addr_type = addr_type;
3171
3172                 list_add(&irk->list, &hdev->identity_resolving_keys);
3173         }
3174
3175         memcpy(irk->val, val, 16);
3176         bacpy(&irk->rpa, rpa);
3177
3178         return irk;
3179 }
3180
3181 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3182 {
3183         struct link_key *key;
3184
3185         key = hci_find_link_key(hdev, bdaddr);
3186         if (!key)
3187                 return -ENOENT;
3188
3189         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3190
3191         list_del(&key->list);
3192         kfree(key);
3193
3194         return 0;
3195 }
3196
3197 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3198 {
3199         struct smp_ltk *k, *tmp;
3200         int removed = 0;
3201
3202         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3203                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3204                         continue;
3205
3206                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3207
3208                 list_del(&k->list);
3209                 kfree(k);
3210                 removed++;
3211         }
3212
3213         return removed ? 0 : -ENOENT;
3214 }
3215
3216 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3217 {
3218         struct smp_irk *k, *tmp;
3219
3220         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3221                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3222                         continue;
3223
3224                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3225
3226                 list_del(&k->list);
3227                 kfree(k);
3228         }
3229 }
3230
3231 /* HCI command timer: fires when a sent command got no response in time */
3232 static void hci_cmd_timeout(struct work_struct *work)
3233 {
3234         struct hci_dev *hdev = container_of(work, struct hci_dev,
3235                                             cmd_timer.work);
3236
3237         if (hdev->sent_cmd) {
3238                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3239                 u16 opcode = __le16_to_cpu(sent->opcode);
3240
3241                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3242         } else {
3243                 BT_ERR("%s command tx timeout", hdev->name);
3244         }
3245
3246         atomic_set(&hdev->cmd_cnt, 1);
3247         queue_work(hdev->workqueue, &hdev->cmd_work);
3248 }
3249
3250 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3251                                           bdaddr_t *bdaddr)
3252 {
3253         struct oob_data *data;
3254
3255         list_for_each_entry(data, &hdev->remote_oob_data, list)
3256                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3257                         return data;
3258
3259         return NULL;
3260 }
3261
3262 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3263 {
3264         struct oob_data *data;
3265
3266         data = hci_find_remote_oob_data(hdev, bdaddr);
3267         if (!data)
3268                 return -ENOENT;
3269
3270         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3271
3272         list_del(&data->list);
3273         kfree(data);
3274
3275         return 0;
3276 }
3277
3278 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3279 {
3280         struct oob_data *data, *n;
3281
3282         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3283                 list_del(&data->list);
3284                 kfree(data);
3285         }
3286 }
3287
3288 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3289                             u8 *hash, u8 *randomizer)
3290 {
3291         struct oob_data *data;
3292
3293         data = hci_find_remote_oob_data(hdev, bdaddr);
3294         if (!data) {
3295                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3296                 if (!data)
3297                         return -ENOMEM;
3298
3299                 bacpy(&data->bdaddr, bdaddr);
3300                 list_add(&data->list, &hdev->remote_oob_data);
3301         }
3302
3303         memcpy(data->hash192, hash, sizeof(data->hash192));
3304         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3305
3306         memset(data->hash256, 0, sizeof(data->hash256));
3307         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3308
3309         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3310
3311         return 0;
3312 }
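/* Editor's note: the legacy entry point above deliberately zeroes the
 * P-256 (Secure Connections) hash and randomizer, so stale SC values
 * cannot outlive a fresh P-192-only OOB exchange; the extended variant
 * below stores both value sets.
 */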
3313
3314 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3315                                 u8 *hash192, u8 *randomizer192,
3316                                 u8 *hash256, u8 *randomizer256)
3317 {
3318         struct oob_data *data;
3319
3320         data = hci_find_remote_oob_data(hdev, bdaddr);
3321         if (!data) {
3322                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3323                 if (!data)
3324                         return -ENOMEM;
3325
3326                 bacpy(&data->bdaddr, bdaddr);
3327                 list_add(&data->list, &hdev->remote_oob_data);
3328         }
3329
3330         memcpy(data->hash192, hash192, sizeof(data->hash192));
3331         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3332
3333         memcpy(data->hash256, hash256, sizeof(data->hash256));
3334         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3335
3336         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3337
3338         return 0;
3339 }
3340
3341 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3342                                          bdaddr_t *bdaddr, u8 type)
3343 {
3344         struct bdaddr_list *b;
3345
3346         list_for_each_entry(b, &hdev->blacklist, list) {
3347                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3348                         return b;
3349         }
3350
3351         return NULL;
3352 }
3353
3354 static void hci_blacklist_clear(struct hci_dev *hdev)
3355 {
3356         struct list_head *p, *n;
3357
3358         list_for_each_safe(p, n, &hdev->blacklist) {
3359                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3360
3361                 list_del(p);
3362                 kfree(b);
3363         }
3364 }
3365
3366 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3367 {
3368         struct bdaddr_list *entry;
3369
3370         if (!bacmp(bdaddr, BDADDR_ANY))
3371                 return -EBADF;
3372
3373         if (hci_blacklist_lookup(hdev, bdaddr, type))
3374                 return -EEXIST;
3375
3376         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3377         if (!entry)
3378                 return -ENOMEM;
3379
3380         bacpy(&entry->bdaddr, bdaddr);
3381         entry->bdaddr_type = type;
3382
3383         list_add(&entry->list, &hdev->blacklist);
3384
3385         return 0;
3386 }
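/* Editor's note: an illustrative caller sketch, not upstream code. The
 * blacklist helpers are invoked under hdev->lock, e.g. from the
 * HCIBLOCKADDR socket ioctl path:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 */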
3387
3388 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3389 {
3390         struct bdaddr_list *entry;
3391
3392         if (!bacmp(bdaddr, BDADDR_ANY)) {
3393                 hci_blacklist_clear(hdev);
3394                 return 0;
3395         }
3396
3397         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3398         if (!entry)
3399                 return -ENOENT;
3400
3401         list_del(&entry->list);
3402         kfree(entry);
3403
3404         return 0;
3405 }
3406
3407 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3408                                           bdaddr_t *bdaddr, u8 type)
3409 {
3410         struct bdaddr_list *b;
3411
3412         list_for_each_entry(b, &hdev->le_white_list, list) {
3413                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3414                         return b;
3415         }
3416
3417         return NULL;
3418 }
3419
3420 void hci_white_list_clear(struct hci_dev *hdev)
3421 {
3422         struct list_head *p, *n;
3423
3424         list_for_each_safe(p, n, &hdev->le_white_list) {
3425                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3426
3427                 list_del(p);
3428                 kfree(b);
3429         }
3430 }
3431
3432 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3433 {
3434         struct bdaddr_list *entry;
3435
3436         if (!bacmp(bdaddr, BDADDR_ANY))
3437                 return -EBADF;
3438
3439         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3440         if (!entry)
3441                 return -ENOMEM;
3442
3443         bacpy(&entry->bdaddr, bdaddr);
3444         entry->bdaddr_type = type;
3445
3446         list_add(&entry->list, &hdev->le_white_list);
3447
3448         return 0;
3449 }
3450
3451 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3452 {
3453         struct bdaddr_list *entry;
3454
3455         if (!bacmp(bdaddr, BDADDR_ANY))
3456                 return -EBADF;
3457
3458         entry = hci_white_list_lookup(hdev, bdaddr, type);
3459         if (!entry)
3460                 return -ENOENT;
3461
3462         list_del(&entry->list);
3463         kfree(entry);
3464
3465         return 0;
3466 }
3467
3468 /* This function requires the caller holds hdev->lock */
3469 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3470                                                bdaddr_t *addr, u8 addr_type)
3471 {
3472         struct hci_conn_params *params;
3473
3474         /* The conn params list only contains identity addresses */
3475         if (!hci_is_identity_address(addr, addr_type))
3476                 return NULL;
3477
3478         list_for_each_entry(params, &hdev->le_conn_params, list) {
3479                 if (bacmp(&params->addr, addr) == 0 &&
3480                     params->addr_type == addr_type) {
3481                         return params;
3482                 }
3483         }
3484
3485         return NULL;
3486 }
3487
3488 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3489 {
3490         struct hci_conn *conn;
3491
3492         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3493         if (!conn)
3494                 return false;
3495
3496         if (conn->dst_type != type)
3497                 return false;
3498
3499         if (conn->state != BT_CONNECTED)
3500                 return false;
3501
3502         return true;
3503 }
3504
3505 /* This function requires the caller holds hdev->lock */
3506 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3507                                                   bdaddr_t *addr, u8 addr_type)
3508 {
3509         struct hci_conn_params *param;
3510
3511         /* The list only contains identity addresses */
3512         if (!hci_is_identity_address(addr, addr_type))
3513                 return NULL;
3514
3515         list_for_each_entry(param, list, action) {
3516                 if (bacmp(&param->addr, addr) == 0 &&
3517                     param->addr_type == addr_type)
3518                         return param;
3519         }
3520
3521         return NULL;
3522 }
3523
3524 /* This function requires the caller holds hdev->lock */
3525 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3526                                             bdaddr_t *addr, u8 addr_type)
3527 {
3528         struct hci_conn_params *params;
3529
3530         if (!hci_is_identity_address(addr, addr_type))
3531                 return NULL;
3532
3533         params = hci_conn_params_lookup(hdev, addr, addr_type);
3534         if (params)
3535                 return params;
3536
3537         params = kzalloc(sizeof(*params), GFP_KERNEL);
3538         if (!params) {
3539                 BT_ERR("Out of memory");
3540                 return NULL;
3541         }
3542
3543         bacpy(&params->addr, addr);
3544         params->addr_type = addr_type;
3545
3546         list_add(&params->list, &hdev->le_conn_params);
3547         INIT_LIST_HEAD(&params->action);
3548
3549         params->conn_min_interval = hdev->le_conn_min_interval;
3550         params->conn_max_interval = hdev->le_conn_max_interval;
3551         params->conn_latency = hdev->le_conn_latency;
3552         params->supervision_timeout = hdev->le_supv_timeout;
3553         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3554
3555         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3556
3557         return params;
3558 }
3559
3560 /* This function requires the caller holds hdev->lock */
3561 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3562                         u8 auto_connect)
3563 {
3564         struct hci_conn_params *params;
3565
3566         params = hci_conn_params_add(hdev, addr, addr_type);
3567         if (!params)
3568                 return -EIO;
3569
3570         if (params->auto_connect == auto_connect)
3571                 return 0;
3572
3573         list_del_init(&params->action);
3574
3575         switch (auto_connect) {
3576         case HCI_AUTO_CONN_DISABLED:
3577         case HCI_AUTO_CONN_LINK_LOSS:
3578                 hci_update_background_scan(hdev);
3579                 break;
3580         case HCI_AUTO_CONN_REPORT:
3581                 list_add(&params->action, &hdev->pend_le_reports);
3582                 hci_update_background_scan(hdev);
3583                 break;
3584         case HCI_AUTO_CONN_ALWAYS:
3585                 if (!is_connected(hdev, addr, addr_type)) {
3586                         list_add(&params->action, &hdev->pend_le_conns);
3587                         hci_update_background_scan(hdev);
3588                 }
3589                 break;
3590         }
3591
3592         params->auto_connect = auto_connect;
3593
3594         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3595                auto_connect);
3596
3597         return 0;
3598 }
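/* Editor's note: a hedged usage sketch, not upstream code. Marking an LE
 * device for automatic connection moves its params onto pend_le_conns
 * and re-triggers the background scan:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * HCI_AUTO_CONN_REPORT instead queues the entry on pend_le_reports so
 * advertising reports are forwarded without initiating a connection.
 */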
3599
3600 /* This function requires the caller holds hdev->lock */
3601 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3602 {
3603         struct hci_conn_params *params;
3604
3605         params = hci_conn_params_lookup(hdev, addr, addr_type);
3606         if (!params)
3607                 return;
3608
3609         list_del(&params->action);
3610         list_del(&params->list);
3611         kfree(params);
3612
3613         hci_update_background_scan(hdev);
3614
3615         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3616 }
3617
3618 /* This function requires the caller holds hdev->lock */
3619 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3620 {
3621         struct hci_conn_params *params, *tmp;
3622
3623         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3624                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3625                         continue;
3626                 list_del(&params->list);
3627                 kfree(params);
3628         }
3629
3630         BT_DBG("All disabled LE connection parameters were removed");
3631 }
3632
3633 /* This function requires the caller holds hdev->lock */
3634 void hci_conn_params_clear_all(struct hci_dev *hdev)
3635 {
3636         struct hci_conn_params *params, *tmp;
3637
3638         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3639                 list_del(&params->action);
3640                 list_del(&params->list);
3641                 kfree(params);
3642         }
3643
3644         hci_update_background_scan(hdev);
3645
3646         BT_DBG("All LE connection parameters were removed");
3647 }
3648
3649 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3650 {
3651         if (status) {
3652                 BT_ERR("Failed to start inquiry: status %d", status);
3653
3654                 hci_dev_lock(hdev);
3655                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3656                 hci_dev_unlock(hdev);
3657                 return;
3658         }
3659 }
3660
3661 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3662 {
3663         /* General inquiry access code (GIAC) */
3664         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3665         struct hci_request req;
3666         struct hci_cp_inquiry cp;
3667         int err;
3668
3669         if (status) {
3670                 BT_ERR("Failed to disable LE scanning: status %d", status);
3671                 return;
3672         }
3673
3674         switch (hdev->discovery.type) {
3675         case DISCOV_TYPE_LE:
3676                 hci_dev_lock(hdev);
3677                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3678                 hci_dev_unlock(hdev);
3679                 break;
3680
3681         case DISCOV_TYPE_INTERLEAVED:
3682                 hci_req_init(&req, hdev);
3683
3684                 memset(&cp, 0, sizeof(cp));
3685                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3686                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3687                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3688
3689                 hci_dev_lock(hdev);
3690
3691                 hci_inquiry_cache_flush(hdev);
3692
3693                 err = hci_req_run(&req, inquiry_complete);
3694                 if (err) {
3695                         BT_ERR("Inquiry request failed: err %d", err);
3696                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3697                 }
3698
3699                 hci_dev_unlock(hdev);
3700                 break;
3701         }
3702 }
3703
3704 static void le_scan_disable_work(struct work_struct *work)
3705 {
3706         struct hci_dev *hdev = container_of(work, struct hci_dev,
3707                                             le_scan_disable.work);
3708         struct hci_request req;
3709         int err;
3710
3711         BT_DBG("%s", hdev->name);
3712
3713         hci_req_init(&req, hdev);
3714
3715         hci_req_add_le_scan_disable(&req);
3716
3717         err = hci_req_run(&req, le_scan_disable_work_complete);
3718         if (err)
3719                 BT_ERR("Disable LE scanning request failed: err %d", err);
3720 }
3721
3722 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3723 {
3724         struct hci_dev *hdev = req->hdev;
3725
3726         /* If we're advertising or initiating an LE connection we can't
3727          * go ahead and change the random address at this time. This is
3728          * because the eventual initiator address used for the
3729          * subsequently created connection will be undefined (some
3730          * controllers use the new address and others the one we had
3731          * when the operation started).
3732          *
3733          * In this kind of scenario skip the update and let the random
3734          * address be updated at the next cycle.
3735          */
3736         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3737             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3738                 BT_DBG("Deferring random address update");
3739                 return;
3740         }
3741
3742         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3743 }
3744
3745 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3746                               u8 *own_addr_type)
3747 {
3748         struct hci_dev *hdev = req->hdev;
3749         int err;
3750
3751         /* If privacy is enabled use a resolvable private address. If
3752          * the current RPA has expired or an address other than the
3753          * current RPA is in use, then generate a new one.
3754          */
3755         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3756                 int to;
3757
3758                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3759
3760                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3761                     !bacmp(&hdev->random_addr, &hdev->rpa))
3762                         return 0;
3763
3764                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3765                 if (err < 0) {
3766                         BT_ERR("%s failed to generate new RPA", hdev->name);
3767                         return err;
3768                 }
3769
3770                 set_random_addr(req, &hdev->rpa);
3771
3772                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3773                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3774
3775                 return 0;
3776         }
3777
3778         /* If privacy is required but a resolvable private address cannot
3779          * be used, fall back to a non-resolvable private address. This is
3780          * useful for active scanning and non-connectable advertising.
3781          */
3782         if (require_privacy) {
3783                 bdaddr_t urpa;
3784
3785                 get_random_bytes(&urpa, 6);
3786                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3787
3788                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3789                 set_random_addr(req, &urpa);
3790                 return 0;
3791         }
3792
3793         /* If the static address is being forced, or there is no public
3794          * address, use the static address as the random address (but
3795          * skip the HCI command if the current random address is already
3796          * the static one).
3797          */
3798         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3799             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3800                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3801                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3802                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3803                                     &hdev->static_addr);
3804                 return 0;
3805         }
3806
3807         /* Neither privacy nor static address is being used so use a
3808          * public address.
3809          */
3810         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3811
3812         return 0;
3813 }
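/* Editor's note: a minimal sketch of how a request builder is expected to
 * consume own_addr_type (field names assumed from the LE Set Scan
 * Parameters command definition):
 *
 *	struct hci_cp_le_set_scan_param cp;
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	memset(&cp, 0, sizeof(cp));
 *	cp.own_address_type = own_addr_type;
 *	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
 *
 * Any HCI_OP_LE_SET_RANDOM_ADDR command queued above thus lands in the
 * same request, ahead of the command that uses the address.
 */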
3814
3815 /* Copy the Identity Address of the controller.
3816  *
3817  * If the controller has a public BD_ADDR, then by default use that one.
3818  * If this is a LE only controller without a public address, default to
3819  * If this is an LE-only controller without a public address, default to
3820  *
3821  * For debugging purposes it is possible to force controllers with a
3822  * public address to use the static random address instead.
3823  */
3824 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3825                                u8 *bdaddr_type)
3826 {
3827         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3828             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3829                 bacpy(bdaddr, &hdev->static_addr);
3830                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3831         } else {
3832                 bacpy(bdaddr, &hdev->bdaddr);
3833                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3834         }
3835 }
3836
3837 /* Alloc HCI device */
3838 struct hci_dev *hci_alloc_dev(void)
3839 {
3840         struct hci_dev *hdev;
3841
3842         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3843         if (!hdev)
3844                 return NULL;
3845
3846         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3847         hdev->esco_type = (ESCO_HV1);
3848         hdev->link_mode = (HCI_LM_ACCEPT);
3849         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3850         hdev->io_capability = 0x03;     /* No Input No Output */
3851         hdev->manufacturer = 0xffff;    /* Default to internal use */
3852         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3853         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3854
3855         hdev->sniff_max_interval = 800;
3856         hdev->sniff_min_interval = 80;
3857
3858         hdev->le_adv_channel_map = 0x07;
3859         hdev->le_scan_interval = 0x0060;
3860         hdev->le_scan_window = 0x0030;
3861         hdev->le_conn_min_interval = 0x0028;
3862         hdev->le_conn_max_interval = 0x0038;
3863         hdev->le_conn_latency = 0x0000;
3864         hdev->le_supv_timeout = 0x002a;
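        /* Editor's note: the LE defaults above are in controller units:
         * scan interval/window use 0.625 ms units (0x0060 = 60 ms,
         * 0x0030 = 30 ms), connection intervals use 1.25 ms units
         * (0x0028 = 50 ms, 0x0038 = 70 ms) and the supervision timeout
         * uses 10 ms units (0x002a = 420 ms).
         */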
3865
3866         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3867         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3868         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3869         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3870
3871         mutex_init(&hdev->lock);
3872         mutex_init(&hdev->req_lock);
3873
3874         INIT_LIST_HEAD(&hdev->mgmt_pending);
3875         INIT_LIST_HEAD(&hdev->blacklist);
3876         INIT_LIST_HEAD(&hdev->uuids);
3877         INIT_LIST_HEAD(&hdev->link_keys);
3878         INIT_LIST_HEAD(&hdev->long_term_keys);
3879         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3880         INIT_LIST_HEAD(&hdev->remote_oob_data);
3881         INIT_LIST_HEAD(&hdev->le_white_list);
3882         INIT_LIST_HEAD(&hdev->le_conn_params);
3883         INIT_LIST_HEAD(&hdev->pend_le_conns);
3884         INIT_LIST_HEAD(&hdev->pend_le_reports);
3885         INIT_LIST_HEAD(&hdev->conn_hash.list);
3886
3887         INIT_WORK(&hdev->rx_work, hci_rx_work);
3888         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3889         INIT_WORK(&hdev->tx_work, hci_tx_work);
3890         INIT_WORK(&hdev->power_on, hci_power_on);
3891
3892         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3893         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3894         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3895
3896         skb_queue_head_init(&hdev->rx_q);
3897         skb_queue_head_init(&hdev->cmd_q);
3898         skb_queue_head_init(&hdev->raw_q);
3899
3900         init_waitqueue_head(&hdev->req_wait_q);
3901
3902         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3903
3904         hci_init_sysfs(hdev);
3905         discovery_init(hdev);
3906
3907         return hdev;
3908 }
3909 EXPORT_SYMBOL(hci_alloc_dev);
3910
3911 /* Free HCI device */
3912 void hci_free_dev(struct hci_dev *hdev)
3913 {
3914         /* will free via device release */
3915         put_device(&hdev->dev);
3916 }
3917 EXPORT_SYMBOL(hci_free_dev);
3918
3919 /* Register HCI device */
3920 int hci_register_dev(struct hci_dev *hdev)
3921 {
3922         int id, error;
3923
3924         if (!hdev->open || !hdev->close)
3925                 return -EINVAL;
3926
3927         /* Do not allow HCI_AMP devices to register at index 0,
3928          * so the index can be used as the AMP controller ID.
3929          */
3930         switch (hdev->dev_type) {
3931         case HCI_BREDR:
3932                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3933                 break;
3934         case HCI_AMP:
3935                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3936                 break;
3937         default:
3938                 return -EINVAL;
3939         }
3940
3941         if (id < 0)
3942                 return id;
3943
3944         sprintf(hdev->name, "hci%d", id);
3945         hdev->id = id;
3946
3947         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3948
3949         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3950                                           WQ_MEM_RECLAIM, 1, hdev->name);
3951         if (!hdev->workqueue) {
3952                 error = -ENOMEM;
3953                 goto err;
3954         }
3955
3956         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3957                                               WQ_MEM_RECLAIM, 1, hdev->name);
3958         if (!hdev->req_workqueue) {
3959                 destroy_workqueue(hdev->workqueue);
3960                 error = -ENOMEM;
3961                 goto err;
3962         }
3963
3964         if (!IS_ERR_OR_NULL(bt_debugfs))
3965                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3966
3967         dev_set_name(&hdev->dev, "%s", hdev->name);
3968
3969         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3970                                                CRYPTO_ALG_ASYNC);
3971         if (IS_ERR(hdev->tfm_aes)) {
3972                 BT_ERR("Unable to create crypto context");
3973                 error = PTR_ERR(hdev->tfm_aes);
3974                 hdev->tfm_aes = NULL;
3975                 goto err_wqueue;
3976         }
3977
3978         error = device_add(&hdev->dev);
3979         if (error < 0)
3980                 goto err_tfm;
3981
3982         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3983                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3984                                     hdev);
3985         if (hdev->rfkill) {
3986                 if (rfkill_register(hdev->rfkill) < 0) {
3987                         rfkill_destroy(hdev->rfkill);
3988                         hdev->rfkill = NULL;
3989                 }
3990         }
3991
3992         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3993                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3994
3995         set_bit(HCI_SETUP, &hdev->dev_flags);
3996         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3997
3998         if (hdev->dev_type == HCI_BREDR) {
3999                 /* Assume BR/EDR support until proven otherwise (such as
4000                  * through reading supported features during init).
4001                  */
4002                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4003         }
4004
4005         write_lock(&hci_dev_list_lock);
4006         list_add(&hdev->list, &hci_dev_list);
4007         write_unlock(&hci_dev_list_lock);
4008
4009         /* Devices that are marked for raw-only usage are unconfigured
4010          * and should not be included in normal operation.
4011          */
4012         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4013                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4014
4015         hci_notify(hdev, HCI_DEV_REG);
4016         hci_dev_hold(hdev);
4017
4018         queue_work(hdev->req_workqueue, &hdev->power_on);
4019
4020         return id;
4021
4022 err_tfm:
4023         crypto_free_blkcipher(hdev->tfm_aes);
4024 err_wqueue:
4025         destroy_workqueue(hdev->workqueue);
4026         destroy_workqueue(hdev->req_workqueue);
4027 err:
4028         ida_simple_remove(&hci_index_ida, hdev->id);
4029
4030         return error;
4031 }
4032 EXPORT_SYMBOL(hci_register_dev);
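/* Editor's note: a minimal driver-side sketch, modelled loosely on
 * drivers/bluetooth/hci_vhci.c (the vhci_* callbacks are that driver's
 * names, shown here only for illustration):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_VIRTUAL;
 *	hdev->open = vhci_open_dev;
 *	hdev->close = vhci_close_dev;
 *	hdev->flush = vhci_flush;
 *	hdev->send = vhci_send_frame;
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */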
4033
4034 /* Unregister HCI device */
4035 void hci_unregister_dev(struct hci_dev *hdev)
4036 {
4037         int i, id;
4038
4039         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4040
4041         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4042
4043         id = hdev->id;
4044
4045         write_lock(&hci_dev_list_lock);
4046         list_del(&hdev->list);
4047         write_unlock(&hci_dev_list_lock);
4048
4049         hci_dev_do_close(hdev);
4050
4051         for (i = 0; i < NUM_REASSEMBLY; i++)
4052                 kfree_skb(hdev->reassembly[i]);
4053
4054         cancel_work_sync(&hdev->power_on);
4055
4056         if (!test_bit(HCI_INIT, &hdev->flags) &&
4057             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4058             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4059                 hci_dev_lock(hdev);
4060                 mgmt_index_removed(hdev);
4061                 hci_dev_unlock(hdev);
4062         }
4063
4064         /* mgmt_index_removed should take care of emptying the
4065          * pending list */
4066         BUG_ON(!list_empty(&hdev->mgmt_pending));
4067
4068         hci_notify(hdev, HCI_DEV_UNREG);
4069
4070         if (hdev->rfkill) {
4071                 rfkill_unregister(hdev->rfkill);
4072                 rfkill_destroy(hdev->rfkill);
4073         }
4074
4075         if (hdev->tfm_aes)
4076                 crypto_free_blkcipher(hdev->tfm_aes);
4077
4078         device_del(&hdev->dev);
4079
4080         debugfs_remove_recursive(hdev->debugfs);
4081
4082         destroy_workqueue(hdev->workqueue);
4083         destroy_workqueue(hdev->req_workqueue);
4084
4085         hci_dev_lock(hdev);
4086         hci_blacklist_clear(hdev);
4087         hci_uuids_clear(hdev);
4088         hci_link_keys_clear(hdev);
4089         hci_smp_ltks_clear(hdev);
4090         hci_smp_irks_clear(hdev);
4091         hci_remote_oob_data_clear(hdev);
4092         hci_white_list_clear(hdev);
4093         hci_conn_params_clear_all(hdev);
4094         hci_dev_unlock(hdev);
4095
4096         hci_dev_put(hdev);
4097
4098         ida_simple_remove(&hci_index_ida, id);
4099 }
4100 EXPORT_SYMBOL(hci_unregister_dev);
4101
4102 /* Suspend HCI device */
4103 int hci_suspend_dev(struct hci_dev *hdev)
4104 {
4105         hci_notify(hdev, HCI_DEV_SUSPEND);
4106         return 0;
4107 }
4108 EXPORT_SYMBOL(hci_suspend_dev);
4109
4110 /* Resume HCI device */
4111 int hci_resume_dev(struct hci_dev *hdev)
4112 {
4113         hci_notify(hdev, HCI_DEV_RESUME);
4114         return 0;
4115 }
4116 EXPORT_SYMBOL(hci_resume_dev);
4117
4118 /* Receive frame from HCI drivers */
4119 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4120 {
4121         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4122                       !test_bit(HCI_INIT, &hdev->flags))) {
4123                 kfree_skb(skb);
4124                 return -ENXIO;
4125         }
4126
4127         /* Incoming skb */
4128         bt_cb(skb)->incoming = 1;
4129
4130         /* Time stamp */
4131         __net_timestamp(skb);
4132
4133         skb_queue_tail(&hdev->rx_q, skb);
4134         queue_work(hdev->workqueue, &hdev->rx_work);
4135
4136         return 0;
4137 }
4138 EXPORT_SYMBOL(hci_recv_frame);
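/* Editor's note: an illustrative RX path for a driver that already has a
 * complete HCI packet in a buffer (sketch, not upstream code):
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, count), buf, count);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * Drivers that only see a byte stream or per-fragment data use
 * hci_recv_stream_fragment()/hci_recv_fragment() below instead.
 */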
4139
4140 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4141                           int count, __u8 index)
4142 {
4143         int len = 0;
4144         int hlen = 0;
4145         int remain = count;
4146         struct sk_buff *skb;
4147         struct bt_skb_cb *scb;
4148
4149         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4150             index >= NUM_REASSEMBLY)
4151                 return -EILSEQ;
4152
4153         skb = hdev->reassembly[index];
4154
4155         if (!skb) {
4156                 switch (type) {
4157                 case HCI_ACLDATA_PKT:
4158                         len = HCI_MAX_FRAME_SIZE;
4159                         hlen = HCI_ACL_HDR_SIZE;
4160                         break;
4161                 case HCI_EVENT_PKT:
4162                         len = HCI_MAX_EVENT_SIZE;
4163                         hlen = HCI_EVENT_HDR_SIZE;
4164                         break;
4165                 case HCI_SCODATA_PKT:
4166                         len = HCI_MAX_SCO_SIZE;
4167                         hlen = HCI_SCO_HDR_SIZE;
4168                         break;
4169                 }
4170
4171                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4172                 if (!skb)
4173                         return -ENOMEM;
4174
4175                 scb = (void *) skb->cb;
4176                 scb->expect = hlen;
4177                 scb->pkt_type = type;
4178
4179                 hdev->reassembly[index] = skb;
4180         }
4181
4182         while (count) {
4183                 scb = (void *) skb->cb;
4184                 len = min_t(uint, scb->expect, count);
4185
4186                 memcpy(skb_put(skb, len), data, len);
4187
4188                 count -= len;
4189                 data += len;
4190                 scb->expect -= len;
4191                 remain = count;
4192
4193                 switch (type) {
4194                 case HCI_EVENT_PKT:
4195                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4196                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4197                                 scb->expect = h->plen;
4198
4199                                 if (skb_tailroom(skb) < scb->expect) {
4200                                         kfree_skb(skb);
4201                                         hdev->reassembly[index] = NULL;
4202                                         return -ENOMEM;
4203                                 }
4204                         }
4205                         break;
4206
4207                 case HCI_ACLDATA_PKT:
4208                         if (skb->len == HCI_ACL_HDR_SIZE) {
4209                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4210                                 scb->expect = __le16_to_cpu(h->dlen);
4211
4212                                 if (skb_tailroom(skb) < scb->expect) {
4213                                         kfree_skb(skb);
4214                                         hdev->reassembly[index] = NULL;
4215                                         return -ENOMEM;
4216                                 }
4217                         }
4218                         break;
4219
4220                 case HCI_SCODATA_PKT:
4221                         if (skb->len == HCI_SCO_HDR_SIZE) {
4222                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4223                                 scb->expect = h->dlen;
4224
4225                                 if (skb_tailroom(skb) < scb->expect) {
4226                                         kfree_skb(skb);
4227                                         hdev->reassembly[index] = NULL;
4228                                         return -ENOMEM;
4229                                 }
4230                         }
4231                         break;
4232                 }
4233
4234                 if (scb->expect == 0) {
4235                         /* Complete frame */
4236
4237                         bt_cb(skb)->pkt_type = type;
4238                         hci_recv_frame(hdev, skb);
4239
4240                         hdev->reassembly[index] = NULL;
4241                         return remain;
4242                 }
4243         }
4244
4245         return remain;
4246 }
4247
4248 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4249 {
4250         int rem = 0;
4251
4252         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4253                 return -EILSEQ;
4254
4255         while (count) {
4256                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4257                 if (rem < 0)
4258                         return rem;
4259
4260                 data += (count - rem);
4261                 count = rem;
4262         }
4263
4264         return rem;
4265 }
4266 EXPORT_SYMBOL(hci_recv_fragment);
4267
4268 #define STREAM_REASSEMBLY 0
4269
4270 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4271 {
4272         int type;
4273         int rem = 0;
4274
4275         while (count) {
4276                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4277
4278                 if (!skb) {
4279                         struct { char type; } *pkt;
4280
4281                         /* Start of the frame */
4282                         pkt = data;
4283                         type = pkt->type;
4284
4285                         data++;
4286                         count--;
4287                 } else
4288                         type = bt_cb(skb)->pkt_type;
4289
4290                 rem = hci_reassembly(hdev, type, data, count,
4291                                      STREAM_REASSEMBLY);
4292                 if (rem < 0)
4293                         return rem;
4294
4295                 data += (count - rem);
4296                 count = rem;
4297         }
4298
4299         return rem;
4300 }
4301 EXPORT_SYMBOL(hci_recv_stream_fragment);
4302
4303 /* ---- Interface to upper protocols ---- */
4304
4305 int hci_register_cb(struct hci_cb *cb)
4306 {
4307         BT_DBG("%p name %s", cb, cb->name);
4308
4309         write_lock(&hci_cb_list_lock);
4310         list_add(&cb->list, &hci_cb_list);
4311         write_unlock(&hci_cb_list_lock);
4312
4313         return 0;
4314 }
4315 EXPORT_SYMBOL(hci_register_cb);
4316
4317 int hci_unregister_cb(struct hci_cb *cb)
4318 {
4319         BT_DBG("%p name %s", cb, cb->name);
4320
4321         write_lock(&hci_cb_list_lock);
4322         list_del(&cb->list);
4323         write_unlock(&hci_cb_list_lock);
4324
4325         return 0;
4326 }
4327 EXPORT_SYMBOL(hci_unregister_cb);
4328
4329 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4330 {
4331         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4332
4333         /* Time stamp */
4334         __net_timestamp(skb);
4335
4336         /* Send copy to monitor */
4337         hci_send_to_monitor(hdev, skb);
4338
4339         if (atomic_read(&hdev->promisc)) {
4340                 /* Send copy to the sockets */
4341                 hci_send_to_sock(hdev, skb);
4342         }
4343
4344         /* Get rid of skb owner, prior to sending to the driver. */
4345         skb_orphan(skb);
4346
4347         if (hdev->send(hdev, skb) < 0)
4348                 BT_ERR("%s sending frame failed", hdev->name);
4349 }
4350
4351 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4352 {
4353         skb_queue_head_init(&req->cmd_q);
4354         req->hdev = hdev;
4355         req->err = 0;
4356 }
4357
4358 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4359 {
4360         struct hci_dev *hdev = req->hdev;
4361         struct sk_buff *skb;
4362         unsigned long flags;
4363
4364         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4365
4366         /* If an error occurred during request building, remove all HCI
4367          * commands queued on the HCI request queue.
4368          */
4369         if (req->err) {
4370                 skb_queue_purge(&req->cmd_q);
4371                 return req->err;
4372         }
4373
4374         /* Do not allow empty requests */
4375         if (skb_queue_empty(&req->cmd_q))
4376                 return -ENODATA;
4377
4378         skb = skb_peek_tail(&req->cmd_q);
4379         bt_cb(skb)->req.complete = complete;
4380
4381         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4382         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4383         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4384
4385         queue_work(hdev->workqueue, &hdev->cmd_work);
4386
4387         return 0;
4388 }
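/* Editor's note: the canonical request-builder pattern, as used by
 * le_scan_disable_work() above (sketch; my_complete is a hypothetical
 * callback name):
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * The callback runs once the last command in the request has completed
 * (or failed).
 */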
4389
4390 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4391                                        u32 plen, const void *param)
4392 {
4393         int len = HCI_COMMAND_HDR_SIZE + plen;
4394         struct hci_command_hdr *hdr;
4395         struct sk_buff *skb;
4396
4397         skb = bt_skb_alloc(len, GFP_ATOMIC);
4398         if (!skb)
4399                 return NULL;
4400
4401         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4402         hdr->opcode = cpu_to_le16(opcode);
4403         hdr->plen   = plen;
4404
4405         if (plen)
4406                 memcpy(skb_put(skb, plen), param, plen);
4407
4408         BT_DBG("skb len %d", skb->len);
4409
4410         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4411
4412         return skb;
4413 }
4414
4415 /* Send HCI command */
4416 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4417                  const void *param)
4418 {
4419         struct sk_buff *skb;
4420
4421         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4422
4423         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4424         if (!skb) {
4425                 BT_ERR("%s no memory for command", hdev->name);
4426                 return -ENOMEM;
4427         }
4428
4429         /* Stand-alone HCI commands must be flagged as
4430          * single-command requests.
4431          */
4432         bt_cb(skb)->req.start = true;
4433
4434         skb_queue_tail(&hdev->cmd_q, skb);
4435         queue_work(hdev->workqueue, &hdev->cmd_work);
4436
4437         return 0;
4438 }
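/* Editor's note: for a one-off command with no parameters this is simply,
 * e.g., hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); the response is
 * delivered asynchronously through the event path.
 */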
4439
4440 /* Queue a command to an asynchronous HCI request */
4441 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4442                     const void *param, u8 event)
4443 {
4444         struct hci_dev *hdev = req->hdev;
4445         struct sk_buff *skb;
4446
4447         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4448
4449         /* If an error occurred during request building, there is no point in
4450          * queueing the HCI command. We can simply return.
4451          */
4452         if (req->err)
4453                 return;
4454
4455         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4456         if (!skb) {
4457                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4458                        hdev->name, opcode);
4459                 req->err = -ENOMEM;
4460                 return;
4461         }
4462
4463         if (skb_queue_empty(&req->cmd_q))
4464                 bt_cb(skb)->req.start = true;
4465
4466         bt_cb(skb)->req.event = event;
4467
4468         skb_queue_tail(&req->cmd_q, skb);
4469 }
4470
4471 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4472                  const void *param)
4473 {
4474         hci_req_add_ev(req, opcode, plen, param, 0);
4475 }
4476
4477 /* Get data from the previously sent command */
4478 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4479 {
4480         struct hci_command_hdr *hdr;
4481
4482         if (!hdev->sent_cmd)
4483                 return NULL;
4484
4485         hdr = (void *) hdev->sent_cmd->data;
4486
4487         if (hdr->opcode != cpu_to_le16(opcode))
4488                 return NULL;
4489
4490         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4491
4492         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4493 }
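/* Editor's note: a typical consumer in an event handler (sketch):
 *
 *	bdaddr_t bdaddr;
 *	struct hci_cp_remote_name_req *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
 *	if (!cp)
 *		return;
 *	bacpy(&bdaddr, &cp->bdaddr);
 *
 * The NULL check matters: the stored command may already have been
 * replaced by a newer one.
 */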
4494
4495 /* Send ACL data */
4496 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4497 {
4498         struct hci_acl_hdr *hdr;
4499         int len = skb->len;
4500
4501         skb_push(skb, HCI_ACL_HDR_SIZE);
4502         skb_reset_transport_header(skb);
4503         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4504         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4505         hdr->dlen   = cpu_to_le16(len);
4506 }
4507
4508 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4509                           struct sk_buff *skb, __u16 flags)
4510 {
4511         struct hci_conn *conn = chan->conn;
4512         struct hci_dev *hdev = conn->hdev;
4513         struct sk_buff *list;
4514
4515         skb->len = skb_headlen(skb);
4516         skb->data_len = 0;
4517
4518         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4519
4520         switch (hdev->dev_type) {
4521         case HCI_BREDR:
4522                 hci_add_acl_hdr(skb, conn->handle, flags);
4523                 break;
4524         case HCI_AMP:
4525                 hci_add_acl_hdr(skb, chan->handle, flags);
4526                 break;
4527         default:
4528                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4529                 return;
4530         }
4531
4532         list = skb_shinfo(skb)->frag_list;
4533         if (!list) {
4534                 /* Non fragmented */
4535                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4536
4537                 skb_queue_tail(queue, skb);
4538         } else {
4539                 /* Fragmented */
4540                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4541
4542                 skb_shinfo(skb)->frag_list = NULL;
4543
4544                 /* Queue all fragments atomically */
4545                 spin_lock(&queue->lock);
4546
4547                 __skb_queue_tail(queue, skb);
4548
4549                 flags &= ~ACL_START;
4550                 flags |= ACL_CONT;
4551                 do {
4552                         skb = list; list = list->next;
4553
4554                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4555                         hci_add_acl_hdr(skb, conn->handle, flags);
4556
4557                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4558
4559                         __skb_queue_tail(queue, skb);
4560                 } while (list);
4561
4562                 spin_unlock(&queue->lock);
4563         }
4564 }
4565
4566 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4567 {
4568         struct hci_dev *hdev = chan->conn->hdev;
4569
4570         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4571
4572         hci_queue_acl(chan, &chan->data_q, skb, flags);
4573
4574         queue_work(hdev->workqueue, &hdev->tx_work);
4575 }
4576
4577 /* Send SCO data */
4578 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4579 {
4580         struct hci_dev *hdev = conn->hdev;
4581         struct hci_sco_hdr hdr;
4582
4583         BT_DBG("%s len %d", hdev->name, skb->len);
4584
4585         hdr.handle = cpu_to_le16(conn->handle);
4586         hdr.dlen   = skb->len;
4587
4588         skb_push(skb, HCI_SCO_HDR_SIZE);
4589         skb_reset_transport_header(skb);
4590         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4591
4592         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4593
4594         skb_queue_tail(&conn->data_q, skb);
4595         queue_work(hdev->workqueue, &hdev->tx_work);
4596 }
4597
4598 /* ---- HCI TX task (outgoing data) ---- */
4599
4600 /* HCI Connection scheduler */
4601 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4602                                      int *quote)
4603 {
4604         struct hci_conn_hash *h = &hdev->conn_hash;
4605         struct hci_conn *conn = NULL, *c;
4606         unsigned int num = 0, min = ~0;
4607
4608         /* We don't have to lock the device here. Connections are always
4609          * added and removed with the TX task disabled. */
4610
4611         rcu_read_lock();
4612
4613         list_for_each_entry_rcu(c, &h->list, list) {
4614                 if (c->type != type || skb_queue_empty(&c->data_q))
4615                         continue;
4616
4617                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4618                         continue;
4619
4620                 num++;
4621
4622                 if (c->sent < min) {
4623                         min  = c->sent;
4624                         conn = c;
4625                 }
4626
4627                 if (hci_conn_num(hdev, type) == num)
4628                         break;
4629         }
4630
4631         rcu_read_unlock();
4632
4633         if (conn) {
4634                 int cnt, q;
4635
4636                 switch (conn->type) {
4637                 case ACL_LINK:
4638                         cnt = hdev->acl_cnt;
4639                         break;
4640                 case SCO_LINK:
4641                 case ESCO_LINK:
4642                         cnt = hdev->sco_cnt;
4643                         break;
4644                 case LE_LINK:
4645                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4646                         break;
4647                 default:
4648                         cnt = 0;
4649                         BT_ERR("Unknown link type");
4650                 }
4651
4652                 q = cnt / num;
4653                 *quote = q ? q : 1;
4654         } else
4655                 *quote = 0;
4656
4657         BT_DBG("conn %p quote %d", conn, *quote);
4658         return conn;
4659 }
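/* Editor's note: a worked example of the quota arithmetic above: with
 * hdev->acl_cnt == 8 free buffers and three ACL connections that all
 * have queued data, num == 3 and the least-busy connection gets
 * *quote = 8 / 3 = 2 packets this scheduling round (never less than 1).
 */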
4660
4661 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4662 {
4663         struct hci_conn_hash *h = &hdev->conn_hash;
4664         struct hci_conn *c;
4665
4666         BT_ERR("%s link tx timeout", hdev->name);
4667
4668         rcu_read_lock();
4669
4670         /* Kill stalled connections */
4671         list_for_each_entry_rcu(c, &h->list, list) {
4672                 if (c->type == type && c->sent) {
4673                         BT_ERR("%s killing stalled connection %pMR",
4674                                hdev->name, &c->dst);
4675                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4676                 }
4677         }
4678
4679         rcu_read_unlock();
4680 }
4681
4682 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4683                                       int *quote)
4684 {
4685         struct hci_conn_hash *h = &hdev->conn_hash;
4686         struct hci_chan *chan = NULL;
4687         unsigned int num = 0, min = ~0, cur_prio = 0;
4688         struct hci_conn *conn;
4689         int cnt, q, conn_num = 0;
4690
4691         BT_DBG("%s", hdev->name);
4692
4693         rcu_read_lock();
4694
4695         list_for_each_entry_rcu(conn, &h->list, list) {
4696                 struct hci_chan *tmp;
4697
4698                 if (conn->type != type)
4699                         continue;
4700
4701                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4702                         continue;
4703
4704                 conn_num++;
4705
4706                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4707                         struct sk_buff *skb;
4708
4709                         if (skb_queue_empty(&tmp->data_q))
4710                                 continue;
4711
4712                         skb = skb_peek(&tmp->data_q);
4713                         if (skb->priority < cur_prio)
4714                                 continue;
4715
4716                         if (skb->priority > cur_prio) {
4717                                 num = 0;
4718                                 min = ~0;
4719                                 cur_prio = skb->priority;
4720                         }
4721
4722                         num++;
4723
4724                         if (conn->sent < min) {
4725                                 min  = conn->sent;
4726                                 chan = tmp;
4727                         }
4728                 }
4729
4730                 if (hci_conn_num(hdev, type) == conn_num)
4731                         break;
4732         }
4733
4734         rcu_read_unlock();
4735
4736         if (!chan)
4737                 return NULL;
4738
4739         switch (chan->conn->type) {
4740         case ACL_LINK:
4741                 cnt = hdev->acl_cnt;
4742                 break;
4743         case AMP_LINK:
4744                 cnt = hdev->block_cnt;
4745                 break;
4746         case SCO_LINK:
4747         case ESCO_LINK:
4748                 cnt = hdev->sco_cnt;
4749                 break;
4750         case LE_LINK:
4751                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4752                 break;
4753         default:
4754                 cnt = 0;
4755                 BT_ERR("Unknown link type");
4756         }
4757
4758         q = cnt / num;
4759         *quote = q ? q : 1;
4760         BT_DBG("chan %p quote %d", chan, *quote);
4761         return chan;
4762 }
4763
4764 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4765 {
4766         struct hci_conn_hash *h = &hdev->conn_hash;
4767         struct hci_conn *conn;
4768         int num = 0;
4769
4770         BT_DBG("%s", hdev->name);
4771
4772         rcu_read_lock();
4773
4774         list_for_each_entry_rcu(conn, &h->list, list) {
4775                 struct hci_chan *chan;
4776
4777                 if (conn->type != type)
4778                         continue;
4779
4780                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4781                         continue;
4782
4783                 num++;
4784
4785                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4786                         struct sk_buff *skb;
4787
4788                         if (chan->sent) {
4789                                 chan->sent = 0;
4790                                 continue;
4791                         }
4792
4793                         if (skb_queue_empty(&chan->data_q))
4794                                 continue;
4795
4796                         skb = skb_peek(&chan->data_q);
4797                         if (skb->priority >= HCI_PRIO_MAX - 1)
4798                                 continue;
4799
4800                         skb->priority = HCI_PRIO_MAX - 1;
4801
4802                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4803                                skb->priority);
4804                 }
4805
4806                 if (hci_conn_num(hdev, type) == num)
4807                         break;
4808         }
4809
4810         rcu_read_unlock();
4811
4812 }
4813
4814 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4815 {
4816         /* Calculate count of blocks used by this packet */
4817         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4818 }
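/* Editor's note: a worked example for block-based flow control: with
 * hdev->block_len == 64 and a 304-byte skb (300 bytes of payload plus
 * the 4-byte ACL header), DIV_ROUND_UP(300, 64) charges 5 blocks
 * against hdev->block_cnt.
 */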
4819
4820 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4821 {
4822         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4823                 /* ACL tx timeout must be longer than maximum
4824                  * link supervision timeout (40.9 seconds) */
4825                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4826                                        HCI_ACL_TX_TIMEOUT))
4827                         hci_link_tx_to(hdev, ACL_LINK);
4828         }
4829 }
4830
4831 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4832 {
4833         unsigned int cnt = hdev->acl_cnt;
4834         struct hci_chan *chan;
4835         struct sk_buff *skb;
4836         int quote;
4837
4838         __check_timeout(hdev, cnt);
4839
4840         while (hdev->acl_cnt &&
4841                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4842                 u32 priority = (skb_peek(&chan->data_q))->priority;
4843                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4844                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4845                                skb->len, skb->priority);
4846
4847                         /* Stop if priority has changed */
4848                         if (skb->priority < priority)
4849                                 break;
4850
4851                         skb = skb_dequeue(&chan->data_q);
4852
4853                         hci_conn_enter_active_mode(chan->conn,
4854                                                    bt_cb(skb)->force_active);
4855
4856                         hci_send_frame(hdev, skb);
4857                         hdev->acl_last_tx = jiffies;
4858
4859                         hdev->acl_cnt--;
4860                         chan->sent++;
4861                         chan->conn->sent++;
4862                 }
4863         }
4864
4865         if (cnt != hdev->acl_cnt)
4866                 hci_prio_recalculate(hdev, ACL_LINK);
4867 }
4868
4869 static void hci_sched_acl_blk(struct hci_dev *hdev)
4870 {
4871         unsigned int cnt = hdev->block_cnt;
4872         struct hci_chan *chan;
4873         struct sk_buff *skb;
4874         int quote;
4875         u8 type;
4876
4877         __check_timeout(hdev, cnt);
4878
4879         BT_DBG("%s", hdev->name);
4880
4881         if (hdev->dev_type == HCI_AMP)
4882                 type = AMP_LINK;
4883         else
4884                 type = ACL_LINK;
4885
4886         while (hdev->block_cnt > 0 &&
4887                (chan = hci_chan_sent(hdev, type, &quote))) {
4888                 u32 priority = (skb_peek(&chan->data_q))->priority;
4889                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4890                         int blocks;
4891
4892                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4893                                skb->len, skb->priority);
4894
4895                         /* Stop if priority has changed */
4896                         if (skb->priority < priority)
4897                                 break;
4898
4899                         skb = skb_dequeue(&chan->data_q);
4900
4901                         blocks = __get_blocks(hdev, skb);
4902                         if (blocks > hdev->block_cnt)
4903                                 return;
4904
4905                         hci_conn_enter_active_mode(chan->conn,
4906                                                    bt_cb(skb)->force_active);
4907
4908                         hci_send_frame(hdev, skb);
4909                         hdev->acl_last_tx = jiffies;
4910
4911                         hdev->block_cnt -= blocks;
4912                         quote -= blocks;
4913
4914                         chan->sent += blocks;
4915                         chan->conn->sent += blocks;
4916                 }
4917         }
4918
4919         if (cnt != hdev->block_cnt)
4920                 hci_prio_recalculate(hdev, type);
4921 }
4922
4923 static void hci_sched_acl(struct hci_dev *hdev)
4924 {
4925         BT_DBG("%s", hdev->name);
4926
4927         /* Nothing to schedule without ACL links on a BR/EDR controller */
4928         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4929                 return;
4930
4931         /* Nothing to schedule without AMP links on an AMP controller */
4932         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4933                 return;
4934
4935         switch (hdev->flow_ctl_mode) {
4936         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4937                 hci_sched_acl_pkt(hdev);
4938                 break;
4939
4940         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4941                 hci_sched_acl_blk(hdev);
4942                 break;
4943         }
4944 }
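
/* Mode note: packet-based flow control is the Bluetooth default, where the
 * controller reports completed packets; block-based flow control, used with
 * AMP controllers, accounts in fixed-size data blocks instead, hence the
 * two scheduler variants above.
 */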
4945
4946 /* Schedule SCO */
4947 static void hci_sched_sco(struct hci_dev *hdev)
4948 {
4949         struct hci_conn *conn;
4950         struct sk_buff *skb;
4951         int quote;
4952
4953         BT_DBG("%s", hdev->name);
4954
4955         if (!hci_conn_num(hdev, SCO_LINK))
4956                 return;
4957
4958         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4959                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4960                         BT_DBG("skb %p len %d", skb, skb->len);
4961                         hci_send_frame(hdev, skb);
4962
4963                         conn->sent++;
4964                         if (conn->sent == ~0)
4965                                 conn->sent = 0;
4966                 }
4967         }
4968 }
4969
4970 static void hci_sched_esco(struct hci_dev *hdev)
4971 {
4972         struct hci_conn *conn;
4973         struct sk_buff *skb;
4974         int quote;
4975
4976         BT_DBG("%s", hdev->name);
4977
4978         if (!hci_conn_num(hdev, ESCO_LINK))
4979                 return;
4980
4981         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4982                                                      &quote))) {
4983                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4984                         BT_DBG("skb %p len %d", skb, skb->len);
4985                         hci_send_frame(hdev, skb);
4986
4987                         conn->sent++;
4988                         if (conn->sent == ~0)
4989                                 conn->sent = 0;
4990                 }
4991         }
4992 }
4993
4994 static void hci_sched_le(struct hci_dev *hdev)
4995 {
4996         struct hci_chan *chan;
4997         struct sk_buff *skb;
4998         int quote, cnt, tmp;
4999
5000         BT_DBG("%s", hdev->name);
5001
5002         if (!hci_conn_num(hdev, LE_LINK))
5003                 return;
5004
5005         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5006                 /* LE tx timeout must be longer than maximum
5007                  * link supervision timeout (40.9 seconds) */
5008                 if (!hdev->le_cnt && hdev->le_pkts &&
5009                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5010                         hci_link_tx_to(hdev, LE_LINK);
5011         }
5012
5013         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5014         tmp = cnt;
5015         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5016                 u32 priority = (skb_peek(&chan->data_q))->priority;
5017                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5018                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5019                                skb->len, skb->priority);
5020
5021                         /* Stop if priority has changed */
5022                         if (skb->priority < priority)
5023                                 break;
5024
5025                         skb = skb_dequeue(&chan->data_q);
5026
5027                         hci_send_frame(hdev, skb);
5028                         hdev->le_last_tx = jiffies;
5029
5030                         cnt--;
5031                         chan->sent++;
5032                         chan->conn->sent++;
5033                 }
5034         }
5035
5036         if (hdev->le_pkts)
5037                 hdev->le_cnt = cnt;
5038         else
5039                 hdev->acl_cnt = cnt;
5040
5041         if (cnt != tmp)
5042                 hci_prio_recalculate(hdev, LE_LINK);
5043 }
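
/* Buffer note: a controller that reports zero buffers in the LE Read Buffer
 * Size response shares its ACL buffers with LE traffic, which is why the
 * code above falls back to hdev->acl_cnt whenever hdev->le_pkts is 0.
 */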
5044
5045 static void hci_tx_work(struct work_struct *work)
5046 {
5047         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5048         struct sk_buff *skb;
5049
5050         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5051                hdev->sco_cnt, hdev->le_cnt);
5052
5053         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5054                 /* Schedule queues and send pending frames to the HCI driver */
5055                 hci_sched_acl(hdev);
5056                 hci_sched_sco(hdev);
5057                 hci_sched_esco(hdev);
5058                 hci_sched_le(hdev);
5059         }
5060
5061         /* Send next queued raw (unknown type) packet */
5062         while ((skb = skb_dequeue(&hdev->raw_q)))
5063                 hci_send_frame(hdev, skb);
5064 }
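
/* Usage sketch (pattern only; hci_send_acl() earlier in this file is a real
 * producer): senders never call hci_tx_work() directly, they queue data and
 * schedule the work item:
 *
 *	skb_queue_tail(&chan->data_q, skb);
 *	queue_work(hdev->workqueue, &hdev->tx_work);
 */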
5065
5066 /* ----- HCI RX task (incoming data processing) ----- */
5067
5068 /* ACL data packet */
5069 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5070 {
5071         struct hci_acl_hdr *hdr = (void *) skb->data;
5072         struct hci_conn *conn;
5073         __u16 handle, flags;
5074
5075         skb_pull(skb, HCI_ACL_HDR_SIZE);
5076
5077         handle = __le16_to_cpu(hdr->handle);
5078         flags  = hci_flags(handle);
5079         handle = hci_handle(handle);
5080
5081         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5082                handle, flags);
5083
5084         hdev->stat.acl_rx++;
5085
5086         hci_dev_lock(hdev);
5087         conn = hci_conn_hash_lookup_handle(hdev, handle);
5088         hci_dev_unlock(hdev);
5089
5090         if (conn) {
5091                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5092
5093                 /* Send to upper protocol */
5094                 l2cap_recv_acldata(conn, skb, flags);
5095                 return;
5096         } else {
5097                 BT_ERR("%s ACL packet for unknown connection handle %d",
5098                        hdev->name, handle);
5099         }
5100
5101         kfree_skb(skb);
5102 }
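
/* Worked example (illustrative value): for a raw handle field of 0x2042,
 * hci_handle(0x2042) == 0x042 (the 12-bit connection handle) and
 * hci_flags(0x2042) == 0x2, i.e. ACL_START in the packet boundary bits.
 */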
5103
5104 /* SCO data packet */
5105 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5106 {
5107         struct hci_sco_hdr *hdr = (void *) skb->data;
5108         struct hci_conn *conn;
5109         __u16 handle;
5110
5111         skb_pull(skb, HCI_SCO_HDR_SIZE);
5112
5113         handle = __le16_to_cpu(hdr->handle);
5114
5115         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5116
5117         hdev->stat.sco_rx++;
5118
5119         hci_dev_lock(hdev);
5120         conn = hci_conn_hash_lookup_handle(hdev, handle);
5121         hci_dev_unlock(hdev);
5122
5123         if (conn) {
5124                 /* Send to upper protocol */
5125                 sco_recv_scodata(conn, skb);
5126                 return;
5127         } else {
5128                 BT_ERR("%s SCO packet for unknown connection handle %d",
5129                        hdev->name, handle);
5130         }
5131
5132         kfree_skb(skb);
5133 }
5134
5135 static bool hci_req_is_complete(struct hci_dev *hdev)
5136 {
5137         struct sk_buff *skb;
5138
5139         skb = skb_peek(&hdev->cmd_q);
5140         if (!skb)
5141                 return true;
5142
5143         return bt_cb(skb)->req.start;
5144 }
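
/* Illustration: the request framework sets bt_cb(skb)->req.start on the
 * first command of every request, so an empty command queue, or a queue
 * whose head begins a new request, means the current request has no
 * commands left to send.
 */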
5145
5146 static void hci_resend_last(struct hci_dev *hdev)
5147 {
5148         struct hci_command_hdr *sent;
5149         struct sk_buff *skb;
5150         u16 opcode;
5151
5152         if (!hdev->sent_cmd)
5153                 return;
5154
5155         sent = (void *) hdev->sent_cmd->data;
5156         opcode = __le16_to_cpu(sent->opcode);
5157         if (opcode == HCI_OP_RESET)
5158                 return;
5159
5160         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5161         if (!skb)
5162                 return;
5163
5164         skb_queue_head(&hdev->cmd_q, skb);
5165         queue_work(hdev->workqueue, &hdev->cmd_work);
5166 }
5167
5168 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5169 {
5170         hci_req_complete_t req_complete = NULL;
5171         struct sk_buff *skb;
5172         unsigned long flags;
5173
5174         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5175
5176         /* If the completed command doesn't match the last one that was
5177          * sent, we need to handle it specially.
5178          */
5179         if (!hci_sent_cmd_data(hdev, opcode)) {
5180                 /* Some CSR-based controllers generate a spontaneous
5181                  * reset complete event during init, after which any
5182                  * pending command will never be completed. In such a
5183                  * case we need to resend whatever the last sent
5184                  * command was.
5185                  */
5186                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5187                         hci_resend_last(hdev);
5188
5189                 return;
5190         }
5191
5192         /* If the command succeeded and there are still more commands in
5193          * this request, the request is not yet complete.
5194          */
5195         if (!status && !hci_req_is_complete(hdev))
5196                 return;
5197
5198         /* If this was the last command in a request, the complete
5199          * callback is found in hdev->sent_cmd instead of the
5200          * command queue (hdev->cmd_q).
5201          */
5202         if (hdev->sent_cmd) {
5203                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5204
5205                 if (req_complete) {
5206                         /* We must set the complete callback to NULL to
5207                          * avoid calling the callback more than once if
5208                          * this function gets called again.
5209                          */
5210                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5211
5212                         goto call_complete;
5213                 }
5214         }
5215
5216         /* Remove all pending commands belonging to this request */
5217         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5218         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5219                 if (bt_cb(skb)->req.start) {
5220                         __skb_queue_head(&hdev->cmd_q, skb);
5221                         break;
5222                 }
5223
5224                 req_complete = bt_cb(skb)->req.complete;
5225                 kfree_skb(skb);
5226         }
5227         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5228
5229 call_complete:
5230         if (req_complete)
5231                 req_complete(hdev, status);
5232 }
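
/* Worked trace (hypothetical request of commands A, B, C): if B completes
 * with a non-zero status, hdev->sent_cmd (B) carries no complete callback,
 * so the loop above dequeues and frees C, picks up the request's complete
 * callback that hci_req_run() stored on the last command, stops at the
 * start of the next request and finally invokes the callback with B's
 * status.
 */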
5233
5234 static void hci_rx_work(struct work_struct *work)
5235 {
5236         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5237         struct sk_buff *skb;
5238
5239         BT_DBG("%s", hdev->name);
5240
5241         while ((skb = skb_dequeue(&hdev->rx_q))) {
5242                 /* Send copy to monitor */
5243                 hci_send_to_monitor(hdev, skb);
5244
5245                 if (atomic_read(&hdev->promisc)) {
5246                         /* Send copy to the sockets */
5247                         hci_send_to_sock(hdev, skb);
5248                 }
5249
5250                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5251                         kfree_skb(skb);
5252                         continue;
5253                 }
5254
5255                 if (test_bit(HCI_INIT, &hdev->flags)) {
5256                         /* Don't process data packets in this state. */
5257                         switch (bt_cb(skb)->pkt_type) {
5258                         case HCI_ACLDATA_PKT:
5259                         case HCI_SCODATA_PKT:
5260                                 kfree_skb(skb);
5261                                 continue;
5262                         }
5263                 }
5264
5265                 /* Process frame */
5266                 switch (bt_cb(skb)->pkt_type) {
5267                 case HCI_EVENT_PKT:
5268                         BT_DBG("%s Event packet", hdev->name);
5269                         hci_event_packet(hdev, skb);
5270                         break;
5271
5272                 case HCI_ACLDATA_PKT:
5273                         BT_DBG("%s ACL data packet", hdev->name);
5274                         hci_acldata_packet(hdev, skb);
5275                         break;
5276
5277                 case HCI_SCODATA_PKT:
5278                         BT_DBG("%s SCO data packet", hdev->name);
5279                         hci_scodata_packet(hdev, skb);
5280                         break;
5281
5282                 default:
5283                         kfree_skb(skb);
5284                         break;
5285                 }
5286         }
5287 }
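
/* Feed path (for reference): drivers tag bt_cb(skb)->pkt_type and hand
 * completed frames to hci_recv_frame(), defined earlier in this file, which
 * timestamps them, queues them on hdev->rx_q and schedules this work item.
 */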
5288
5289 static void hci_cmd_work(struct work_struct *work)
5290 {
5291         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5292         struct sk_buff *skb;
5293
5294         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5295                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5296
5297         /* Send queued commands */
5298         if (atomic_read(&hdev->cmd_cnt)) {
5299                 skb = skb_dequeue(&hdev->cmd_q);
5300                 if (!skb)
5301                         return;
5302
5303                 kfree_skb(hdev->sent_cmd);
5304
5305                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5306                 if (hdev->sent_cmd) {
5307                         atomic_dec(&hdev->cmd_cnt);
5308                         hci_send_frame(hdev, skb);
5309                         if (test_bit(HCI_RESET, &hdev->flags))
5310                                 cancel_delayed_work(&hdev->cmd_timer);
5311                         else
5312                                 schedule_delayed_work(&hdev->cmd_timer,
5313                                                       HCI_CMD_TIMEOUT);
5314                 } else {
5315                         skb_queue_head(&hdev->cmd_q, skb);
5316                         queue_work(hdev->workqueue, &hdev->cmd_work);
5317                 }
5318         }
5319 }
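
/* For context: HCI_CMD_TIMEOUT is msecs_to_jiffies(2000) in hci.h, so a
 * controller that neither completes nor rejects a command within two
 * seconds triggers hci_cmd_timeout(). The timer is suppressed while
 * HCI_RESET is pending, since a reset on some controllers takes longer
 * than the normal command timeout.
 */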
5320
5321 void hci_req_add_le_scan_disable(struct hci_request *req)
5322 {
5323         struct hci_cp_le_set_scan_enable cp;
5324
5325         memset(&cp, 0, sizeof(cp));
5326         cp.enable = LE_SCAN_DISABLE;
5327         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5328 }
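
/* A minimal usage sketch (hypothetical helper, mirroring the pattern of
 * hci_update_background_scan() below): build a request, append the
 * scan-disable command and run it asynchronously.
 */
static void example_le_scan_off(struct hci_dev *hdev)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, hdev);
        hci_req_add_le_scan_disable(&req);

        err = hci_req_run(&req, NULL);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}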
5329
5330 void hci_req_add_le_passive_scan(struct hci_request *req)
5331 {
5332         struct hci_cp_le_set_scan_param param_cp;
5333         struct hci_cp_le_set_scan_enable enable_cp;
5334         struct hci_dev *hdev = req->hdev;
5335         u8 own_addr_type;
5336
5337         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
5338          * during passive scanning. Not using an unresolvable address
5339          * here is important so that peer devices using direct
5340          * advertising with our address will be correctly reported
5341          * by the controller.
5342          */
5343         if (hci_update_random_address(req, false, &own_addr_type))
5344                 return;
5345
5346         memset(&param_cp, 0, sizeof(param_cp));
5347         param_cp.type = LE_SCAN_PASSIVE;
5348         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5349         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5350         param_cp.own_address_type = own_addr_type;
5351         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5352                     &param_cp);
5353
5354         memset(&enable_cp, 0, sizeof(enable_cp));
5355         enable_cp.enable = LE_SCAN_ENABLE;
5356         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5357         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5358                     &enable_cp);
5359 }
5360
5361 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5362 {
5363         if (status)
5364                 BT_DBG("HCI request failed to update background scanning: "
5365                        "status 0x%2.2x", status);
5366 }
5367
5368 /* This function controls background scanning based on the hdev->pend_le_conns
5369  * list. If there are pending LE connections, we start background scanning;
5370  * otherwise we stop it.
5371  *
5372  * This function requires that the caller hold hdev->lock.
5373  */
5374 void hci_update_background_scan(struct hci_dev *hdev)
5375 {
5376         struct hci_request req;
5377         struct hci_conn *conn;
5378         int err;
5379
5380         if (!test_bit(HCI_UP, &hdev->flags) ||
5381             test_bit(HCI_INIT, &hdev->flags) ||
5382             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5383             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5384             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5385             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5386                 return;
5387
5388         hci_req_init(&req, hdev);
5389
5390         if (list_empty(&hdev->pend_le_conns) &&
5391             list_empty(&hdev->pend_le_reports)) {
5392                 /* If there are no pending LE connections and no devices
5393                  * to be scanned for, we should stop background
5394                  * scanning.
5395                  */
5396
5397                 /* If the controller is not scanning, we are done. */
5398                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5399                         return;
5400
5401                 hci_req_add_le_scan_disable(&req);
5402
5403                 BT_DBG("%s stopping background scanning", hdev->name);
5404         } else {
5405                 /* If there is at least one pending LE connection, we should
5406                  * keep the background scan running.
5407                  */
5408
5409                 /* If the controller is connecting, we should not start
5410                  * scanning, since some controllers are not able to scan
5411                  * and connect at the same time.
5412                  */
5413                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5414                 if (conn)
5415                         return;
5416
5417                 /* If the controller is currently scanning, stop it so we
5418                  * don't miss any advertising (due to the duplicates filter).
5419                  */
5420                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5421                         hci_req_add_le_scan_disable(&req);
5422
5423                 hci_req_add_le_passive_scan(&req);
5424
5425                 BT_DBG("%s starting background scanning", hdev->name);
5426         }
5427
5428         err = hci_req_run(&req, update_background_scan_complete);
5429         if (err)
5430                 BT_ERR("Failed to run HCI request: err %d", err);
5431 }
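
/* Usage sketch (hypothetical caller): the update must run under hdev->lock,
 * for example:
 *
 *	hci_dev_lock(hdev);
 *	... update hdev->pend_le_conns or hdev->pend_le_reports ...
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */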