Bluetooth: Update discovery state earlier in hci_discovery_set_state
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
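
/* Illustrative sketch, not part of the original file: the boolean debugfs
 * writers in this file (dut_mode_write, force_sc_support_write and
 * force_static_address_write) all repeat the same copy_from_user() plus
 * strtobool() parse step. An equivalent shared helper could look like the
 * following; the name "example_debugfs_parse_bool" is hypothetical.
 */
static int example_debugfs_parse_bool(const char __user *user_buf,
                                      size_t count, bool *enable)
{
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        /* strtobool() already returns 0 on success or -EINVAL */
        return strtobool(buf, enable);
}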

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in reversed
                 * (little endian) byte order. So convert them into
                 * the big endian order expected by the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
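
/* Illustrative sketch, not part of the original file: a minimal caller of
 * __hci_cmd_sync(), assuming the struct hci_rp_read_bd_addr reply layout
 * (a status byte followed by the bdaddr) from <net/bluetooth/hci.h>. The
 * name "example_read_bd_addr" is hypothetical; locking and error handling
 * mirror dut_mode_write() above.
 */
static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct hci_rp_read_bd_addr *rp;
        struct sk_buff *skb;

        /* Serialize against other synchronous requests */
        hci_req_lock(hdev);
        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        if (skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return -EIO;
        }

        rp = (void *) skb->data;
        if (rp->status) {
                kfree_skb(skb);
                return -bt_to_errno(rp->status);
        }

        bacpy(bdaddr, &rp->bdaddr);
        kfree_skb(skb);

        return 0;
}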

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
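
/* Illustrative sketch, not part of the original file: hci_req_sync() expects
 * a request builder like hci_reset_req() below. The builder only queues
 * commands with hci_req_add(); hci_req_sync() then runs the queue and sleeps
 * until hci_req_sync_complete() fires. "example_scan_req" and its use of
 * Write Scan Enable are for demonstration only.
 */
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* A caller would then run, for example:
 *
 *      hci_req_sync(hdev, example_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                   HCI_CMD_TIMEOUT);
 */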

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is
         * supported, enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is
         * supported, enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

1677         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1678          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1679          * the first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795 {
1796         struct hci_dev *hdev = req->hdev;
1797
1798         BT_DBG("%s %ld", hdev->name, opt);
1799
1800         /* Reset */
1801         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802                 hci_reset_req(req, 0);
1803
1804         /* Read Local Version */
1805         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
1807         /* Read BD Address */
1808         if (hdev->set_bdaddr)
1809                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810 }
1811
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1813 {
1814         int err;
1815
1816         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1817                 return 0;
1818
1819         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1820         if (err < 0)
1821                 return err;
1822
1823         return 0;
1824 }
1825
1826 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1827 {
1828         __u8 scan = opt;
1829
1830         BT_DBG("%s %x", req->hdev->name, scan);
1831
1832         /* Inquiry and Page scans */
1833         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1834 }
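
/* Note (values per the Write Scan Enable command in the Core specification,
 * not spelled out here): 0x00 disables scanning, 0x01 enables inquiry scan
 * only, 0x02 page scan only, 0x03 both. A caller enables both scans
 * synchronously like this (sketch):
 *
 *      hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *                   HCI_INIT_TIMEOUT);
 */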
1835
1836 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1837 {
1838         __u8 auth = opt;
1839
1840         BT_DBG("%s %x", req->hdev->name, auth);
1841
1842         /* Authentication */
1843         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1844 }
1845
1846 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1847 {
1848         __u8 encrypt = opt;
1849
1850         BT_DBG("%s %x", req->hdev->name, encrypt);
1851
1852         /* Encryption */
1853         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1854 }
1855
1856 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1857 {
1858         __le16 policy = cpu_to_le16(opt);
1859
1860         BT_DBG("%s %x", req->hdev->name, policy);
1861
1862         /* Default link policy */
1863         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1864 }
1865
1866 /* Get HCI device by index.
1867  * Device is held on return. */
1868 struct hci_dev *hci_dev_get(int index)
1869 {
1870         struct hci_dev *hdev = NULL, *d;
1871
1872         BT_DBG("%d", index);
1873
1874         if (index < 0)
1875                 return NULL;
1876
1877         read_lock(&hci_dev_list_lock);
1878         list_for_each_entry(d, &hci_dev_list, list) {
1879                 if (d->id == index) {
1880                         hdev = hci_dev_hold(d);
1881                         break;
1882                 }
1883         }
1884         read_unlock(&hci_dev_list_lock);
1885         return hdev;
1886 }
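
/* Illustrative sketch (hypothetical caller, not part of this file): every
 * successful hci_dev_get() takes a reference that must be balanced with
 * hci_dev_put() once the caller is done with the device:
 */
static int example_use_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        /* ... operate on hdev while the reference is held ... */

        hci_dev_put(hdev);
        return 0;
}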
1887
1888 /* ---- Inquiry support ---- */
1889
1890 bool hci_discovery_active(struct hci_dev *hdev)
1891 {
1892         struct discovery_state *discov = &hdev->discovery;
1893
1894         switch (discov->state) {
1895         case DISCOVERY_FINDING:
1896         case DISCOVERY_RESOLVING:
1897                 return true;
1898
1899         default:
1900                 return false;
1901         }
1902 }
1903
1904 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1905 {
1906         int old_state = hdev->discovery.state;
1907
1908         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1909
1910         if (old_state == state)
1911                 return;
1912
1913         hdev->discovery.state = state;
1914
1915         switch (state) {
1916         case DISCOVERY_STOPPED:
1917                 hci_update_background_scan(hdev);
1918
1919                 if (old_state != DISCOVERY_STARTING)
1920                         mgmt_discovering(hdev, 0);
1921                 break;
1922         case DISCOVERY_STARTING:
1923                 break;
1924         case DISCOVERY_FINDING:
1925                 mgmt_discovering(hdev, 1);
1926                 break;
1927         case DISCOVERY_RESOLVING:
1928                 break;
1929         case DISCOVERY_STOPPING:
1930                 break;
1931         }
1932 }
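
/* Note: the state machine above moves STOPPED -> STARTING -> FINDING
 * [-> RESOLVING] -> STOPPING -> STOPPED. Only the STOPPED and FINDING
 * transitions emit mgmt_discovering() events; STARTING and STOPPING are
 * internal. Writing hdev->discovery.state before entering the switch
 * ensures that code invoked from the transition handlers, such as
 * hci_update_background_scan(), already observes the new state.
 */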
1933
1934 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1935 {
1936         struct discovery_state *cache = &hdev->discovery;
1937         struct inquiry_entry *p, *n;
1938
1939         list_for_each_entry_safe(p, n, &cache->all, all) {
1940                 list_del(&p->all);
1941                 kfree(p);
1942         }
1943
1944         INIT_LIST_HEAD(&cache->unknown);
1945         INIT_LIST_HEAD(&cache->resolve);
1946 }
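
/* Note: the discovery cache threads each inquiry_entry on up to three
 * lists: 'all' owns every entry, while 'unknown' and 'resolve' link
 * subsets of the same objects by name state. The flush above therefore
 * only kfree()s via 'all' and merely re-initialises the other two list
 * heads.
 */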
1947
1948 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1949                                                bdaddr_t *bdaddr)
1950 {
1951         struct discovery_state *cache = &hdev->discovery;
1952         struct inquiry_entry *e;
1953
1954         BT_DBG("cache %p, %pMR", cache, bdaddr);
1955
1956         list_for_each_entry(e, &cache->all, all) {
1957                 if (!bacmp(&e->data.bdaddr, bdaddr))
1958                         return e;
1959         }
1960
1961         return NULL;
1962 }
1963
1964 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1965                                                        bdaddr_t *bdaddr)
1966 {
1967         struct discovery_state *cache = &hdev->discovery;
1968         struct inquiry_entry *e;
1969
1970         BT_DBG("cache %p, %pMR", cache, bdaddr);
1971
1972         list_for_each_entry(e, &cache->unknown, list) {
1973                 if (!bacmp(&e->data.bdaddr, bdaddr))
1974                         return e;
1975         }
1976
1977         return NULL;
1978 }
1979
1980 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1981                                                        bdaddr_t *bdaddr,
1982                                                        int state)
1983 {
1984         struct discovery_state *cache = &hdev->discovery;
1985         struct inquiry_entry *e;
1986
1987         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1988
1989         list_for_each_entry(e, &cache->resolve, list) {
1990                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1991                         return e;
1992                 if (!bacmp(&e->data.bdaddr, bdaddr))
1993                         return e;
1994         }
1995
1996         return NULL;
1997 }
1998
1999 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2000                                       struct inquiry_entry *ie)
2001 {
2002         struct discovery_state *cache = &hdev->discovery;
2003         struct list_head *pos = &cache->resolve;
2004         struct inquiry_entry *p;
2005
2006         list_del(&ie->list);
2007
2008         list_for_each_entry(p, &cache->resolve, list) {
2009                 if (p->name_state != NAME_PENDING &&
2010                     abs(p->data.rssi) >= abs(ie->data.rssi))
2011                         break;
2012                 pos = &p->list;
2013         }
2014
2015         list_add(&ie->list, pos);
2016 }
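
/* Note: the walk above keeps 'resolve' ordered with NAME_PENDING entries
 * in front and the remaining entries sorted by ascending |RSSI|, i.e.
 * strongest signal first, which fixes the order in which remote names
 * get resolved.
 */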
2017
2018 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2019                              bool name_known)
2020 {
2021         struct discovery_state *cache = &hdev->discovery;
2022         struct inquiry_entry *ie;
2023         u32 flags = 0;
2024
2025         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2026
2027         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2028
2029         if (!data->ssp_mode)
2030                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2031
2032         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2033         if (ie) {
2034                 if (!ie->data.ssp_mode)
2035                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2036
2037                 if (ie->name_state == NAME_NEEDED &&
2038                     data->rssi != ie->data.rssi) {
2039                         ie->data.rssi = data->rssi;
2040                         hci_inquiry_cache_update_resolve(hdev, ie);
2041                 }
2042
2043                 goto update;
2044         }
2045
2046         /* Entry not in the cache. Add new one. */
2047         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2048         if (!ie) {
2049                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2050                 goto done;
2051         }
2052
2053         list_add(&ie->all, &cache->all);
2054
2055         if (name_known) {
2056                 ie->name_state = NAME_KNOWN;
2057         } else {
2058                 ie->name_state = NAME_NOT_KNOWN;
2059                 list_add(&ie->list, &cache->unknown);
2060         }
2061
2062 update:
2063         if (name_known && ie->name_state != NAME_KNOWN &&
2064             ie->name_state != NAME_PENDING) {
2065                 ie->name_state = NAME_KNOWN;
2066                 list_del(&ie->list);
2067         }
2068
2069         memcpy(&ie->data, data, sizeof(*data));
2070         ie->timestamp = jiffies;
2071         cache->timestamp = jiffies;
2072
2073         if (ie->name_state == NAME_NOT_KNOWN)
2074                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2075
2076 done:
2077         return flags;
2078 }
2079
2080 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2081 {
2082         struct discovery_state *cache = &hdev->discovery;
2083         struct inquiry_info *info = (struct inquiry_info *) buf;
2084         struct inquiry_entry *e;
2085         int copied = 0;
2086
2087         list_for_each_entry(e, &cache->all, all) {
2088                 struct inquiry_data *data = &e->data;
2089
2090                 if (copied >= num)
2091                         break;
2092
2093                 bacpy(&info->bdaddr, &data->bdaddr);
2094                 info->pscan_rep_mode    = data->pscan_rep_mode;
2095                 info->pscan_period_mode = data->pscan_period_mode;
2096                 info->pscan_mode        = data->pscan_mode;
2097                 memcpy(info->dev_class, data->dev_class, 3);
2098                 info->clock_offset      = data->clock_offset;
2099
2100                 info++;
2101                 copied++;
2102         }
2103
2104         BT_DBG("cache %p, copied %d", cache, copied);
2105         return copied;
2106 }
2107
2108 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2109 {
2110         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2111         struct hci_dev *hdev = req->hdev;
2112         struct hci_cp_inquiry cp;
2113
2114         BT_DBG("%s", hdev->name);
2115
2116         if (test_bit(HCI_INQUIRY, &hdev->flags))
2117                 return;
2118
2119         /* Start Inquiry */
2120         memcpy(&cp.lap, &ir->lap, 3);
2121         cp.length  = ir->length;
2122         cp.num_rsp = ir->num_rsp;
2123         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2124 }
2125
2126 static int wait_inquiry(void *word)
2127 {
2128         schedule();
2129         return signal_pending(current);
2130 }
2131
2132 int hci_inquiry(void __user *arg)
2133 {
2134         __u8 __user *ptr = arg;
2135         struct hci_inquiry_req ir;
2136         struct hci_dev *hdev;
2137         int err = 0, do_inquiry = 0, max_rsp;
2138         long timeo;
2139         __u8 *buf;
2140
2141         if (copy_from_user(&ir, ptr, sizeof(ir)))
2142                 return -EFAULT;
2143
2144         hdev = hci_dev_get(ir.dev_id);
2145         if (!hdev)
2146                 return -ENODEV;
2147
2148         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2149                 err = -EBUSY;
2150                 goto done;
2151         }
2152
2153         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2154                 err = -EOPNOTSUPP;
2155                 goto done;
2156         }
2157
2158         if (hdev->dev_type != HCI_BREDR) {
2159                 err = -EOPNOTSUPP;
2160                 goto done;
2161         }
2162
2163         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2164                 err = -EOPNOTSUPP;
2165                 goto done;
2166         }
2167
2168         hci_dev_lock(hdev);
2169         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2170             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2171                 hci_inquiry_cache_flush(hdev);
2172                 do_inquiry = 1;
2173         }
2174         hci_dev_unlock(hdev);
2175
2176         timeo = ir.length * msecs_to_jiffies(2000);
2177
2178         if (do_inquiry) {
2179                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2180                                    timeo);
2181                 if (err < 0)
2182                         goto done;
2183
2184                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2185                  * cleared). If it is interrupted by a signal, return -EINTR.
2186                  */
2187                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2188                                 TASK_INTERRUPTIBLE))
2189                         return -EINTR;
2190         }
2191
2192         /* For an unlimited number of responses, use a buffer with
2193          * 255 entries.
2194          */
2195         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2196
2197         /* cache_dump can't sleep. Therefore we allocate a temporary
2198          * buffer and then copy it to user space.
2199          */
2200         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2201         if (!buf) {
2202                 err = -ENOMEM;
2203                 goto done;
2204         }
2205
2206         hci_dev_lock(hdev);
2207         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2208         hci_dev_unlock(hdev);
2209
2210         BT_DBG("num_rsp %d", ir.num_rsp);
2211
2212         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2213                 ptr += sizeof(ir);
2214                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2215                                  ir.num_rsp))
2216                         err = -EFAULT;
2217         } else
2218                 err = -EFAULT;
2219
2220         kfree(buf);
2221
2222 done:
2223         hci_dev_put(hdev);
2224         return err;
2225 }
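
/* Illustrative sketch (userspace side, error handling trimmed; 'dd' is
 * assumed to be an open HCI socket fd). This is how an application
 * reaches hci_inquiry() above:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info    info[8];
 *      } buf = {
 *              .ir = {
 *                      .dev_id  = 0,
 *                      .flags   = IREQ_CACHE_FLUSH,
 *                      .lap     = { 0x33, 0x8b, 0x9e },   (GIAC)
 *                      .length  = 8,                      (8 * 1.28 s)
 *                      .num_rsp = 8,
 *              },
 *      };
 *
 *      ioctl(dd, HCIINQUIRY, &buf);
 */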
2226
2227 static int hci_dev_do_open(struct hci_dev *hdev)
2228 {
2229         int ret = 0;
2230
2231         BT_DBG("%s %p", hdev->name, hdev);
2232
2233         hci_req_lock(hdev);
2234
2235         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2236                 ret = -ENODEV;
2237                 goto done;
2238         }
2239
2240         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2241             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2242                 /* Check for rfkill but allow the HCI setup stage to
2243                  * proceed (which in itself doesn't cause any RF activity).
2244                  */
2245                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2246                         ret = -ERFKILL;
2247                         goto done;
2248                 }
2249
2250                 /* Check for valid public address or a configured static
2251                  * random address, but let the HCI setup proceed to
2252                  * be able to determine if there is a public address
2253                  * or not.
2254                  *
2255                  * In case of user channel usage, it is not important
2256                  * if a public address or static random address is
2257                  * available.
2258                  *
2259                  * This check is only valid for BR/EDR controllers
2260                  * since AMP controllers do not have an address.
2261                  */
2262                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2263                     hdev->dev_type == HCI_BREDR &&
2264                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2265                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2266                         ret = -EADDRNOTAVAIL;
2267                         goto done;
2268                 }
2269         }
2270
2271         if (test_bit(HCI_UP, &hdev->flags)) {
2272                 ret = -EALREADY;
2273                 goto done;
2274         }
2275
2276         if (hdev->open(hdev)) {
2277                 ret = -EIO;
2278                 goto done;
2279         }
2280
2281         atomic_set(&hdev->cmd_cnt, 1);
2282         set_bit(HCI_INIT, &hdev->flags);
2283
2284         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2285                 if (hdev->setup)
2286                         ret = hdev->setup(hdev);
2287
2288                 /* The transport driver can set these quirks before
2289                  * creating the HCI device or in its setup callback.
2290                  *
2291                  * In case any of them is set, the controller has to
2292                  * start up as unconfigured.
2293                  */
2294                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2295                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2296                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2297
2298                 /* For an unconfigured controller it is required to
2299                  * read at least the version information provided by
2300                  * the Read Local Version Information command.
2301                  *
2302                  * If the set_bdaddr driver callback is provided, then
2303                  * also the original Bluetooth public device address
2304                  * will be read using the Read BD Address command.
2305                  */
2306                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2307                         ret = __hci_unconf_init(hdev);
2308         }
2309
2310         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2311                 /* If public address change is configured, ensure that
2312                  * the address gets programmed. If the driver does not
2313                  * support changing the public address, fail the power
2314                  * on procedure.
2315                  */
2316                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2317                     hdev->set_bdaddr)
2318                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2319                 else
2320                         ret = -EADDRNOTAVAIL;
2321         }
2322
2323         if (!ret) {
2324                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2325                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2326                         ret = __hci_init(hdev);
2327         }
2328
2329         clear_bit(HCI_INIT, &hdev->flags);
2330
2331         if (!ret) {
2332                 hci_dev_hold(hdev);
2333                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2334                 set_bit(HCI_UP, &hdev->flags);
2335                 hci_notify(hdev, HCI_DEV_UP);
2336                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2337                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2338                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2339                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2340                     hdev->dev_type == HCI_BREDR) {
2341                         hci_dev_lock(hdev);
2342                         mgmt_powered(hdev, 1);
2343                         hci_dev_unlock(hdev);
2344                 }
2345         } else {
2346                 /* Init failed, cleanup */
2347                 flush_work(&hdev->tx_work);
2348                 flush_work(&hdev->cmd_work);
2349                 flush_work(&hdev->rx_work);
2350
2351                 skb_queue_purge(&hdev->cmd_q);
2352                 skb_queue_purge(&hdev->rx_q);
2353
2354                 if (hdev->flush)
2355                         hdev->flush(hdev);
2356
2357                 if (hdev->sent_cmd) {
2358                         kfree_skb(hdev->sent_cmd);
2359                         hdev->sent_cmd = NULL;
2360                 }
2361
2362                 hdev->close(hdev);
2363                 hdev->flags &= BIT(HCI_RAW);
2364         }
2365
2366 done:
2367         hci_req_unlock(hdev);
2368         return ret;
2369 }
2370
2371 /* ---- HCI ioctl helpers ---- */
2372
2373 int hci_dev_open(__u16 dev)
2374 {
2375         struct hci_dev *hdev;
2376         int err;
2377
2378         hdev = hci_dev_get(dev);
2379         if (!hdev)
2380                 return -ENODEV;
2381
2382         /* Devices that are marked as unconfigured can only be powered
2383          * up as user channel. Trying to bring them up as normal devices
2384          * will result in a failure. Only user channel operation is
2385          * possible.
2386          *
2387          * When this function is called for a user channel, the flag
2388          * HCI_USER_CHANNEL will be set first before attempting to
2389          * open the device.
2390          */
2391         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2392             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2393                 err = -EOPNOTSUPP;
2394                 goto done;
2395         }
2396
2397         /* We need to ensure that no other power on/off work is pending
2398          * before proceeding to call hci_dev_do_open. This is
2399          * particularly important if the setup procedure has not yet
2400          * completed.
2401          */
2402         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2403                 cancel_delayed_work(&hdev->power_off);
2404
2405         /* After this call it is guaranteed that the setup procedure
2406          * has finished. This means that error conditions like RFKILL
2407          * or no valid public or static random address apply.
2408          */
2409         flush_workqueue(hdev->req_workqueue);
2410
2411         err = hci_dev_do_open(hdev);
2412
2413 done:
2414         hci_dev_put(hdev);
2415         return err;
2416 }
2417
2418 /* This function requires the caller holds hdev->lock */
2419 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2420 {
2421         struct hci_conn_params *p;
2422
2423         list_for_each_entry(p, &hdev->le_conn_params, list)
2424                 list_del_init(&p->action);
2425
2426         BT_DBG("All LE pending actions cleared");
2427 }
2428
2429 static int hci_dev_do_close(struct hci_dev *hdev)
2430 {
2431         BT_DBG("%s %p", hdev->name, hdev);
2432
2433         cancel_delayed_work(&hdev->power_off);
2434
2435         hci_req_cancel(hdev, ENODEV);
2436         hci_req_lock(hdev);
2437
2438         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2439                 cancel_delayed_work_sync(&hdev->cmd_timer);
2440                 hci_req_unlock(hdev);
2441                 return 0;
2442         }
2443
2444         /* Flush RX and TX works */
2445         flush_work(&hdev->tx_work);
2446         flush_work(&hdev->rx_work);
2447
2448         if (hdev->discov_timeout > 0) {
2449                 cancel_delayed_work(&hdev->discov_off);
2450                 hdev->discov_timeout = 0;
2451                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2452                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2453         }
2454
2455         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2456                 cancel_delayed_work(&hdev->service_cache);
2457
2458         cancel_delayed_work_sync(&hdev->le_scan_disable);
2459
2460         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2461                 cancel_delayed_work_sync(&hdev->rpa_expired);
2462
2463         hci_dev_lock(hdev);
2464         hci_inquiry_cache_flush(hdev);
2465         hci_conn_hash_flush(hdev);
2466         hci_pend_le_actions_clear(hdev);
2467         hci_dev_unlock(hdev);
2468
2469         hci_notify(hdev, HCI_DEV_DOWN);
2470
2471         if (hdev->flush)
2472                 hdev->flush(hdev);
2473
2474         /* Reset device */
2475         skb_queue_purge(&hdev->cmd_q);
2476         atomic_set(&hdev->cmd_cnt, 1);
2477         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2478             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2479             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2480                 set_bit(HCI_INIT, &hdev->flags);
2481                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2482                 clear_bit(HCI_INIT, &hdev->flags);
2483         }
2484
2485         /* flush cmd  work */
2486         flush_work(&hdev->cmd_work);
2487
2488         /* Drop queues */
2489         skb_queue_purge(&hdev->rx_q);
2490         skb_queue_purge(&hdev->cmd_q);
2491         skb_queue_purge(&hdev->raw_q);
2492
2493         /* Drop last sent command */
2494         if (hdev->sent_cmd) {
2495                 cancel_delayed_work_sync(&hdev->cmd_timer);
2496                 kfree_skb(hdev->sent_cmd);
2497                 hdev->sent_cmd = NULL;
2498         }
2499
2500         kfree_skb(hdev->recv_evt);
2501         hdev->recv_evt = NULL;
2502
2503         /* After this point our queues are empty
2504          * and no tasks are scheduled. */
2505         hdev->close(hdev);
2506
2507         /* Clear flags */
2508         hdev->flags &= BIT(HCI_RAW);
2509         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2510
2511         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2512                 if (hdev->dev_type == HCI_BREDR) {
2513                         hci_dev_lock(hdev);
2514                         mgmt_powered(hdev, 0);
2515                         hci_dev_unlock(hdev);
2516                 }
2517         }
2518
2519         /* Controller radio is available but is currently powered down */
2520         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2521
2522         memset(hdev->eir, 0, sizeof(hdev->eir));
2523         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2524         bacpy(&hdev->random_addr, BDADDR_ANY);
2525
2526         hci_req_unlock(hdev);
2527
2528         hci_dev_put(hdev);
2529         return 0;
2530 }
2531
2532 int hci_dev_close(__u16 dev)
2533 {
2534         struct hci_dev *hdev;
2535         int err;
2536
2537         hdev = hci_dev_get(dev);
2538         if (!hdev)
2539                 return -ENODEV;
2540
2541         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2542                 err = -EBUSY;
2543                 goto done;
2544         }
2545
2546         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2547                 cancel_delayed_work(&hdev->power_off);
2548
2549         err = hci_dev_do_close(hdev);
2550
2551 done:
2552         hci_dev_put(hdev);
2553         return err;
2554 }
2555
2556 int hci_dev_reset(__u16 dev)
2557 {
2558         struct hci_dev *hdev;
2559         int ret = 0;
2560
2561         hdev = hci_dev_get(dev);
2562         if (!hdev)
2563                 return -ENODEV;
2564
2565         hci_req_lock(hdev);
2566
2567         if (!test_bit(HCI_UP, &hdev->flags)) {
2568                 ret = -ENETDOWN;
2569                 goto done;
2570         }
2571
2572         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573                 ret = -EBUSY;
2574                 goto done;
2575         }
2576
2577         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2578                 ret = -EOPNOTSUPP;
2579                 goto done;
2580         }
2581
2582         /* Drop queues */
2583         skb_queue_purge(&hdev->rx_q);
2584         skb_queue_purge(&hdev->cmd_q);
2585
2586         hci_dev_lock(hdev);
2587         hci_inquiry_cache_flush(hdev);
2588         hci_conn_hash_flush(hdev);
2589         hci_dev_unlock(hdev);
2590
2591         if (hdev->flush)
2592                 hdev->flush(hdev);
2593
2594         atomic_set(&hdev->cmd_cnt, 1);
2595         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2596
2597         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2598
2599 done:
2600         hci_req_unlock(hdev);
2601         hci_dev_put(hdev);
2602         return ret;
2603 }
2604
2605 int hci_dev_reset_stat(__u16 dev)
2606 {
2607         struct hci_dev *hdev;
2608         int ret = 0;
2609
2610         hdev = hci_dev_get(dev);
2611         if (!hdev)
2612                 return -ENODEV;
2613
2614         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2615                 ret = -EBUSY;
2616                 goto done;
2617         }
2618
2619         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2620                 ret = -EOPNOTSUPP;
2621                 goto done;
2622         }
2623
2624         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2625
2626 done:
2627         hci_dev_put(hdev);
2628         return ret;
2629 }
2630
2631 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2632 {
2633         struct hci_dev *hdev;
2634         struct hci_dev_req dr;
2635         int err = 0;
2636
2637         if (copy_from_user(&dr, arg, sizeof(dr)))
2638                 return -EFAULT;
2639
2640         hdev = hci_dev_get(dr.dev_id);
2641         if (!hdev)
2642                 return -ENODEV;
2643
2644         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2645                 err = -EBUSY;
2646                 goto done;
2647         }
2648
2649         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2650                 err = -EOPNOTSUPP;
2651                 goto done;
2652         }
2653
2654         if (hdev->dev_type != HCI_BREDR) {
2655                 err = -EOPNOTSUPP;
2656                 goto done;
2657         }
2658
2659         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2660                 err = -EOPNOTSUPP;
2661                 goto done;
2662         }
2663
2664         switch (cmd) {
2665         case HCISETAUTH:
2666                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2667                                    HCI_INIT_TIMEOUT);
2668                 break;
2669
2670         case HCISETENCRYPT:
2671                 if (!lmp_encrypt_capable(hdev)) {
2672                         err = -EOPNOTSUPP;
2673                         break;
2674                 }
2675
2676                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2677                         /* Auth must be enabled first */
2678                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2679                                            HCI_INIT_TIMEOUT);
2680                         if (err)
2681                                 break;
2682                 }
2683
2684                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2685                                    HCI_INIT_TIMEOUT);
2686                 break;
2687
2688         case HCISETSCAN:
2689                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2690                                    HCI_INIT_TIMEOUT);
2691                 break;
2692
2693         case HCISETLINKPOL:
2694                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2695                                    HCI_INIT_TIMEOUT);
2696                 break;
2697
2698         case HCISETLINKMODE:
2699                 hdev->link_mode = ((__u16) dr.dev_opt) &
2700                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2701                 break;
2702
2703         case HCISETPTYPE:
2704                 hdev->pkt_type = (__u16) dr.dev_opt;
2705                 break;
2706
2707         case HCISETACLMTU:
2708                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2709                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2710                 break;
2711
2712         case HCISETSCOMTU:
2713                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2714                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2715                 break;
2716
2717         default:
2718                 err = -EINVAL;
2719                 break;
2720         }
2721
2722 done:
2723         hci_dev_put(hdev);
2724         return err;
2725 }
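
/* Note: for HCISETACLMTU/HCISETSCOMTU the 32-bit dev_opt is read as two
 * host-order 16-bit halves, packet count first and MTU second; on a
 * little-endian host a caller would pack it as (sketch):
 *
 *      dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 */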
2726
2727 int hci_get_dev_list(void __user *arg)
2728 {
2729         struct hci_dev *hdev;
2730         struct hci_dev_list_req *dl;
2731         struct hci_dev_req *dr;
2732         int n = 0, size, err;
2733         __u16 dev_num;
2734
2735         if (get_user(dev_num, (__u16 __user *) arg))
2736                 return -EFAULT;
2737
2738         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2739                 return -EINVAL;
2740
2741         size = sizeof(*dl) + dev_num * sizeof(*dr);
2742
2743         dl = kzalloc(size, GFP_KERNEL);
2744         if (!dl)
2745                 return -ENOMEM;
2746
2747         dr = dl->dev_req;
2748
2749         read_lock(&hci_dev_list_lock);
2750         list_for_each_entry(hdev, &hci_dev_list, list) {
2751                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2752                         cancel_delayed_work(&hdev->power_off);
2753
2754                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2756
2757                 (dr + n)->dev_id  = hdev->id;
2758                 (dr + n)->dev_opt = hdev->flags;
2759
2760                 if (++n >= dev_num)
2761                         break;
2762         }
2763         read_unlock(&hci_dev_list_lock);
2764
2765         dl->dev_num = n;
2766         size = sizeof(*dl) + n * sizeof(*dr);
2767
2768         err = copy_to_user(arg, dl, size);
2769         kfree(dl);
2770
2771         return err ? -EFAULT : 0;
2772 }
2773
2774 int hci_get_dev_info(void __user *arg)
2775 {
2776         struct hci_dev *hdev;
2777         struct hci_dev_info di;
2778         int err = 0;
2779
2780         if (copy_from_user(&di, arg, sizeof(di)))
2781                 return -EFAULT;
2782
2783         hdev = hci_dev_get(di.dev_id);
2784         if (!hdev)
2785                 return -ENODEV;
2786
2787         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2788                 cancel_delayed_work_sync(&hdev->power_off);
2789
2790         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2791                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2792
2793         strcpy(di.name, hdev->name);
2794         di.bdaddr   = hdev->bdaddr;
2795         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2796         di.flags    = hdev->flags;
2797         di.pkt_type = hdev->pkt_type;
2798         if (lmp_bredr_capable(hdev)) {
2799                 di.acl_mtu  = hdev->acl_mtu;
2800                 di.acl_pkts = hdev->acl_pkts;
2801                 di.sco_mtu  = hdev->sco_mtu;
2802                 di.sco_pkts = hdev->sco_pkts;
2803         } else {
2804                 di.acl_mtu  = hdev->le_mtu;
2805                 di.acl_pkts = hdev->le_pkts;
2806                 di.sco_mtu  = 0;
2807                 di.sco_pkts = 0;
2808         }
2809         di.link_policy = hdev->link_policy;
2810         di.link_mode   = hdev->link_mode;
2811
2812         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2813         memcpy(&di.features, &hdev->features, sizeof(di.features));
2814
2815         if (copy_to_user(arg, &di, sizeof(di)))
2816                 err = -EFAULT;
2817
2818         hci_dev_put(hdev);
2819
2820         return err;
2821 }
2822
2823 /* ---- Interface to HCI drivers ---- */
2824
2825 static int hci_rfkill_set_block(void *data, bool blocked)
2826 {
2827         struct hci_dev *hdev = data;
2828
2829         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2830
2831         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2832                 return -EBUSY;
2833
2834         if (blocked) {
2835                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2836                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2837                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2838                         hci_dev_do_close(hdev);
2839         } else {
2840                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2841         }
2842
2843         return 0;
2844 }
2845
2846 static const struct rfkill_ops hci_rfkill_ops = {
2847         .set_block = hci_rfkill_set_block,
2848 };
2849
2850 static void hci_power_on(struct work_struct *work)
2851 {
2852         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2853         int err;
2854
2855         BT_DBG("%s", hdev->name);
2856
2857         err = hci_dev_do_open(hdev);
2858         if (err < 0) {
2859                 mgmt_set_powered_failed(hdev, err);
2860                 return;
2861         }
2862
2863         /* During the HCI setup phase, a few error conditions are
2864          * ignored and they need to be checked now. If they are still
2865          * valid, it is important to turn the device back off.
2866          */
2867         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2868             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2869             (hdev->dev_type == HCI_BREDR &&
2870              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2871              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2872                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2873                 hci_dev_do_close(hdev);
2874         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2875                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2876                                    HCI_AUTO_OFF_TIMEOUT);
2877         }
2878
2879         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2880                 /* For unconfigured devices, set the HCI_RAW flag
2881                  * so that userspace can easily identify them.
2882                  */
2883                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2884                         set_bit(HCI_RAW, &hdev->flags);
2885
2886                 /* For fully configured devices, this will send
2887                  * the Index Added event. For unconfigured devices,
2888                  * it will send the Unconfigured Index Added event.
2889                  *
2890                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2891                  * and no event will be sent.
2892                  */
2893                 mgmt_index_added(hdev);
2894         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2895                 /* Now that the controller is configured, it is
2896                  * important to clear the HCI_RAW flag.
2897                  */
2898                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2899                         clear_bit(HCI_RAW, &hdev->flags);
2900
2901                 /* Powering on the controller with HCI_CONFIG set only
2902                  * happens with the transition from unconfigured to
2903                  * configured. This will send the Index Added event.
2904                  */
2905                 mgmt_index_added(hdev);
2906         }
2907 }
2908
2909 static void hci_power_off(struct work_struct *work)
2910 {
2911         struct hci_dev *hdev = container_of(work, struct hci_dev,
2912                                             power_off.work);
2913
2914         BT_DBG("%s", hdev->name);
2915
2916         hci_dev_do_close(hdev);
2917 }
2918
2919 static void hci_discov_off(struct work_struct *work)
2920 {
2921         struct hci_dev *hdev;
2922
2923         hdev = container_of(work, struct hci_dev, discov_off.work);
2924
2925         BT_DBG("%s", hdev->name);
2926
2927         mgmt_discoverable_timeout(hdev);
2928 }
2929
2930 void hci_uuids_clear(struct hci_dev *hdev)
2931 {
2932         struct bt_uuid *uuid, *tmp;
2933
2934         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2935                 list_del(&uuid->list);
2936                 kfree(uuid);
2937         }
2938 }
2939
2940 void hci_link_keys_clear(struct hci_dev *hdev)
2941 {
2942         struct list_head *p, *n;
2943
2944         list_for_each_safe(p, n, &hdev->link_keys) {
2945                 struct link_key *key;
2946
2947                 key = list_entry(p, struct link_key, list);
2948
2949                 list_del(p);
2950                 kfree(key);
2951         }
2952 }
2953
2954 void hci_smp_ltks_clear(struct hci_dev *hdev)
2955 {
2956         struct smp_ltk *k, *tmp;
2957
2958         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2959                 list_del(&k->list);
2960                 kfree(k);
2961         }
2962 }
2963
2964 void hci_smp_irks_clear(struct hci_dev *hdev)
2965 {
2966         struct smp_irk *k, *tmp;
2967
2968         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2969                 list_del(&k->list);
2970                 kfree(k);
2971         }
2972 }
2973
2974 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2975 {
2976         struct link_key *k;
2977
2978         list_for_each_entry(k, &hdev->link_keys, list)
2979                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2980                         return k;
2981
2982         return NULL;
2983 }
2984
2985 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2986                                u8 key_type, u8 old_key_type)
2987 {
2988         /* Legacy key */
2989         if (key_type < 0x03)
2990                 return true;
2991
2992         /* Debug keys are insecure so don't store them persistently */
2993         if (key_type == HCI_LK_DEBUG_COMBINATION)
2994                 return false;
2995
2996         /* Changed combination key and there's no previous one */
2997         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2998                 return false;
2999
3000         /* Security mode 3 case */
3001         if (!conn)
3002                 return true;
3003
3004         /* Neither local nor remote side had no-bonding as a requirement */
3005         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3006                 return true;
3007
3008         /* Local side had dedicated bonding as a requirement */
3009         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3010                 return true;
3011
3012         /* Remote side had dedicated bonding as a requirement */
3013         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3014                 return true;
3015
3016         /* If none of the above criteria match, then don't store the key
3017          * persistently */
3018         return false;
3019 }
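
/* Note: the 0x01-0x03 constants above are the authentication requirement
 * values from the Core specification:
 *   0x00 no bonding             0x01 no bonding (MITM)
 *   0x02 dedicated bonding      0x03 dedicated bonding (MITM)
 *   0x04 general bonding        0x05 general bonding (MITM)
 * so "> 0x01" means the side asked for some form of bonding.
 */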
3020
3021 static bool ltk_type_master(u8 type)
3022 {
3023         return (type == SMP_LTK);
3024 }
3025
3026 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3027                              bool master)
3028 {
3029         struct smp_ltk *k;
3030
3031         list_for_each_entry(k, &hdev->long_term_keys, list) {
3032                 if (k->ediv != ediv || k->rand != rand)
3033                         continue;
3034
3035                 if (ltk_type_master(k->type) != master)
3036                         continue;
3037
3038                 return k;
3039         }
3040
3041         return NULL;
3042 }
3043
3044 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3045                                      u8 addr_type, bool master)
3046 {
3047         struct smp_ltk *k;
3048
3049         list_for_each_entry(k, &hdev->long_term_keys, list)
3050                 if (addr_type == k->bdaddr_type &&
3051                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3052                     ltk_type_master(k->type) == master)
3053                         return k;
3054
3055         return NULL;
3056 }
3057
3058 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3059 {
3060         struct smp_irk *irk;
3061
3062         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3063                 if (!bacmp(&irk->rpa, rpa))
3064                         return irk;
3065         }
3066
3067         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3068                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3069                         bacpy(&irk->rpa, rpa);
3070                         return irk;
3071                 }
3072         }
3073
3074         return NULL;
3075 }
3076
3077 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3078                                      u8 addr_type)
3079 {
3080         struct smp_irk *irk;
3081
3082         /* Identity Address must be public or static random */
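        /* Note: static random addresses have their two most significant
         * bits set to 1; bdaddr_t is stored little-endian, so b[5] is
         * the most significant byte, hence the 0xc0 test below.
         */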
3083         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3084                 return NULL;
3085
3086         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3087                 if (addr_type == irk->addr_type &&
3088                     bacmp(bdaddr, &irk->bdaddr) == 0)
3089                         return irk;
3090         }
3091
3092         return NULL;
3093 }
3094
3095 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3096                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3097                                   u8 pin_len, bool *persistent)
3098 {
3099         struct link_key *key, *old_key;
3100         u8 old_key_type;
3101
3102         old_key = hci_find_link_key(hdev, bdaddr);
3103         if (old_key) {
3104                 old_key_type = old_key->type;
3105                 key = old_key;
3106         } else {
3107                 old_key_type = conn ? conn->key_type : 0xff;
3108                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3109                 if (!key)
3110                         return NULL;
3111                 list_add(&key->list, &hdev->link_keys);
3112         }
3113
3114         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3115
3116         /* Some buggy controller combinations generate a changed
3117          * combination key for legacy pairing even when there's no
3118          * previous key */
3119         if (type == HCI_LK_CHANGED_COMBINATION &&
3120             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3121                 type = HCI_LK_COMBINATION;
3122                 if (conn)
3123                         conn->key_type = type;
3124         }
3125
3126         bacpy(&key->bdaddr, bdaddr);
3127         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3128         key->pin_len = pin_len;
3129
3130         if (type == HCI_LK_CHANGED_COMBINATION)
3131                 key->type = old_key_type;
3132         else
3133                 key->type = type;
3134
3135         if (persistent)
3136                 *persistent = hci_persistent_key(hdev, conn, type,
3137                                                  old_key_type);
3138
3139         return key;
3140 }
3141
3142 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3143                             u8 addr_type, u8 type, u8 authenticated,
3144                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3145 {
3146         struct smp_ltk *key, *old_key;
3147         bool master = ltk_type_master(type);
3148
3149         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3150         if (old_key)
3151                 key = old_key;
3152         else {
3153                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3154                 if (!key)
3155                         return NULL;
3156                 list_add(&key->list, &hdev->long_term_keys);
3157         }
3158
3159         bacpy(&key->bdaddr, bdaddr);
3160         key->bdaddr_type = addr_type;
3161         memcpy(key->val, tk, sizeof(key->val));
3162         key->authenticated = authenticated;
3163         key->ediv = ediv;
3164         key->rand = rand;
3165         key->enc_size = enc_size;
3166         key->type = type;
3167
3168         return key;
3169 }
3170
3171 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3173 {
3174         struct smp_irk *irk;
3175
3176         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3177         if (!irk) {
3178                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3179                 if (!irk)
3180                         return NULL;
3181
3182                 bacpy(&irk->bdaddr, bdaddr);
3183                 irk->addr_type = addr_type;
3184
3185                 list_add(&irk->list, &hdev->identity_resolving_keys);
3186         }
3187
3188         memcpy(irk->val, val, 16);
3189         bacpy(&irk->rpa, rpa);
3190
3191         return irk;
3192 }
3193
3194 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3195 {
3196         struct link_key *key;
3197
3198         key = hci_find_link_key(hdev, bdaddr);
3199         if (!key)
3200                 return -ENOENT;
3201
3202         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3203
3204         list_del(&key->list);
3205         kfree(key);
3206
3207         return 0;
3208 }
3209
3210 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3211 {
3212         struct smp_ltk *k, *tmp;
3213         int removed = 0;
3214
3215         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3216                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3217                         continue;
3218
3219                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3220
3221                 list_del(&k->list);
3222                 kfree(k);
3223                 removed++;
3224         }
3225
3226         return removed ? 0 : -ENOENT;
3227 }
3228
3229 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3230 {
3231         struct smp_irk *k, *tmp;
3232
3233         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3234                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3235                         continue;
3236
3237                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3238
3239                 list_del(&k->list);
3240                 kfree(k);
3241         }
3242 }
3243
3244 /* HCI command timer function */
3245 static void hci_cmd_timeout(struct work_struct *work)
3246 {
3247         struct hci_dev *hdev = container_of(work, struct hci_dev,
3248                                             cmd_timer.work);
3249
3250         if (hdev->sent_cmd) {
3251                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3252                 u16 opcode = __le16_to_cpu(sent->opcode);
3253
3254                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3255         } else {
3256                 BT_ERR("%s command tx timeout", hdev->name);
3257         }
3258
3259         atomic_set(&hdev->cmd_cnt, 1);
3260         queue_work(hdev->workqueue, &hdev->cmd_work);
3261 }
3262
3263 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3264                                           bdaddr_t *bdaddr)
3265 {
3266         struct oob_data *data;
3267
3268         list_for_each_entry(data, &hdev->remote_oob_data, list)
3269                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3270                         return data;
3271
3272         return NULL;
3273 }
3274
3275 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3276 {
3277         struct oob_data *data;
3278
3279         data = hci_find_remote_oob_data(hdev, bdaddr);
3280         if (!data)
3281                 return -ENOENT;
3282
3283         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3284
3285         list_del(&data->list);
3286         kfree(data);
3287
3288         return 0;
3289 }
3290
3291 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3292 {
3293         struct oob_data *data, *n;
3294
3295         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3296                 list_del(&data->list);
3297                 kfree(data);
3298         }
3299 }
3300
3301 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3302                             u8 *hash, u8 *randomizer)
3303 {
3304         struct oob_data *data;
3305
3306         data = hci_find_remote_oob_data(hdev, bdaddr);
3307         if (!data) {
3308                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3309                 if (!data)
3310                         return -ENOMEM;
3311
3312                 bacpy(&data->bdaddr, bdaddr);
3313                 list_add(&data->list, &hdev->remote_oob_data);
3314         }
3315
3316         memcpy(data->hash192, hash, sizeof(data->hash192));
3317         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3318
3319         memset(data->hash256, 0, sizeof(data->hash256));
3320         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3321
3322         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3323
3324         return 0;
3325 }
3326
3327 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3328                                 u8 *hash192, u8 *randomizer192,
3329                                 u8 *hash256, u8 *randomizer256)
3330 {
3331         struct oob_data *data;
3332
3333         data = hci_find_remote_oob_data(hdev, bdaddr);
3334         if (!data) {
3335                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3336                 if (!data)
3337                         return -ENOMEM;
3338
3339                 bacpy(&data->bdaddr, bdaddr);
3340                 list_add(&data->list, &hdev->remote_oob_data);
3341         }
3342
3343         memcpy(data->hash192, hash192, sizeof(data->hash192));
3344         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3345
3346         memcpy(data->hash256, hash256, sizeof(data->hash256));
3347         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3348
3349         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3350
3351         return 0;
3352 }
3353
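/* Note: the 192-bit hash/randomizer pair carries the legacy Secure
 * Simple Pairing OOB values (P-192), while the 256-bit pair is only
 * meaningful once Secure Connections (P-256) is in use.  The
 * non-extended helper above therefore zeroes the 256-bit fields so
 * stale Secure Connections material never survives next to freshly
 * stored legacy values.
 */
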
3354 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3355                                          bdaddr_t *bdaddr, u8 type)
3356 {
3357         struct bdaddr_list *b;
3358
3359         list_for_each_entry(b, &hdev->blacklist, list) {
3360                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3361                         return b;
3362         }
3363
3364         return NULL;
3365 }
3366
3367 static void hci_blacklist_clear(struct hci_dev *hdev)
3368 {
3369         struct list_head *p, *n;
3370
3371         list_for_each_safe(p, n, &hdev->blacklist) {
3372                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3373
3374                 list_del(p);
3375                 kfree(b);
3376         }
3377 }
3378
3379 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3380 {
3381         struct bdaddr_list *entry;
3382
3383         if (!bacmp(bdaddr, BDADDR_ANY))
3384                 return -EBADF;
3385
3386         if (hci_blacklist_lookup(hdev, bdaddr, type))
3387                 return -EEXIST;
3388
3389         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3390         if (!entry)
3391                 return -ENOMEM;
3392
3393         bacpy(&entry->bdaddr, bdaddr);
3394         entry->bdaddr_type = type;
3395
3396         list_add(&entry->list, &hdev->blacklist);
3397
3398         return 0;
3399 }
3400
3401 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3402 {
3403         struct bdaddr_list *entry;
3404
3405         if (!bacmp(bdaddr, BDADDR_ANY)) {
3406                 hci_blacklist_clear(hdev);
3407                 return 0;
3408         }
3409
3410         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3411         if (!entry)
3412                 return -ENOENT;
3413
3414         list_del(&entry->list);
3415         kfree(entry);
3416
3417         return 0;
3418 }
3419
3420 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3421                                           bdaddr_t *bdaddr, u8 type)
3422 {
3423         struct bdaddr_list *b;
3424
3425         list_for_each_entry(b, &hdev->le_white_list, list) {
3426                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3427                         return b;
3428         }
3429
3430         return NULL;
3431 }
3432
3433 void hci_white_list_clear(struct hci_dev *hdev)
3434 {
3435         struct list_head *p, *n;
3436
3437         list_for_each_safe(p, n, &hdev->le_white_list) {
3438                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3439
3440                 list_del(p);
3441                 kfree(b);
3442         }
3443 }
3444
3445 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3446 {
3447         struct bdaddr_list *entry;
3448
3449         if (!bacmp(bdaddr, BDADDR_ANY))
3450                 return -EBADF;
3451
3452         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3453         if (!entry)
3454                 return -ENOMEM;
3455
3456         bacpy(&entry->bdaddr, bdaddr);
3457         entry->bdaddr_type = type;
3458
3459         list_add(&entry->list, &hdev->le_white_list);
3460
3461         return 0;
3462 }
3463
3464 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3465 {
3466         struct bdaddr_list *entry;
3467
3468         if (!bacmp(bdaddr, BDADDR_ANY))
3469                 return -EBADF;
3470
3471         entry = hci_white_list_lookup(hdev, bdaddr, type);
3472         if (!entry)
3473                 return -ENOENT;
3474
3475         list_del(&entry->list);
3476         kfree(entry);
3477
3478         return 0;
3479 }
3480
3481 /* This function requires the caller holds hdev->lock */
3482 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3483                                                bdaddr_t *addr, u8 addr_type)
3484 {
3485         struct hci_conn_params *params;
3486
3487         /* The conn params list only contains identity addresses */
3488         if (!hci_is_identity_address(addr, addr_type))
3489                 return NULL;
3490
3491         list_for_each_entry(params, &hdev->le_conn_params, list) {
3492                 if (bacmp(&params->addr, addr) == 0 &&
3493                     params->addr_type == addr_type) {
3494                         return params;
3495                 }
3496         }
3497
3498         return NULL;
3499 }
3500
3501 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3502 {
3503         struct hci_conn *conn;
3504
3505         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3506         if (!conn)
3507                 return false;
3508
3509         if (conn->dst_type != type)
3510                 return false;
3511
3512         if (conn->state != BT_CONNECTED)
3513                 return false;
3514
3515         return true;
3516 }
3517
3518 /* This function requires the caller holds hdev->lock */
3519 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3520                                                   bdaddr_t *addr, u8 addr_type)
3521 {
3522         struct hci_conn_params *param;
3523
3524         /* The list only contains identity addresses */
3525         if (!hci_is_identity_address(addr, addr_type))
3526                 return NULL;
3527
3528         list_for_each_entry(param, list, action) {
3529                 if (bacmp(&param->addr, addr) == 0 &&
3530                     param->addr_type == addr_type)
3531                         return param;
3532         }
3533
3534         return NULL;
3535 }
3536
3537 /* This function requires the caller holds hdev->lock */
3538 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3539                                             bdaddr_t *addr, u8 addr_type)
3540 {
3541         struct hci_conn_params *params;
3542
3543         if (!hci_is_identity_address(addr, addr_type))
3544                 return NULL;
3545
3546         params = hci_conn_params_lookup(hdev, addr, addr_type);
3547         if (params)
3548                 return params;
3549
3550         params = kzalloc(sizeof(*params), GFP_KERNEL);
3551         if (!params) {
3552                 BT_ERR("Out of memory");
3553                 return NULL;
3554         }
3555
3556         bacpy(&params->addr, addr);
3557         params->addr_type = addr_type;
3558
3559         list_add(&params->list, &hdev->le_conn_params);
3560         INIT_LIST_HEAD(&params->action);
3561
3562         params->conn_min_interval = hdev->le_conn_min_interval;
3563         params->conn_max_interval = hdev->le_conn_max_interval;
3564         params->conn_latency = hdev->le_conn_latency;
3565         params->supervision_timeout = hdev->le_supv_timeout;
3566         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3567
3568         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3569
3570         return params;
3571 }
3572
3573 /* This function requires the caller holds hdev->lock */
3574 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3575                         u8 auto_connect)
3576 {
3577         struct hci_conn_params *params;
3578
3579         params = hci_conn_params_add(hdev, addr, addr_type);
3580         if (!params)
3581                 return -EIO;
3582
3583         if (params->auto_connect == auto_connect)
3584                 return 0;
3585
3586         list_del_init(&params->action);
3587
3588         switch (auto_connect) {
3589         case HCI_AUTO_CONN_DISABLED:
3590         case HCI_AUTO_CONN_LINK_LOSS:
3591                 hci_update_background_scan(hdev);
3592                 break;
3593         case HCI_AUTO_CONN_REPORT:
3594                 list_add(&params->action, &hdev->pend_le_reports);
3595                 hci_update_background_scan(hdev);
3596                 break;
3597         case HCI_AUTO_CONN_ALWAYS:
3598                 if (!is_connected(hdev, addr, addr_type)) {
3599                         list_add(&params->action, &hdev->pend_le_conns);
3600                         hci_update_background_scan(hdev);
3601                 }
3602                 break;
3603         }
3604
3605         params->auto_connect = auto_connect;
3606
3607         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3608                auto_connect);
3609
3610         return 0;
3611 }
3612
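/* Illustrative sketch (hypothetical helper, not an exported API): how
 * a caller would mark a bonded peer for automatic reconnection.  The
 * address bytes are made-up test values.
 */
static int __maybe_unused example_auto_connect(struct hci_dev *hdev)
{
	bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
	int err;

	hci_dev_lock(hdev);
	/* HCI_AUTO_CONN_ALWAYS puts the device on pend_le_conns and
	 * kicks the background scan so it is reconnected when seen.
	 */
	err = hci_conn_params_set(hdev, &peer, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}
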
3613 /* This function requires the caller holds hdev->lock */
3614 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3615 {
3616         struct hci_conn_params *params;
3617
3618         params = hci_conn_params_lookup(hdev, addr, addr_type);
3619         if (!params)
3620                 return;
3621
3622         list_del(&params->action);
3623         list_del(&params->list);
3624         kfree(params);
3625
3626         hci_update_background_scan(hdev);
3627
3628         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3629 }
3630
3631 /* This function requires the caller holds hdev->lock */
3632 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3633 {
3634         struct hci_conn_params *params, *tmp;
3635
3636         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3637                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3638                         continue;
3639                 list_del(&params->list);
3640                 kfree(params);
3641         }
3642
3643         BT_DBG("All LE disabled connection parameters were removed");
3644 }
3645
3646 /* This function requires the caller holds hdev->lock */
3647 void hci_conn_params_clear_all(struct hci_dev *hdev)
3648 {
3649         struct hci_conn_params *params, *tmp;
3650
3651         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3652                 list_del(&params->action);
3653                 list_del(&params->list);
3654                 kfree(params);
3655         }
3656
3657         hci_update_background_scan(hdev);
3658
3659         BT_DBG("All LE connection parameters were removed");
3660 }
3661
3662 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3663 {
3664         if (status) {
3665                 BT_ERR("Failed to start inquiry: status %d", status);
3666
3667                 hci_dev_lock(hdev);
3668                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3669                 hci_dev_unlock(hdev);
3670                 return;
3671         }
3672 }
3673
3674 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3675 {
3676         /* General inquiry access code (GIAC) */
3677         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3678         struct hci_request req;
3679         struct hci_cp_inquiry cp;
3680         int err;
3681
3682         if (status) {
3683                 BT_ERR("Failed to disable LE scanning: status %d", status);
3684                 return;
3685         }
3686
3687         switch (hdev->discovery.type) {
3688         case DISCOV_TYPE_LE:
3689                 hci_dev_lock(hdev);
3690                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3691                 hci_dev_unlock(hdev);
3692                 break;
3693
3694         case DISCOV_TYPE_INTERLEAVED:
3695                 hci_req_init(&req, hdev);
3696
3697                 memset(&cp, 0, sizeof(cp));
3698                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3699                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3700                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3701
3702                 hci_dev_lock(hdev);
3703
3704                 hci_inquiry_cache_flush(hdev);
3705
3706                 err = hci_req_run(&req, inquiry_complete);
3707                 if (err) {
3708                         BT_ERR("Inquiry request failed: err %d", err);
3709                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3710                 }
3711
3712                 hci_dev_unlock(hdev);
3713                 break;
3714         }
3715 }
3716
3717 static void le_scan_disable_work(struct work_struct *work)
3718 {
3719         struct hci_dev *hdev = container_of(work, struct hci_dev,
3720                                             le_scan_disable.work);
3721         struct hci_request req;
3722         int err;
3723
3724         BT_DBG("%s", hdev->name);
3725
3726         hci_req_init(&req, hdev);
3727
3728         hci_req_add_le_scan_disable(&req);
3729
3730         err = hci_req_run(&req, le_scan_disable_work_complete);
3731         if (err)
3732                 BT_ERR("Disable LE scanning request failed: err %d", err);
3733 }
3734
3735 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3736 {
3737         struct hci_dev *hdev = req->hdev;
3738
3739         /* If we're advertising or initiating an LE connection we can't
3740          * go ahead and change the random address at this time. This is
3741          * because the eventual initiator address used for the
3742          * subsequently created connection will be undefined (some
3743          * controllers use the new address and others the one we had
3744          * when the operation started).
3745          *
3746          * In this kind of scenario skip the update and let the random
3747          * address be updated at the next cycle.
3748          */
3749         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3750             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3751                 BT_DBG("Deferring random address update");
3752                 return;
3753         }
3754
3755         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3756 }
3757
3758 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3759                               u8 *own_addr_type)
3760 {
3761         struct hci_dev *hdev = req->hdev;
3762         int err;
3763
3764         /* If privacy is enabled use a resolvable private address. If
3765          * the current RPA has expired or something other than the
3766          * current RPA is in use, then generate a new one.
3767          */
3768         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3769                 int to;
3770
3771                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3772
3773                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3774                     !bacmp(&hdev->random_addr, &hdev->rpa))
3775                         return 0;
3776
3777                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3778                 if (err < 0) {
3779                         BT_ERR("%s failed to generate new RPA", hdev->name);
3780                         return err;
3781                 }
3782
3783                 set_random_addr(req, &hdev->rpa);
3784
3785                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3786                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3787
3788                 return 0;
3789         }
3790
3791         /* In case of required privacy without resolvable private address,
3792          * use an unresolvable private address. This is useful for active
3793          * scanning and non-connectable advertising.
3794          */
3795         if (require_privacy) {
3796                 bdaddr_t urpa;
3797
3798                 get_random_bytes(&urpa, 6);
3799                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3800
3801                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3802                 set_random_addr(req, &urpa);
3803                 return 0;
3804         }
3805
3806         /* If forcing static address is in use or there is no public
3807          * address use the static address as random address (but skip
3808          * the HCI command if the current random address is already the
3809          * static one).
3810          */
3811         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3812             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3813                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3814                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3815                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3816                                     &hdev->static_addr);
3817                 return 0;
3818         }
3819
3820         /* Neither privacy nor static address is being used so use a
3821          * public address.
3822          */
3823         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3824
3825         return 0;
3826 }
3827
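/* Illustrative sketch (hypothetical helper): per the Bluetooth Core
 * Specification, the two most significant bits of a random address
 * select its sub-type: 00 for non-resolvable private (as generated
 * above with "urpa.b[5] &= 0x3f"), 01 for resolvable private, and 11
 * for static.  Addresses are stored little-endian, so b[5] holds the
 * most significant byte.
 */
static const char * __maybe_unused
example_random_addr_subtype(const bdaddr_t *a)
{
	switch (a->b[5] >> 6) {
	case 0x00:
		return "non-resolvable private";
	case 0x01:
		return "resolvable private";
	case 0x03:
		return "static";
	}

	return "reserved";
}
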
3828 /* Copy the Identity Address of the controller.
3829  *
3830  * If the controller has a public BD_ADDR, then by default use that one.
3831  * If this is a LE only controller without a public address, default to
3832  * the static random address.
3833  *
3834  * For debugging purposes it is possible to force controllers with a
3835  * public address to use the static random address instead.
3836  */
3837 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3838                                u8 *bdaddr_type)
3839 {
3840         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3841             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3842                 bacpy(bdaddr, &hdev->static_addr);
3843                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3844         } else {
3845                 bacpy(bdaddr, &hdev->bdaddr);
3846                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3847         }
3848 }
3849
3850 /* Alloc HCI device */
3851 struct hci_dev *hci_alloc_dev(void)
3852 {
3853         struct hci_dev *hdev;
3854
3855         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3856         if (!hdev)
3857                 return NULL;
3858
3859         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3860         hdev->esco_type = (ESCO_HV1);
3861         hdev->link_mode = (HCI_LM_ACCEPT);
3862         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3863         hdev->io_capability = 0x03;     /* No Input No Output */
3864         hdev->manufacturer = 0xffff;    /* Default to internal use */
3865         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3866         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3867
3868         hdev->sniff_max_interval = 800;
3869         hdev->sniff_min_interval = 80;
3870
3871         hdev->le_adv_channel_map = 0x07;
3872         hdev->le_scan_interval = 0x0060;
3873         hdev->le_scan_window = 0x0030;
3874         hdev->le_conn_min_interval = 0x0028;
3875         hdev->le_conn_max_interval = 0x0038;
3876         hdev->le_conn_latency = 0x0000;
3877         hdev->le_supv_timeout = 0x002a;
3878
3879         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3880         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3881         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3882         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3883
3884         mutex_init(&hdev->lock);
3885         mutex_init(&hdev->req_lock);
3886
3887         INIT_LIST_HEAD(&hdev->mgmt_pending);
3888         INIT_LIST_HEAD(&hdev->blacklist);
3889         INIT_LIST_HEAD(&hdev->uuids);
3890         INIT_LIST_HEAD(&hdev->link_keys);
3891         INIT_LIST_HEAD(&hdev->long_term_keys);
3892         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3893         INIT_LIST_HEAD(&hdev->remote_oob_data);
3894         INIT_LIST_HEAD(&hdev->le_white_list);
3895         INIT_LIST_HEAD(&hdev->le_conn_params);
3896         INIT_LIST_HEAD(&hdev->pend_le_conns);
3897         INIT_LIST_HEAD(&hdev->pend_le_reports);
3898         INIT_LIST_HEAD(&hdev->conn_hash.list);
3899
3900         INIT_WORK(&hdev->rx_work, hci_rx_work);
3901         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3902         INIT_WORK(&hdev->tx_work, hci_tx_work);
3903         INIT_WORK(&hdev->power_on, hci_power_on);
3904
3905         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3906         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3907         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3908
3909         skb_queue_head_init(&hdev->rx_q);
3910         skb_queue_head_init(&hdev->cmd_q);
3911         skb_queue_head_init(&hdev->raw_q);
3912
3913         init_waitqueue_head(&hdev->req_wait_q);
3914
3915         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3916
3917         hci_init_sysfs(hdev);
3918         discovery_init(hdev);
3919
3920         return hdev;
3921 }
3922 EXPORT_SYMBOL(hci_alloc_dev);
3923
3924 /* Free HCI device */
3925 void hci_free_dev(struct hci_dev *hdev)
3926 {
3927         /* will free via device release */
3928         put_device(&hdev->dev);
3929 }
3930 EXPORT_SYMBOL(hci_free_dev);
3931
3932 /* Register HCI device */
3933 int hci_register_dev(struct hci_dev *hdev)
3934 {
3935         int id, error;
3936
3937         if (!hdev->open || !hdev->close || !hdev->send)
3938                 return -EINVAL;
3939
3940         /* Do not allow HCI_AMP devices to register at index 0,
3941          * so the index can be used as the AMP controller ID.
3942          */
3943         switch (hdev->dev_type) {
3944         case HCI_BREDR:
3945                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3946                 break;
3947         case HCI_AMP:
3948                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3949                 break;
3950         default:
3951                 return -EINVAL;
3952         }
3953
3954         if (id < 0)
3955                 return id;
3956
3957         sprintf(hdev->name, "hci%d", id);
3958         hdev->id = id;
3959
3960         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3961
3962         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3963                                           WQ_MEM_RECLAIM, 1, hdev->name);
3964         if (!hdev->workqueue) {
3965                 error = -ENOMEM;
3966                 goto err;
3967         }
3968
3969         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3970                                               WQ_MEM_RECLAIM, 1, hdev->name);
3971         if (!hdev->req_workqueue) {
3972                 destroy_workqueue(hdev->workqueue);
3973                 error = -ENOMEM;
3974                 goto err;
3975         }
3976
3977         if (!IS_ERR_OR_NULL(bt_debugfs))
3978                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3979
3980         dev_set_name(&hdev->dev, "%s", hdev->name);
3981
3982         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3983                                                CRYPTO_ALG_ASYNC);
3984         if (IS_ERR(hdev->tfm_aes)) {
3985                 BT_ERR("Unable to create crypto context");
3986                 error = PTR_ERR(hdev->tfm_aes);
3987                 hdev->tfm_aes = NULL;
3988                 goto err_wqueue;
3989         }
3990
3991         error = device_add(&hdev->dev);
3992         if (error < 0)
3993                 goto err_tfm;
3994
3995         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3996                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3997                                     hdev);
3998         if (hdev->rfkill) {
3999                 if (rfkill_register(hdev->rfkill) < 0) {
4000                         rfkill_destroy(hdev->rfkill);
4001                         hdev->rfkill = NULL;
4002                 }
4003         }
4004
4005         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4006                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4007
4008         set_bit(HCI_SETUP, &hdev->dev_flags);
4009         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4010
4011         if (hdev->dev_type == HCI_BREDR) {
4012                 /* Assume BR/EDR support until proven otherwise (such as
4013                  * through reading supported features during init).
4014                  */
4015                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4016         }
4017
4018         write_lock(&hci_dev_list_lock);
4019         list_add(&hdev->list, &hci_dev_list);
4020         write_unlock(&hci_dev_list_lock);
4021
4022         /* Devices that are marked for raw-only usage are unconfigured
4023          * and should not be included in normal operation.
4024          */
4025         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4026                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4027
4028         hci_notify(hdev, HCI_DEV_REG);
4029         hci_dev_hold(hdev);
4030
4031         queue_work(hdev->req_workqueue, &hdev->power_on);
4032
4033         return id;
4034
4035 err_tfm:
4036         crypto_free_blkcipher(hdev->tfm_aes);
4037 err_wqueue:
4038         destroy_workqueue(hdev->workqueue);
4039         destroy_workqueue(hdev->req_workqueue);
4040 err:
4041         ida_simple_remove(&hci_index_ida, hdev->id);
4042
4043         return error;
4044 }
4045 EXPORT_SYMBOL(hci_register_dev);
4046
4047 /* Unregister HCI device */
4048 void hci_unregister_dev(struct hci_dev *hdev)
4049 {
4050         int i, id;
4051
4052         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4053
4054         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4055
4056         id = hdev->id;
4057
4058         write_lock(&hci_dev_list_lock);
4059         list_del(&hdev->list);
4060         write_unlock(&hci_dev_list_lock);
4061
4062         hci_dev_do_close(hdev);
4063
4064         for (i = 0; i < NUM_REASSEMBLY; i++)
4065                 kfree_skb(hdev->reassembly[i]);
4066
4067         cancel_work_sync(&hdev->power_on);
4068
4069         if (!test_bit(HCI_INIT, &hdev->flags) &&
4070             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4071             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4072                 hci_dev_lock(hdev);
4073                 mgmt_index_removed(hdev);
4074                 hci_dev_unlock(hdev);
4075         }
4076
4077         /* mgmt_index_removed should take care of emptying the
4078          * pending list */
4079         BUG_ON(!list_empty(&hdev->mgmt_pending));
4080
4081         hci_notify(hdev, HCI_DEV_UNREG);
4082
4083         if (hdev->rfkill) {
4084                 rfkill_unregister(hdev->rfkill);
4085                 rfkill_destroy(hdev->rfkill);
4086         }
4087
4088         if (hdev->tfm_aes)
4089                 crypto_free_blkcipher(hdev->tfm_aes);
4090
4091         device_del(&hdev->dev);
4092
4093         debugfs_remove_recursive(hdev->debugfs);
4094
4095         destroy_workqueue(hdev->workqueue);
4096         destroy_workqueue(hdev->req_workqueue);
4097
4098         hci_dev_lock(hdev);
4099         hci_blacklist_clear(hdev);
4100         hci_uuids_clear(hdev);
4101         hci_link_keys_clear(hdev);
4102         hci_smp_ltks_clear(hdev);
4103         hci_smp_irks_clear(hdev);
4104         hci_remote_oob_data_clear(hdev);
4105         hci_white_list_clear(hdev);
4106         hci_conn_params_clear_all(hdev);
4107         hci_dev_unlock(hdev);
4108
4109         hci_dev_put(hdev);
4110
4111         ida_simple_remove(&hci_index_ida, id);
4112 }
4113 EXPORT_SYMBOL(hci_unregister_dev);
4114
4115 /* Suspend HCI device */
4116 int hci_suspend_dev(struct hci_dev *hdev)
4117 {
4118         hci_notify(hdev, HCI_DEV_SUSPEND);
4119         return 0;
4120 }
4121 EXPORT_SYMBOL(hci_suspend_dev);
4122
4123 /* Resume HCI device */
4124 int hci_resume_dev(struct hci_dev *hdev)
4125 {
4126         hci_notify(hdev, HCI_DEV_RESUME);
4127         return 0;
4128 }
4129 EXPORT_SYMBOL(hci_resume_dev);
4130
4131 /* Receive frame from HCI drivers */
4132 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4133 {
4134         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4135                       && !test_bit(HCI_INIT, &hdev->flags))) {
4136                 kfree_skb(skb);
4137                 return -ENXIO;
4138         }
4139
4140         /* Incoming skb */
4141         bt_cb(skb)->incoming = 1;
4142
4143         /* Time stamp */
4144         __net_timestamp(skb);
4145
4146         skb_queue_tail(&hdev->rx_q, skb);
4147         queue_work(hdev->workqueue, &hdev->rx_work);
4148
4149         return 0;
4150 }
4151 EXPORT_SYMBOL(hci_recv_frame);
4152
4153 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4154                           int count, __u8 index)
4155 {
4156         int len = 0;
4157         int hlen = 0;
4158         int remain = count;
4159         struct sk_buff *skb;
4160         struct bt_skb_cb *scb;
4161
4162         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4163             index >= NUM_REASSEMBLY)
4164                 return -EILSEQ;
4165
4166         skb = hdev->reassembly[index];
4167
4168         if (!skb) {
4169                 switch (type) {
4170                 case HCI_ACLDATA_PKT:
4171                         len = HCI_MAX_FRAME_SIZE;
4172                         hlen = HCI_ACL_HDR_SIZE;
4173                         break;
4174                 case HCI_EVENT_PKT:
4175                         len = HCI_MAX_EVENT_SIZE;
4176                         hlen = HCI_EVENT_HDR_SIZE;
4177                         break;
4178                 case HCI_SCODATA_PKT:
4179                         len = HCI_MAX_SCO_SIZE;
4180                         hlen = HCI_SCO_HDR_SIZE;
4181                         break;
4182                 }
4183
4184                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4185                 if (!skb)
4186                         return -ENOMEM;
4187
4188                 scb = (void *) skb->cb;
4189                 scb->expect = hlen;
4190                 scb->pkt_type = type;
4191
4192                 hdev->reassembly[index] = skb;
4193         }
4194
4195         while (count) {
4196                 scb = (void *) skb->cb;
4197                 len = min_t(uint, scb->expect, count);
4198
4199                 memcpy(skb_put(skb, len), data, len);
4200
4201                 count -= len;
4202                 data += len;
4203                 scb->expect -= len;
4204                 remain = count;
4205
4206                 switch (type) {
4207                 case HCI_EVENT_PKT:
4208                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4209                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4210                                 scb->expect = h->plen;
4211
4212                                 if (skb_tailroom(skb) < scb->expect) {
4213                                         kfree_skb(skb);
4214                                         hdev->reassembly[index] = NULL;
4215                                         return -ENOMEM;
4216                                 }
4217                         }
4218                         break;
4219
4220                 case HCI_ACLDATA_PKT:
4221                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4222                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4223                                 scb->expect = __le16_to_cpu(h->dlen);
4224
4225                                 if (skb_tailroom(skb) < scb->expect) {
4226                                         kfree_skb(skb);
4227                                         hdev->reassembly[index] = NULL;
4228                                         return -ENOMEM;
4229                                 }
4230                         }
4231                         break;
4232
4233                 case HCI_SCODATA_PKT:
4234                         if (skb->len == HCI_SCO_HDR_SIZE) {
4235                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4236                                 scb->expect = h->dlen;
4237
4238                                 if (skb_tailroom(skb) < scb->expect) {
4239                                         kfree_skb(skb);
4240                                         hdev->reassembly[index] = NULL;
4241                                         return -ENOMEM;
4242                                 }
4243                         }
4244                         break;
4245                 }
4246
4247                 if (scb->expect == 0) {
4248                         /* Complete frame */
4249
4250                         bt_cb(skb)->pkt_type = type;
4251                         hci_recv_frame(hdev, skb);
4252
4253                         hdev->reassembly[index] = NULL;
4254                         return remain;
4255                 }
4256         }
4257
4258         return remain;
4259 }
4260
4261 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4262 {
4263         int rem = 0;
4264
4265         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4266                 return -EILSEQ;
4267
4268         while (count) {
4269                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4270                 if (rem < 0)
4271                         return rem;
4272
4273                 data += (count - rem);
4274                 count = rem;
4275         }
4276
4277         return rem;
4278 }
4279 EXPORT_SYMBOL(hci_recv_fragment);
4280
4281 #define STREAM_REASSEMBLY 0
4282
4283 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4284 {
4285         int type;
4286         int rem = 0;
4287
4288         while (count) {
4289                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4290
4291                 if (!skb) {
4292                         struct { char type; } *pkt;
4293
4294                         /* Start of the frame */
4295                         pkt = data;
4296                         type = pkt->type;
4297
4298                         data++;
4299                         count--;
4300                 } else
4301                         type = bt_cb(skb)->pkt_type;
4302
4303                 rem = hci_reassembly(hdev, type, data, count,
4304                                      STREAM_REASSEMBLY);
4305                 if (rem < 0)
4306                         return rem;
4307
4308                 data += (count - rem);
4309                 count = rem;
4310         }
4311
4312         return rem;
4313 }
4314 EXPORT_SYMBOL(hci_recv_stream_fragment);
4315
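/* Illustrative sketch (hypothetical driver callback): a UART-style
 * transport prefixes every HCI packet with a single packet-type byte,
 * which is exactly the layout hci_recv_stream_fragment() consumes.
 * Transports that already know each chunk's type would call
 * hci_recv_fragment() instead.
 */
static int __maybe_unused example_uart_rx(struct hci_dev *hdev,
					  const u8 *buf, int len)
{
	/* Partial headers and payloads are buffered across calls in
	 * hdev->reassembly[STREAM_REASSEMBLY].
	 */
	return hci_recv_stream_fragment(hdev, (void *)buf, len);
}
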
4316 /* ---- Interface to upper protocols ---- */
4317
4318 int hci_register_cb(struct hci_cb *cb)
4319 {
4320         BT_DBG("%p name %s", cb, cb->name);
4321
4322         write_lock(&hci_cb_list_lock);
4323         list_add(&cb->list, &hci_cb_list);
4324         write_unlock(&hci_cb_list_lock);
4325
4326         return 0;
4327 }
4328 EXPORT_SYMBOL(hci_register_cb);
4329
4330 int hci_unregister_cb(struct hci_cb *cb)
4331 {
4332         BT_DBG("%p name %s", cb, cb->name);
4333
4334         write_lock(&hci_cb_list_lock);
4335         list_del(&cb->list);
4336         write_unlock(&hci_cb_list_lock);
4337
4338         return 0;
4339 }
4340 EXPORT_SYMBOL(hci_unregister_cb);
4341
4342 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4343 {
4344         int err;
4345
4346         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4347
4348         /* Time stamp */
4349         __net_timestamp(skb);
4350
4351         /* Send copy to monitor */
4352         hci_send_to_monitor(hdev, skb);
4353
4354         if (atomic_read(&hdev->promisc)) {
4355                 /* Send copy to the sockets */
4356                 hci_send_to_sock(hdev, skb);
4357         }
4358
4359         /* Get rid of skb owner, prior to sending to the driver. */
4360         skb_orphan(skb);
4361
4362         err = hdev->send(hdev, skb);
4363         if (err < 0) {
4364                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4365                 kfree_skb(skb);
4366         }
4367 }
4368
4369 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4370 {
4371         skb_queue_head_init(&req->cmd_q);
4372         req->hdev = hdev;
4373         req->err = 0;
4374 }
4375
4376 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4377 {
4378         struct hci_dev *hdev = req->hdev;
4379         struct sk_buff *skb;
4380         unsigned long flags;
4381
4382         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4383
4384         /* If an error occurred during request building, remove all HCI
4385          * commands queued on the HCI request queue.
4386          */
4387         if (req->err) {
4388                 skb_queue_purge(&req->cmd_q);
4389                 return req->err;
4390         }
4391
4392         /* Do not allow empty requests */
4393         if (skb_queue_empty(&req->cmd_q))
4394                 return -ENODATA;
4395
4396         skb = skb_peek_tail(&req->cmd_q);
4397         bt_cb(skb)->req.complete = complete;
4398
4399         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4400         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4401         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4402
4403         queue_work(hdev->workqueue, &hdev->cmd_work);
4404
4405         return 0;
4406 }
4407
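/* Illustrative sketch (hypothetical request user): queue a single
 * command on a request and run it, so the completion callback fires
 * once the controller has answered.  The scan-enable value mirrors
 * the pattern used by le_scan_disable_work() above.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_req_run(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return hci_req_run(&req, example_req_complete);
}
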
4408 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4409                                        u32 plen, const void *param)
4410 {
4411         int len = HCI_COMMAND_HDR_SIZE + plen;
4412         struct hci_command_hdr *hdr;
4413         struct sk_buff *skb;
4414
4415         skb = bt_skb_alloc(len, GFP_ATOMIC);
4416         if (!skb)
4417                 return NULL;
4418
4419         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4420         hdr->opcode = cpu_to_le16(opcode);
4421         hdr->plen   = plen;
4422
4423         if (plen)
4424                 memcpy(skb_put(skb, plen), param, plen);
4425
4426         BT_DBG("skb len %d", skb->len);
4427
4428         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4429
4430         return skb;
4431 }
4432
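/* Worked example: HCI opcodes pack the Opcode Group Field (OGF) into
 * the upper six bits and the Opcode Command Field (OCF) into the
 * lower ten, matching the hci_opcode_pack() helper.  For
 * HCI_OP_INQUIRY, (0x01 << 10) | 0x0001 == 0x0401, which
 * hci_prepare_cmd() stores little-endian in the command header via
 * cpu_to_le16().
 */
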
4433 /* Send HCI command */
4434 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4435                  const void *param)
4436 {
4437         struct sk_buff *skb;
4438
4439         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4440
4441         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4442         if (!skb) {
4443                 BT_ERR("%s no memory for command", hdev->name);
4444                 return -ENOMEM;
4445         }
4446
4447         /* Stand-alone HCI commands must be flagged as
4448          * single-command requests.
4449          */
4450         bt_cb(skb)->req.start = true;
4451
4452         skb_queue_tail(&hdev->cmd_q, skb);
4453         queue_work(hdev->workqueue, &hdev->cmd_work);
4454
4455         return 0;
4456 }
4457
4458 /* Queue a command to an asynchronous HCI request */
4459 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4460                     const void *param, u8 event)
4461 {
4462         struct hci_dev *hdev = req->hdev;
4463         struct sk_buff *skb;
4464
4465         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4466
4467         /* If an error occurred during request building, there is no point in
4468          * queueing the HCI command. We can simply return.
4469          */
4470         if (req->err)
4471                 return;
4472
4473         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4474         if (!skb) {
4475                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4476                        hdev->name, opcode);
4477                 req->err = -ENOMEM;
4478                 return;
4479         }
4480
4481         if (skb_queue_empty(&req->cmd_q))
4482                 bt_cb(skb)->req.start = true;
4483
4484         bt_cb(skb)->req.event = event;
4485
4486         skb_queue_tail(&req->cmd_q, skb);
4487 }
4488
4489 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4490                  const void *param)
4491 {
4492         hci_req_add_ev(req, opcode, plen, param, 0);
4493 }
4494
4495 /* Get data from the previously sent command */
4496 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4497 {
4498         struct hci_command_hdr *hdr;
4499
4500         if (!hdev->sent_cmd)
4501                 return NULL;
4502
4503         hdr = (void *) hdev->sent_cmd->data;
4504
4505         if (hdr->opcode != cpu_to_le16(opcode))
4506                 return NULL;
4507
4508         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4509
4510         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4511 }
4512
4513 /* Send ACL data */
4514 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4515 {
4516         struct hci_acl_hdr *hdr;
4517         int len = skb->len;
4518
4519         skb_push(skb, HCI_ACL_HDR_SIZE);
4520         skb_reset_transport_header(skb);
4521         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4522         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4523         hdr->dlen   = cpu_to_le16(len);
4524 }
4525
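/* Worked example: hci_handle_pack() keeps the 12-bit connection
 * handle in the low bits and shifts the packet boundary/broadcast
 * flags into the top four, so a handle of 0x002a sent with ACL_START
 * (0x02) yields a header handle field of 0x202a (before the
 * little-endian conversion above).
 */
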
4526 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4527                           struct sk_buff *skb, __u16 flags)
4528 {
4529         struct hci_conn *conn = chan->conn;
4530         struct hci_dev *hdev = conn->hdev;
4531         struct sk_buff *list;
4532
4533         skb->len = skb_headlen(skb);
4534         skb->data_len = 0;
4535
4536         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4537
4538         switch (hdev->dev_type) {
4539         case HCI_BREDR:
4540                 hci_add_acl_hdr(skb, conn->handle, flags);
4541                 break;
4542         case HCI_AMP:
4543                 hci_add_acl_hdr(skb, chan->handle, flags);
4544                 break;
4545         default:
4546                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4547                 return;
4548         }
4549
4550         list = skb_shinfo(skb)->frag_list;
4551         if (!list) {
4552                 /* Non-fragmented */
4553                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4554
4555                 skb_queue_tail(queue, skb);
4556         } else {
4557                 /* Fragmented */
4558                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4559
4560                 skb_shinfo(skb)->frag_list = NULL;
4561
4562                 /* Queue all fragments atomically */
4563                 spin_lock(&queue->lock);
4564
4565                 __skb_queue_tail(queue, skb);
4566
4567                 flags &= ~ACL_START;
4568                 flags |= ACL_CONT;
4569                 do {
4570                         skb = list; list = list->next;
4571
4572                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4573                         hci_add_acl_hdr(skb, conn->handle, flags);
4574
4575                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4576
4577                         __skb_queue_tail(queue, skb);
4578                 } while (list);
4579
4580                 spin_unlock(&queue->lock);
4581         }
4582 }
4583
4584 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4585 {
4586         struct hci_dev *hdev = chan->conn->hdev;
4587
4588         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4589
4590         hci_queue_acl(chan, &chan->data_q, skb, flags);
4591
4592         queue_work(hdev->workqueue, &hdev->tx_work);
4593 }
4594
4595 /* Send SCO data */
4596 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4597 {
4598         struct hci_dev *hdev = conn->hdev;
4599         struct hci_sco_hdr hdr;
4600
4601         BT_DBG("%s len %d", hdev->name, skb->len);
4602
4603         hdr.handle = cpu_to_le16(conn->handle);
4604         hdr.dlen   = skb->len;
4605
4606         skb_push(skb, HCI_SCO_HDR_SIZE);
4607         skb_reset_transport_header(skb);
4608         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4609
4610         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4611
4612         skb_queue_tail(&conn->data_q, skb);
4613         queue_work(hdev->workqueue, &hdev->tx_work);
4614 }
4615
4616 /* ---- HCI TX task (outgoing data) ---- */
4617
4618 /* HCI Connection scheduler */
4619 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4620                                      int *quote)
4621 {
4622         struct hci_conn_hash *h = &hdev->conn_hash;
4623         struct hci_conn *conn = NULL, *c;
4624         unsigned int num = 0, min = ~0;
4625
4626         /* We don't have to lock the device here. Connections are always
4627          * added and removed with TX task disabled. */
4628
4629         rcu_read_lock();
4630
4631         list_for_each_entry_rcu(c, &h->list, list) {
4632                 if (c->type != type || skb_queue_empty(&c->data_q))
4633                         continue;
4634
4635                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4636                         continue;
4637
4638                 num++;
4639
4640                 if (c->sent < min) {
4641                         min  = c->sent;
4642                         conn = c;
4643                 }
4644
4645                 if (hci_conn_num(hdev, type) == num)
4646                         break;
4647         }
4648
4649         rcu_read_unlock();
4650
4651         if (conn) {
4652                 int cnt, q;
4653
4654                 switch (conn->type) {
4655                 case ACL_LINK:
4656                         cnt = hdev->acl_cnt;
4657                         break;
4658                 case SCO_LINK:
4659                 case ESCO_LINK:
4660                         cnt = hdev->sco_cnt;
4661                         break;
4662                 case LE_LINK:
4663                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4664                         break;
4665                 default:
4666                         cnt = 0;
4667                         BT_ERR("Unknown link type");
4668                 }
4669
4670                 q = cnt / num;
4671                 *quote = q ? q : 1;
4672         } else
4673                 *quote = 0;
4674
4675         BT_DBG("conn %p quote %d", conn, *quote);
4676         return conn;
4677 }
4678
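/* Worked example: with 8 free ACL buffers (cnt == 8) and three
 * connections queuing data (num == 3), each scheduling pass grants
 * q == 2 packets to the least-recently-served connection; the
 * "q ? q : 1" fallback guarantees forward progress even when
 * cnt < num.
 */
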
4679 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4680 {
4681         struct hci_conn_hash *h = &hdev->conn_hash;
4682         struct hci_conn *c;
4683
4684         BT_ERR("%s link tx timeout", hdev->name);
4685
4686         rcu_read_lock();
4687
4688         /* Kill stalled connections */
4689         list_for_each_entry_rcu(c, &h->list, list) {
4690                 if (c->type == type && c->sent) {
4691                         BT_ERR("%s killing stalled connection %pMR",
4692                                hdev->name, &c->dst);
4693                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4694                 }
4695         }
4696
4697         rcu_read_unlock();
4698 }
4699
4700 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4701                                       int *quote)
4702 {
4703         struct hci_conn_hash *h = &hdev->conn_hash;
4704         struct hci_chan *chan = NULL;
4705         unsigned int num = 0, min = ~0, cur_prio = 0;
4706         struct hci_conn *conn;
4707         int cnt, q, conn_num = 0;
4708
4709         BT_DBG("%s", hdev->name);
4710
4711         rcu_read_lock();
4712
4713         list_for_each_entry_rcu(conn, &h->list, list) {
4714                 struct hci_chan *tmp;
4715
4716                 if (conn->type != type)
4717                         continue;
4718
4719                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4720                         continue;
4721
4722                 conn_num++;
4723
4724                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4725                         struct sk_buff *skb;
4726
4727                         if (skb_queue_empty(&tmp->data_q))
4728                                 continue;
4729
4730                         skb = skb_peek(&tmp->data_q);
4731                         if (skb->priority < cur_prio)
4732                                 continue;
4733
4734                         if (skb->priority > cur_prio) {
4735                                 num = 0;
4736                                 min = ~0;
4737                                 cur_prio = skb->priority;
4738                         }
4739
4740                         num++;
4741
4742                         if (conn->sent < min) {
4743                                 min  = conn->sent;
4744                                 chan = tmp;
4745                         }
4746                 }
4747
4748                 if (hci_conn_num(hdev, type) == conn_num)
4749                         break;
4750         }
4751
4752         rcu_read_unlock();
4753
4754         if (!chan)
4755                 return NULL;
4756
4757         switch (chan->conn->type) {
4758         case ACL_LINK:
4759                 cnt = hdev->acl_cnt;
4760                 break;
4761         case AMP_LINK:
4762                 cnt = hdev->block_cnt;
4763                 break;
4764         case SCO_LINK:
4765         case ESCO_LINK:
4766                 cnt = hdev->sco_cnt;
4767                 break;
4768         case LE_LINK:
4769                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4770                 break;
4771         default:
4772                 cnt = 0;
4773                 BT_ERR("Unknown link type");
4774         }
4775
4776         q = cnt / num;
4777         *quote = q ? q : 1;
4778         BT_DBG("chan %p quote %d", chan, *quote);
4779         return chan;
4780 }
4781
4782 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4783 {
4784         struct hci_conn_hash *h = &hdev->conn_hash;
4785         struct hci_conn *conn;
4786         int num = 0;
4787
4788         BT_DBG("%s", hdev->name);
4789
4790         rcu_read_lock();
4791
4792         list_for_each_entry_rcu(conn, &h->list, list) {
4793                 struct hci_chan *chan;
4794
4795                 if (conn->type != type)
4796                         continue;
4797
4798                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4799                         continue;
4800
4801                 num++;
4802
4803                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4804                         struct sk_buff *skb;
4805
4806                         if (chan->sent) {
4807                                 chan->sent = 0;
4808                                 continue;
4809                         }
4810
4811                         if (skb_queue_empty(&chan->data_q))
4812                                 continue;
4813
4814                         skb = skb_peek(&chan->data_q);
4815                         if (skb->priority >= HCI_PRIO_MAX - 1)
4816                                 continue;
4817
4818                         skb->priority = HCI_PRIO_MAX - 1;
4819
4820                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4821                                skb->priority);
4822                 }
4823
4824                 if (hci_conn_num(hdev, type) == num)
4825                         break;
4826         }
4827
4828         rcu_read_unlock();
4829
4830 }
4831
4832 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4833 {
4834         /* Calculate count of blocks used by this packet */
4835         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4836 }
4837
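/* Worked example: block-based controllers account for data in fixed
 * size blocks rather than whole packets.  With hdev->block_len == 64,
 * an skb of 251 bytes (247 bytes of payload after the 4-byte ACL
 * header) occupies DIV_ROUND_UP(247, 64) == 4 blocks.
 */
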
4838 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4839 {
4840         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4841                 /* ACL tx timeout must be longer than the maximum
4842                  * link supervision timeout (40.9 seconds) */
4843                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4844                                        HCI_ACL_TX_TIMEOUT))
4845                         hci_link_tx_to(hdev, ACL_LINK);
4846         }
4847 }
4848
4849 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4850 {
4851         unsigned int cnt = hdev->acl_cnt;
4852         struct hci_chan *chan;
4853         struct sk_buff *skb;
4854         int quote;
4855
4856         __check_timeout(hdev, cnt);
4857
4858         while (hdev->acl_cnt &&
4859                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4860                 u32 priority = (skb_peek(&chan->data_q))->priority;
4861                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4862                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4863                                skb->len, skb->priority);
4864
4865                         /* Stop if priority has changed */
4866                         if (skb->priority < priority)
4867                                 break;
4868
4869                         skb = skb_dequeue(&chan->data_q);
4870
4871                         hci_conn_enter_active_mode(chan->conn,
4872                                                    bt_cb(skb)->force_active);
4873
4874                         hci_send_frame(hdev, skb);
4875                         hdev->acl_last_tx = jiffies;
4876
4877                         hdev->acl_cnt--;
4878                         chan->sent++;
4879                         chan->conn->sent++;
4880                 }
4881         }
4882
4883         if (cnt != hdev->acl_cnt)
4884                 hci_prio_recalculate(hdev, ACL_LINK);
4885 }
4886
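/* Block-based ACL scheduling: as above, but each frame consumes as many
 * controller buffer blocks as its payload length requires.
 */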
4887 static void hci_sched_acl_blk(struct hci_dev *hdev)
4888 {
4889         unsigned int cnt = hdev->block_cnt;
4890         struct hci_chan *chan;
4891         struct sk_buff *skb;
4892         int quote;
4893         u8 type;
4894
4895         __check_timeout(hdev, cnt);
4896
4897         BT_DBG("%s", hdev->name);
4898
4899         if (hdev->dev_type == HCI_AMP)
4900                 type = AMP_LINK;
4901         else
4902                 type = ACL_LINK;
4903
4904         while (hdev->block_cnt > 0 &&
4905                (chan = hci_chan_sent(hdev, type, &quote))) {
4906                 u32 priority = (skb_peek(&chan->data_q))->priority;
4907                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4908                         int blocks;
4909
4910                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4911                                skb->len, skb->priority);
4912
4913                         /* Stop if priority has changed */
4914                         if (skb->priority < priority)
4915                                 break;
4916
4917                         skb = skb_dequeue(&chan->data_q);
4918
4919                         blocks = __get_blocks(hdev, skb);
4920                         if (blocks > hdev->block_cnt)
4921                                 return;
4922
4923                         hci_conn_enter_active_mode(chan->conn,
4924                                                    bt_cb(skb)->force_active);
4925
4926                         hci_send_frame(hdev, skb);
4927                         hdev->acl_last_tx = jiffies;
4928
4929                         hdev->block_cnt -= blocks;
4930                         quote -= blocks;
4931
4932                         chan->sent += blocks;
4933                         chan->conn->sent += blocks;
4934                 }
4935         }
4936
4937         if (cnt != hdev->block_cnt)
4938                 hci_prio_recalculate(hdev, type);
4939 }
4940
4941 static void hci_sched_acl(struct hci_dev *hdev)
4942 {
4943         BT_DBG("%s", hdev->name);
4944
4945         /* No ACL connections on a BR/EDR controller, nothing to schedule */
4946         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4947                 return;
4948
4949         /* No AMP connections on an AMP controller, nothing to schedule */
4950         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4951                 return;
4952
4953         switch (hdev->flow_ctl_mode) {
4954         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4955                 hci_sched_acl_pkt(hdev);
4956                 break;
4957
4958         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4959                 hci_sched_acl_blk(hdev);
4960                 break;
4961         }
4962 }
4963
4964 /* Schedule SCO */
4965 static void hci_sched_sco(struct hci_dev *hdev)
4966 {
4967         struct hci_conn *conn;
4968         struct sk_buff *skb;
4969         int quote;
4970
4971         BT_DBG("%s", hdev->name);
4972
4973         if (!hci_conn_num(hdev, SCO_LINK))
4974                 return;
4975
4976         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4977                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4978                         BT_DBG("skb %p len %d", skb, skb->len);
4979                         hci_send_frame(hdev, skb);
4980
4981                         conn->sent++;
4982                         if (conn->sent == ~0)
4983                                 conn->sent = 0;
4984                 }
4985         }
4986 }
4987
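/* Schedule eSCO */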
4988 static void hci_sched_esco(struct hci_dev *hdev)
4989 {
4990         struct hci_conn *conn;
4991         struct sk_buff *skb;
4992         int quote;
4993
4994         BT_DBG("%s", hdev->name);
4995
4996         if (!hci_conn_num(hdev, ESCO_LINK))
4997                 return;
4998
4999         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5000                                                      &quote))) {
5001                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5002                         BT_DBG("skb %p len %d", skb, skb->len);
5003                         hci_send_frame(hdev, skb);
5004
5005                         conn->sent++;
5006                         if (conn->sent == ~0)
5007                                 conn->sent = 0;
5008                 }
5009         }
5010 }
5011
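/* Schedule LE: uses the dedicated LE buffer credits when the controller
 * provides them, and falls back to the shared ACL credits otherwise.
 */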
5012 static void hci_sched_le(struct hci_dev *hdev)
5013 {
5014         struct hci_chan *chan;
5015         struct sk_buff *skb;
5016         int quote, cnt, tmp;
5017
5018         BT_DBG("%s", hdev->name);
5019
5020         if (!hci_conn_num(hdev, LE_LINK))
5021                 return;
5022
5023         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5024                 /* LE tx timeout must be longer than maximum
5025                  * link supervision timeout (40.9 seconds) */
5026                 if (!hdev->le_cnt && hdev->le_pkts &&
5027                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5028                         hci_link_tx_to(hdev, LE_LINK);
5029         }
5030
5031         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5032         tmp = cnt;
5033         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5034                 u32 priority = (skb_peek(&chan->data_q))->priority;
5035                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5036                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5037                                skb->len, skb->priority);
5038
5039                         /* Stop if priority has changed */
5040                         if (skb->priority < priority)
5041                                 break;
5042
5043                         skb = skb_dequeue(&chan->data_q);
5044
5045                         hci_send_frame(hdev, skb);
5046                         hdev->le_last_tx = jiffies;
5047
5048                         cnt--;
5049                         chan->sent++;
5050                         chan->conn->sent++;
5051                 }
5052         }
5053
5054         if (hdev->le_pkts)
5055                 hdev->le_cnt = cnt;
5056         else
5057                 hdev->acl_cnt = cnt;
5058
5059         if (cnt != tmp)
5060                 hci_prio_recalculate(hdev, LE_LINK);
5061 }
5062
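/* TX work handler: run the per-link-type schedulers and then flush any
 * queued raw packets straight to the driver.
 */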
5063 static void hci_tx_work(struct work_struct *work)
5064 {
5065         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5066         struct sk_buff *skb;
5067
5068         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5069                hdev->sco_cnt, hdev->le_cnt);
5070
5071         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5072                 /* Run the schedulers and send queued frames to the HCI driver */
5073                 hci_sched_acl(hdev);
5074                 hci_sched_sco(hdev);
5075                 hci_sched_esco(hdev);
5076                 hci_sched_le(hdev);
5077         }
5078
5079         /* Send all queued raw (unknown type) packets */
5080         while ((skb = skb_dequeue(&hdev->raw_q)))
5081                 hci_send_frame(hdev, skb);
5082 }
5083
5084 /* ----- HCI RX task (incoming data processing) ----- */
5085
5086 /* ACL data packet */
5087 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5088 {
5089         struct hci_acl_hdr *hdr = (void *) skb->data;
5090         struct hci_conn *conn;
5091         __u16 handle, flags;
5092
5093         skb_pull(skb, HCI_ACL_HDR_SIZE);
5094
5095         handle = __le16_to_cpu(hdr->handle);
5096         flags  = hci_flags(handle);
5097         handle = hci_handle(handle);
5098
5099         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5100                handle, flags);
5101
5102         hdev->stat.acl_rx++;
5103
5104         hci_dev_lock(hdev);
5105         conn = hci_conn_hash_lookup_handle(hdev, handle);
5106         hci_dev_unlock(hdev);
5107
5108         if (conn) {
5109                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5110
5111                 /* Send to upper protocol */
5112                 l2cap_recv_acldata(conn, skb, flags);
5113                 return;
5114         } else {
5115                 BT_ERR("%s ACL packet for unknown connection handle %d",
5116                        hdev->name, handle);
5117         }
5118
5119         kfree_skb(skb);
5120 }
5121
5122 /* SCO data packet */
5123 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5124 {
5125         struct hci_sco_hdr *hdr = (void *) skb->data;
5126         struct hci_conn *conn;
5127         __u16 handle;
5128
5129         skb_pull(skb, HCI_SCO_HDR_SIZE);
5130
5131         handle = __le16_to_cpu(hdr->handle);
5132
5133         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5134
5135         hdev->stat.sco_rx++;
5136
5137         hci_dev_lock(hdev);
5138         conn = hci_conn_hash_lookup_handle(hdev, handle);
5139         hci_dev_unlock(hdev);
5140
5141         if (conn) {
5142                 /* Send to upper protocol */
5143                 sco_recv_scodata(conn, skb);
5144                 return;
5145         } else {
5146                 BT_ERR("%s SCO packet for unknown connection handle %d",
5147                        hdev->name, handle);
5148         }
5149
5150         kfree_skb(skb);
5151 }
5152
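/* The current request is complete when the command queue is empty or
 * when the next queued command starts a new request.
 */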
5153 static bool hci_req_is_complete(struct hci_dev *hdev)
5154 {
5155         struct sk_buff *skb;
5156
5157         skb = skb_peek(&hdev->cmd_q);
5158         if (!skb)
5159                 return true;
5160
5161         return bt_cb(skb)->req.start;
5162 }
5163
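/* Re-queue a clone of the last sent command (except HCI_OP_RESET) so
 * that it is transmitted again.
 */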
5164 static void hci_resend_last(struct hci_dev *hdev)
5165 {
5166         struct hci_command_hdr *sent;
5167         struct sk_buff *skb;
5168         u16 opcode;
5169
5170         if (!hdev->sent_cmd)
5171                 return;
5172
5173         sent = (void *) hdev->sent_cmd->data;
5174         opcode = __le16_to_cpu(sent->opcode);
5175         if (opcode == HCI_OP_RESET)
5176                 return;
5177
5178         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5179         if (!skb)
5180                 return;
5181
5182         skb_queue_head(&hdev->cmd_q, skb);
5183         queue_work(hdev->workqueue, &hdev->cmd_work);
5184 }
5185
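/* Called on command completion: if the request the command belongs to is
 * now finished (last command done, or a command failed part-way), drop
 * its remaining queued commands and run the request's complete callback.
 */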
5186 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5187 {
5188         hci_req_complete_t req_complete = NULL;
5189         struct sk_buff *skb;
5190         unsigned long flags;
5191
5192         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5193
5194         /* If the completed command doesn't match the last one that was
5195          * sent, we need to do special handling of it.
5196          */
5197         if (!hci_sent_cmd_data(hdev, opcode)) {
5198                 /* Some CSR based controllers generate a spontaneous
5199                  * reset complete event during init and any pending
5200                  * command will never be completed. In such a case we
5201                  * need to resend whatever was the last sent
5202                  * command.
5203                  */
5204                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5205                         hci_resend_last(hdev);
5206
5207                 return;
5208         }
5209
5210         /* If the command succeeded and there are still more commands in
5211          * this request, the request is not yet complete.
5212          */
5213         if (!status && !hci_req_is_complete(hdev))
5214                 return;
5215
5216         /* If this was the last command in a request, the complete
5217          * callback would be found in hdev->sent_cmd instead of the
5218          * command queue (hdev->cmd_q).
5219          */
5220         if (hdev->sent_cmd) {
5221                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5222
5223                 if (req_complete) {
5224                         /* We must set the complete callback to NULL to
5225                          * avoid calling the callback more than once if
5226                          * this function gets called again.
5227                          */
5228                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5229
5230                         goto call_complete;
5231                 }
5232         }
5233
5234         /* Remove all pending commands belonging to this request */
5235         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5236         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5237                 if (bt_cb(skb)->req.start) {
5238                         __skb_queue_head(&hdev->cmd_q, skb);
5239                         break;
5240                 }
5241
5242                 req_complete = bt_cb(skb)->req.complete;
5243                 kfree_skb(skb);
5244         }
5245         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5246
5247 call_complete:
5248         if (req_complete)
5249                 req_complete(hdev, status);
5250 }
5251
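/* RX work handler: feed each received packet to the monitor and, when in
 * promiscuous mode, to the sockets, then dispatch it to the matching
 * event, ACL or SCO handler.
 */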
5252 static void hci_rx_work(struct work_struct *work)
5253 {
5254         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5255         struct sk_buff *skb;
5256
5257         BT_DBG("%s", hdev->name);
5258
5259         while ((skb = skb_dequeue(&hdev->rx_q))) {
5260                 /* Send copy to monitor */
5261                 hci_send_to_monitor(hdev, skb);
5262
5263                 if (atomic_read(&hdev->promisc)) {
5264                         /* Send copy to the sockets */
5265                         hci_send_to_sock(hdev, skb);
5266                 }
5267
5268                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5269                         kfree_skb(skb);
5270                         continue;
5271                 }
5272
5273                 if (test_bit(HCI_INIT, &hdev->flags)) {
5274                         /* Don't process data packets in this state. */
5275                         switch (bt_cb(skb)->pkt_type) {
5276                         case HCI_ACLDATA_PKT:
5277                         case HCI_SCODATA_PKT:
5278                                 kfree_skb(skb);
5279                                 continue;
5280                         }
5281                 }
5282
5283                 /* Process frame */
5284                 switch (bt_cb(skb)->pkt_type) {
5285                 case HCI_EVENT_PKT:
5286                         BT_DBG("%s Event packet", hdev->name);
5287                         hci_event_packet(hdev, skb);
5288                         break;
5289
5290                 case HCI_ACLDATA_PKT:
5291                         BT_DBG("%s ACL data packet", hdev->name);
5292                         hci_acldata_packet(hdev, skb);
5293                         break;
5294
5295                 case HCI_SCODATA_PKT:
5296                         BT_DBG("%s SCO data packet", hdev->name);
5297                         hci_scodata_packet(hdev, skb);
5298                         break;
5299
5300                 default:
5301                         kfree_skb(skb);
5302                         break;
5303                 }
5304         }
5305 }
5306
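/* CMD work handler: send the next queued command when the controller has
 * a free command credit, and arm the command timeout timer.
 */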
5307 static void hci_cmd_work(struct work_struct *work)
5308 {
5309         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5310         struct sk_buff *skb;
5311
5312         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5313                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5314
5315         /* Send queued commands */
5316         if (atomic_read(&hdev->cmd_cnt)) {
5317                 skb = skb_dequeue(&hdev->cmd_q);
5318                 if (!skb)
5319                         return;
5320
5321                 kfree_skb(hdev->sent_cmd);
5322
5323                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5324                 if (hdev->sent_cmd) {
5325                         atomic_dec(&hdev->cmd_cnt);
5326                         hci_send_frame(hdev, skb);
5327                         if (test_bit(HCI_RESET, &hdev->flags))
5328                                 cancel_delayed_work(&hdev->cmd_timer);
5329                         else
5330                                 schedule_delayed_work(&hdev->cmd_timer,
5331                                                       HCI_CMD_TIMEOUT);
5332                 } else {
5333                         skb_queue_head(&hdev->cmd_q, skb);
5334                         queue_work(hdev->workqueue, &hdev->cmd_work);
5335                 }
5336         }
5337 }
5338
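/* Append an LE Set Scan Enable (disable) command to the given request */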
5339 void hci_req_add_le_scan_disable(struct hci_request *req)
5340 {
5341         struct hci_cp_le_set_scan_enable cp;
5342
5343         memset(&cp, 0, sizeof(cp));
5344         cp.enable = LE_SCAN_DISABLE;
5345         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5346 }
5347
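/* Append the commands that configure and enable LE passive scanning */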
5348 void hci_req_add_le_passive_scan(struct hci_request *req)
5349 {
5350         struct hci_cp_le_set_scan_param param_cp;
5351         struct hci_cp_le_set_scan_enable enable_cp;
5352         struct hci_dev *hdev = req->hdev;
5353         u8 own_addr_type;
5354
5355         /* Set require_privacy to false since no SCAN_REQ are sent
5356          * during passive scanning. Not using an unresolvable address
5357          * here is important so that peer devices using direct
5358          * advertising with our address will be correctly reported
5359          * by the controller.
5360          */
5361         if (hci_update_random_address(req, false, &own_addr_type))
5362                 return;
5363
5364         memset(&param_cp, 0, sizeof(param_cp));
5365         param_cp.type = LE_SCAN_PASSIVE;
5366         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5367         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5368         param_cp.own_address_type = own_addr_type;
5369         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5370                     &param_cp);
5371
5372         memset(&enable_cp, 0, sizeof(enable_cp));
5373         enable_cp.enable = LE_SCAN_ENABLE;
5374         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5375         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5376                     &enable_cp);
5377 }
5378
5379 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5380 {
5381         if (status)
5382                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5383                        status);
5384 }
5385
5386 /* This function controls the background scanning based on the
5387  * hdev->pend_le_conns list. If there are pending LE connections, we start
5388  * the background scanning, otherwise we stop it.
5389  *
5390  * This function requires the caller to hold hdev->lock.
5391  */
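/* A minimal usage sketch; callers must take hdev->lock around the call:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */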
5392 void hci_update_background_scan(struct hci_dev *hdev)
5393 {
5394         struct hci_request req;
5395         struct hci_conn *conn;
5396         int err;
5397
5398         if (!test_bit(HCI_UP, &hdev->flags) ||
5399             test_bit(HCI_INIT, &hdev->flags) ||
5400             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5401             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5402             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5403             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5404                 return;
5405
5406         hci_req_init(&req, hdev);
5407
5408         if (list_empty(&hdev->pend_le_conns) &&
5409             list_empty(&hdev->pend_le_reports)) {
5410                 /* If there are no pending LE connections or devices
5411                  * to be scanned for, we should stop the background
5412                  * scanning.
5413                  */
5414
5415                 /* If controller is not scanning we are done. */
5416                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5417                         return;
5418
5419                 hci_req_add_le_scan_disable(&req);
5420
5421                 BT_DBG("%s stopping background scanning", hdev->name);
5422         } else {
5423                 /* If there is at least one pending LE connection, we should
5424                  * keep the background scan running.
5425                  */
5426
5427                 /* If controller is connecting, we should not start scanning
5428                  * since some controllers are not able to scan and connect at
5429                  * the same time.
5430                  */
5431                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5432                 if (conn)
5433                         return;
5434
5435                 /* If controller is currently scanning, we stop it to ensure we
5436                  * don't miss any advertising (due to duplicates filter).
5437                  */
5438                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5439                         hci_req_add_le_scan_disable(&req);
5440
5441                 hci_req_add_le_passive_scan(&req);
5442
5443                 BT_DBG("%s starting background scanning", hdev->name);
5444         }
5445
5446         err = hci_req_run(&req, update_background_scan_complete);
5447         if (err)
5448                 BT_ERR("Failed to run HCI request: err %d", err);
5449 }