Bluetooth: Remove unneeded mgmt_discoverable function
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
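
/* A quick usage sketch (assuming debugfs is mounted at the usual
 * /sys/kernel/debug): reading the file reports 'Y' or 'N', and writing
 * a boolean string toggles Device Under Test mode, e.g.:
 *
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */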

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
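                /* For example, the Bluetooth Base UUID
                 * 00000000-0000-1000-8000-00805F9B34FB is stored here as
                 * FB 34 9B 5F 80 00 00 80 00 10 00 00 00 00 00 00.
                 */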
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
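
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a debugfs
 * attribute from a getter, an (optional) setter and a printf format for
 * the u64 value; passing NULL for the setter, as above, makes the
 * attribute effectively read-only.
 */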

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
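
/* Sniff intervals are expressed in baseband slots of 0.625 ms and must
 * be even, which is what the val % 2 checks above enforce; the setters
 * also keep min <= max by validating against the opposite bound.
 */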

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
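
/* The LE connection interval is in units of 1.25 ms, so the allowed
 * 0x0006-0x0c80 range above corresponds to 7.5 ms up to 4 s, matching
 * the limits the Core Specification defines for these parameters.
 */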

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");
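
/* The supervision timeout is in units of 10 ms, so 0x000a-0x0c80 maps
 * to 100 ms up to 32 s; slave latency (above) is a plain event count
 * with a spec maximum of 0x01f3.
 */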

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
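
/* The advertising channel map is a bitmask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39, so 0x07 advertises on all
 * three channels while 0x00 would be invalid.
 */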

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
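
/* Synchronous requests follow a simple state machine: the submitter sets
 * req_status to HCI_REQ_PEND and sleeps on req_wait_q; the completion or
 * cancel path records the result, moves the status to HCI_REQ_DONE or
 * HCI_REQ_CANCELED and wakes the sleeper, which then translates
 * req_result into an errno.
 */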

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
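
/* The returned values follow the Write Inquiry Mode command: 0x00 is
 * standard inquiry results, 0x01 is inquiry results with RSSI and 0x02
 * is inquiry results with RSSI or extended inquiry result format. The
 * manufacturer/revision checks above whitelist controllers known to
 * handle RSSI results without advertising the LMP feature bit.
 */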

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
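        /* In the 64-bit event mask, bit N enables the event with code
         * N + 1; events[4] |= 0x01 below, for instance, sets global bit
         * 32 and thus enables event 0x21 (Flow Specification Complete).
         */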

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

1686 static int __hci_init(struct hci_dev *hdev)
1687 {
1688         int err;
1689
1690         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691         if (err < 0)
1692                 return err;
1693
1694         /* The Device Under Test (DUT) mode is special and available for
1695          * all controller types, so create its debugfs entry early on.
1696          */
1697         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699                                     &dut_mode_fops);
1700         }
1701
1702         /* The HCI_BREDR device type covers single-mode LE, single-mode
1703          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1704          * only need the first stage of init.
1705          */
1706         if (hdev->dev_type != HCI_BREDR)
1707                 return 0;
1708
1709         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710         if (err < 0)
1711                 return err;
1712
1713         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714         if (err < 0)
1715                 return err;
1716
1717         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718         if (err < 0)
1719                 return err;
1720
1721         /* Only create debugfs entries during the initial setup
1722          * phase and not every time the controller gets powered on.
1723          */
1724         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725                 return 0;
1726
1727         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728                             &features_fops);
1729         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730                            &hdev->manufacturer);
1731         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734                             &blacklist_fops);
1735         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736                             &whitelist_fops);
1737         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
1739         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740                             &conn_info_min_age_fops);
1741         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742                             &conn_info_max_age_fops);
1743
1744         if (lmp_bredr_capable(hdev)) {
1745                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746                                     hdev, &inquiry_cache_fops);
1747                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748                                     hdev, &link_keys_fops);
1749                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750                                     hdev, &dev_class_fops);
1751                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752                                     hdev, &voice_setting_fops);
1753         }
1754
1755         if (lmp_ssp_capable(hdev)) {
1756                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757                                     hdev, &auto_accept_delay_fops);
1758                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759                                     hdev, &force_sc_support_fops);
1760                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761                                     hdev, &sc_only_mode_fops);
1762         }
1763
1764         if (lmp_sniff_capable(hdev)) {
1765                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766                                     hdev, &idle_timeout_fops);
1767                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768                                     hdev, &sniff_min_interval_fops);
1769                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770                                     hdev, &sniff_max_interval_fops);
1771         }
1772
1773         if (lmp_le_capable(hdev)) {
1774                 debugfs_create_file("identity", 0400, hdev->debugfs,
1775                                     hdev, &identity_fops);
1776                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777                                     hdev, &rpa_timeout_fops);
1778                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779                                     hdev, &random_address_fops);
1780                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781                                     hdev, &static_address_fops);
1782
1783                 /* For controllers with a public address, provide a debug
1784                  * option to force the usage of the configured static
1785                  * address. By default the public address is used.
1786                  */
1787                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788                         debugfs_create_file("force_static_address", 0644,
1789                                             hdev->debugfs, hdev,
1790                                             &force_static_address_fops);
1791
1792                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793                                   &hdev->le_white_list_size);
1794                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795                                     &white_list_fops);
1796                 debugfs_create_file("identity_resolving_keys", 0400,
1797                                     hdev->debugfs, hdev,
1798                                     &identity_resolving_keys_fops);
1799                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800                                     hdev, &long_term_keys_fops);
1801                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802                                     hdev, &conn_min_interval_fops);
1803                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804                                     hdev, &conn_max_interval_fops);
1805                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806                                     hdev, &conn_latency_fops);
1807                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808                                     hdev, &supervision_timeout_fops);
1809                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810                                     hdev, &adv_channel_map_fops);
1811                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812                                     &device_list_fops);
1813                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814                                    hdev->debugfs,
1815                                    &hdev->discov_interleaved_timeout);
1816         }
1817
1818         return 0;
1819 }
1820
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822 {
1823         struct hci_dev *hdev = req->hdev;
1824
1825         BT_DBG("%s %ld", hdev->name, opt);
1826
1827         /* Reset */
1828         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829                 hci_reset_req(req, 0);
1830
1831         /* Read Local Version */
1832         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834         /* Read BD Address */
1835         if (hdev->set_bdaddr)
1836                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837 }
1838
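     /* Minimal init for controllers that come up as unconfigured. Raw
      * devices are left alone; everything else runs hci_init0_req to
      * learn the local version and, if set_bdaddr is provided, the
      * original BD address.
      */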
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1840 {
1841         int err;
1842
1843         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844                 return 0;
1845
1846         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847         if (err < 0)
1848                 return err;
1849
1850         return 0;
1851 }
1852
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __u8 scan = opt;
1856
1857         BT_DBG("%s %x", req->hdev->name, scan);
1858
1859         /* Inquiry and Page scans */
1860         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1861 }
1862
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1864 {
1865         __u8 auth = opt;
1866
1867         BT_DBG("%s %x", req->hdev->name, auth);
1868
1869         /* Authentication */
1870         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1871 }
1872
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1874 {
1875         __u8 encrypt = opt;
1876
1877         BT_DBG("%s %x", req->hdev->name, encrypt);
1878
1879         /* Encryption */
1880         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1881 }
1882
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1884 {
1885         __le16 policy = cpu_to_le16(opt);
1886
1887         BT_DBG("%s %x", req->hdev->name, policy);
1888
1889         /* Default link policy */
1890         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1891 }
1892
1893 /* Get HCI device by index.
1894  * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1896 {
1897         struct hci_dev *hdev = NULL, *d;
1898
1899         BT_DBG("%d", index);
1900
1901         if (index < 0)
1902                 return NULL;
1903
1904         read_lock(&hci_dev_list_lock);
1905         list_for_each_entry(d, &hci_dev_list, list) {
1906                 if (d->id == index) {
1907                         hdev = hci_dev_hold(d);
1908                         break;
1909                 }
1910         }
1911         read_unlock(&hci_dev_list_lock);
1912         return hdev;
1913 }
1914
1915 /* ---- Inquiry support ---- */
1916
1917 bool hci_discovery_active(struct hci_dev *hdev)
1918 {
1919         struct discovery_state *discov = &hdev->discovery;
1920
1921         switch (discov->state) {
1922         case DISCOVERY_FINDING:
1923         case DISCOVERY_RESOLVING:
1924                 return true;
1925
1926         default:
1927                 return false;
1928         }
1929 }
1930
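     /* Update the discovery state machine and notify the management
      * interface when discovery effectively starts or stops. Repeated
      * transitions to the same state are ignored.
      */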
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932 {
1933         int old_state = hdev->discovery.state;
1934
1935         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
1937         if (old_state == state)
1938                 return;
1939
1940         hdev->discovery.state = state;
1941
1942         switch (state) {
1943         case DISCOVERY_STOPPED:
1944                 hci_update_background_scan(hdev);
1945
1946                 if (old_state != DISCOVERY_STARTING)
1947                         mgmt_discovering(hdev, 0);
1948                 break;
1949         case DISCOVERY_STARTING:
1950                 break;
1951         case DISCOVERY_FINDING:
1952                 mgmt_discovering(hdev, 1);
1953                 break;
1954         case DISCOVERY_RESOLVING:
1955                 break;
1956         case DISCOVERY_STOPPING:
1957                 break;
1958         }
1959 }
1960
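     /* Free all cached inquiry entries. The unknown and resolve lists
      * only reference entries owned by the all list, so after freeing
      * it is enough to reinitialize their list heads.
      */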
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1962 {
1963         struct discovery_state *cache = &hdev->discovery;
1964         struct inquiry_entry *p, *n;
1965
1966         list_for_each_entry_safe(p, n, &cache->all, all) {
1967                 list_del(&p->all);
1968                 kfree(p);
1969         }
1970
1971         INIT_LIST_HEAD(&cache->unknown);
1972         INIT_LIST_HEAD(&cache->resolve);
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976                                                bdaddr_t *bdaddr)
1977 {
1978         struct discovery_state *cache = &hdev->discovery;
1979         struct inquiry_entry *e;
1980
1981         BT_DBG("cache %p, %pMR", cache, bdaddr);
1982
1983         list_for_each_entry(e, &cache->all, all) {
1984                 if (!bacmp(&e->data.bdaddr, bdaddr))
1985                         return e;
1986         }
1987
1988         return NULL;
1989 }
1990
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1992                                                        bdaddr_t *bdaddr)
1993 {
1994         struct discovery_state *cache = &hdev->discovery;
1995         struct inquiry_entry *e;
1996
1997         BT_DBG("cache %p, %pMR", cache, bdaddr);
1998
1999         list_for_each_entry(e, &cache->unknown, list) {
2000                 if (!bacmp(&e->data.bdaddr, bdaddr))
2001                         return e;
2002         }
2003
2004         return NULL;
2005 }
2006
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2008                                                        bdaddr_t *bdaddr,
2009                                                        int state)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2015
2016         list_for_each_entry(e, &cache->resolve, list) {
2017                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018                         return e;
2019                 if (!bacmp(&e->data.bdaddr, bdaddr))
2020                         return e;
2021         }
2022
2023         return NULL;
2024 }
2025
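     /* Re-insert the entry into the resolve list so the list stays
      * ordered by signal strength (strongest first); entries with a
      * pending name request are skipped when comparing.
      */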
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027                                       struct inquiry_entry *ie)
2028 {
2029         struct discovery_state *cache = &hdev->discovery;
2030         struct list_head *pos = &cache->resolve;
2031         struct inquiry_entry *p;
2032
2033         list_del(&ie->list);
2034
2035         list_for_each_entry(p, &cache->resolve, list) {
2036                 if (p->name_state != NAME_PENDING &&
2037                     abs(p->data.rssi) >= abs(ie->data.rssi))
2038                         break;
2039                 pos = &p->list;
2040         }
2041
2042         list_add(&ie->list, pos);
2043 }
2044
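     /* Add a new inquiry cache entry for the device, or refresh an
      * existing one, and return MGMT_DEV_FOUND_* flags telling the
      * caller how the result should be reported (e.g. whether name
      * confirmation is still needed or legacy pairing is in use).
      */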
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046                              bool name_known)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *ie;
2050         u32 flags = 0;
2051
2052         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2053
2054         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
2056         if (!data->ssp_mode)
2057                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2058
2059         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2060         if (ie) {
2061                 if (!ie->data.ssp_mode)
2062                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2063
2064                 if (ie->name_state == NAME_NEEDED &&
2065                     data->rssi != ie->data.rssi) {
2066                         ie->data.rssi = data->rssi;
2067                         hci_inquiry_cache_update_resolve(hdev, ie);
2068                 }
2069
2070                 goto update;
2071         }
2072
2073         /* Entry not in the cache. Add new one. */
2074         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2075         if (!ie) {
2076                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077                 goto done;
2078         }
2079
2080         list_add(&ie->all, &cache->all);
2081
2082         if (name_known) {
2083                 ie->name_state = NAME_KNOWN;
2084         } else {
2085                 ie->name_state = NAME_NOT_KNOWN;
2086                 list_add(&ie->list, &cache->unknown);
2087         }
2088
2089 update:
2090         if (name_known && ie->name_state != NAME_KNOWN &&
2091             ie->name_state != NAME_PENDING) {
2092                 ie->name_state = NAME_KNOWN;
2093                 list_del(&ie->list);
2094         }
2095
2096         memcpy(&ie->data, data, sizeof(*data));
2097         ie->timestamp = jiffies;
2098         cache->timestamp = jiffies;
2099
2100         if (ie->name_state == NAME_NOT_KNOWN)
2101                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2102
2103 done:
2104         return flags;
2105 }
2106
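     /* Copy up to num cached entries into buf as struct inquiry_info
      * records and return the number copied. Called under hdev->lock,
      * so it must not sleep.
      */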
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_info *info = (struct inquiry_info *) buf;
2111         struct inquiry_entry *e;
2112         int copied = 0;
2113
2114         list_for_each_entry(e, &cache->all, all) {
2115                 struct inquiry_data *data = &e->data;
2116
2117                 if (copied >= num)
2118                         break;
2119
2120                 bacpy(&info->bdaddr, &data->bdaddr);
2121                 info->pscan_rep_mode    = data->pscan_rep_mode;
2122                 info->pscan_period_mode = data->pscan_period_mode;
2123                 info->pscan_mode        = data->pscan_mode;
2124                 memcpy(info->dev_class, data->dev_class, 3);
2125                 info->clock_offset      = data->clock_offset;
2126
2127                 info++;
2128                 copied++;
2129         }
2130
2131         BT_DBG("cache %p, copied %d", cache, copied);
2132         return copied;
2133 }
2134
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2136 {
2137         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138         struct hci_dev *hdev = req->hdev;
2139         struct hci_cp_inquiry cp;
2140
2141         BT_DBG("%s", hdev->name);
2142
2143         if (test_bit(HCI_INQUIRY, &hdev->flags))
2144                 return;
2145
2146         /* Start Inquiry */
2147         memcpy(&cp.lap, &ir->lap, 3);
2148         cp.length  = ir->length;
2149         cp.num_rsp = ir->num_rsp;
2150         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2151 }
2152
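     /* Sleep action for wait_on_bit(): reschedule, and abort the wait
      * with a non-zero return value if a signal is pending.
      */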
2153 static int wait_inquiry(void *word)
2154 {
2155         schedule();
2156         return signal_pending(current);
2157 }
2158
2159 int hci_inquiry(void __user *arg)
2160 {
2161         __u8 __user *ptr = arg;
2162         struct hci_inquiry_req ir;
2163         struct hci_dev *hdev;
2164         int err = 0, do_inquiry = 0, max_rsp;
2165         long timeo;
2166         __u8 *buf;
2167
2168         if (copy_from_user(&ir, ptr, sizeof(ir)))
2169                 return -EFAULT;
2170
2171         hdev = hci_dev_get(ir.dev_id);
2172         if (!hdev)
2173                 return -ENODEV;
2174
2175         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176                 err = -EBUSY;
2177                 goto done;
2178         }
2179
2180         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2181                 err = -EOPNOTSUPP;
2182                 goto done;
2183         }
2184
2185         if (hdev->dev_type != HCI_BREDR) {
2186                 err = -EOPNOTSUPP;
2187                 goto done;
2188         }
2189
2190         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191                 err = -EOPNOTSUPP;
2192                 goto done;
2193         }
2194
2195         hci_dev_lock(hdev);
2196         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198                 hci_inquiry_cache_flush(hdev);
2199                 do_inquiry = 1;
2200         }
2201         hci_dev_unlock(hdev);
2202
2203         timeo = ir.length * msecs_to_jiffies(2000);
2204
2205         if (do_inquiry) {
2206                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207                                    timeo);
2208                 if (err < 0)
2209                         goto done;
2210
2211                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212                  * cleared). If it is interrupted by a signal, return -EINTR.
2213                  */
2214                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215                                 TASK_INTERRUPTIBLE))
2216                         return -EINTR;
2217         }
2218
2219         /* For an unlimited number of responses we use a buffer with
2220          * 255 entries.
2221          */
2222         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
2224         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2225          * buffer and copy it to user space afterwards.
2226          */
2227         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2228         if (!buf) {
2229                 err = -ENOMEM;
2230                 goto done;
2231         }
2232
2233         hci_dev_lock(hdev);
2234         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235         hci_dev_unlock(hdev);
2236
2237         BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240                 ptr += sizeof(ir);
2241                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2242                                  ir.num_rsp))
2243                         err = -EFAULT;
2244         } else
2245                 err = -EFAULT;
2246
2247         kfree(buf);
2248
2249 done:
2250         hci_dev_put(hdev);
2251         return err;
2252 }
2253
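     /* Power on a controller: open the transport, run the vendor setup
      * on first power-on, program a configured public address if the
      * driver supports it, and perform the staged HCI init unless the
      * device is unconfigured or bound to a user channel.
      */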
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2255 {
2256         int ret = 0;
2257
2258         BT_DBG("%s %p", hdev->name, hdev);
2259
2260         hci_req_lock(hdev);
2261
2262         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263                 ret = -ENODEV;
2264                 goto done;
2265         }
2266
2267         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269                 /* Check for rfkill but allow the HCI setup stage to
2270                  * proceed (which in itself doesn't cause any RF activity).
2271                  */
2272                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273                         ret = -ERFKILL;
2274                         goto done;
2275                 }
2276
2277                 /* Check for valid public address or a configured static
2278          * random address, but let the HCI setup proceed to
2279                  * be able to determine if there is a public address
2280                  * or not.
2281                  *
2282                  * In case of user channel usage, it is not important
2283                  * if a public address or static random address is
2284                  * available.
2285                  *
2286                  * This check is only valid for BR/EDR controllers
2287                  * since AMP controllers do not have an address.
2288                  */
2289                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290                     hdev->dev_type == HCI_BREDR &&
2291                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293                         ret = -EADDRNOTAVAIL;
2294                         goto done;
2295                 }
2296         }
2297
2298         if (test_bit(HCI_UP, &hdev->flags)) {
2299                 ret = -EALREADY;
2300                 goto done;
2301         }
2302
2303         if (hdev->open(hdev)) {
2304                 ret = -EIO;
2305                 goto done;
2306         }
2307
2308         atomic_set(&hdev->cmd_cnt, 1);
2309         set_bit(HCI_INIT, &hdev->flags);
2310
2311         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312                 if (hdev->setup)
2313                         ret = hdev->setup(hdev);
2314
2315                 /* The transport driver can set these quirks before
2316                  * creating the HCI device or in its setup callback.
2317                  *
2318                  * In case any of them is set, the controller has to
2319                  * start up as unconfigured.
2320                  */
2321                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2324
2325                 /* For an unconfigured controller it is required to
2326                  * read at least the version information provided by
2327                  * the Read Local Version Information command.
2328                  *
2329                  * If the set_bdaddr driver callback is provided, then
2330                  * also the original Bluetooth public device address
2331                  * will be read using the Read BD Address command.
2332                  */
2333                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334                         ret = __hci_unconf_init(hdev);
2335         }
2336
2337         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338                 /* If public address change is configured, ensure that
2339                  * the address gets programmed. If the driver does not
2340                  * support changing the public address, fail the power
2341                  * on procedure.
2342                  */
2343                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344                     hdev->set_bdaddr)
2345                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346                 else
2347                         ret = -EADDRNOTAVAIL;
2348         }
2349
2350         if (!ret) {
2351                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353                         ret = __hci_init(hdev);
2354         }
2355
2356         clear_bit(HCI_INIT, &hdev->flags);
2357
2358         if (!ret) {
2359                 hci_dev_hold(hdev);
2360                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361                 set_bit(HCI_UP, &hdev->flags);
2362                 hci_notify(hdev, HCI_DEV_UP);
2363                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367                     hdev->dev_type == HCI_BREDR) {
2368                         hci_dev_lock(hdev);
2369                         mgmt_powered(hdev, 1);
2370                         hci_dev_unlock(hdev);
2371                 }
2372         } else {
2373                 /* Init failed, cleanup */
2374                 flush_work(&hdev->tx_work);
2375                 flush_work(&hdev->cmd_work);
2376                 flush_work(&hdev->rx_work);
2377
2378                 skb_queue_purge(&hdev->cmd_q);
2379                 skb_queue_purge(&hdev->rx_q);
2380
2381                 if (hdev->flush)
2382                         hdev->flush(hdev);
2383
2384                 if (hdev->sent_cmd) {
2385                         kfree_skb(hdev->sent_cmd);
2386                         hdev->sent_cmd = NULL;
2387                 }
2388
2389                 hdev->close(hdev);
2390                 hdev->flags &= BIT(HCI_RAW);
2391         }
2392
2393 done:
2394         hci_req_unlock(hdev);
2395         return ret;
2396 }
2397
2398 /* ---- HCI ioctl helpers ---- */
2399
2400 int hci_dev_open(__u16 dev)
2401 {
2402         struct hci_dev *hdev;
2403         int err;
2404
2405         hdev = hci_dev_get(dev);
2406         if (!hdev)
2407                 return -ENODEV;
2408
2409         /* Devices that are marked as unconfigured can only be powered
2410          * up as a user channel. Trying to bring them up as normal
2411          * devices will result in a failure; only user channel operation
2412          * is possible.
2413          *
2414          * When this function is called for a user channel, the flag
2415          * HCI_USER_CHANNEL will be set first before attempting to
2416          * open the device.
2417          */
2418         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420                 err = -EOPNOTSUPP;
2421                 goto done;
2422         }
2423
2424         /* We need to ensure that no other power on/off work is pending
2425          * before proceeding to call hci_dev_do_open. This is
2426          * particularly important if the setup procedure has not yet
2427          * completed.
2428          */
2429         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430                 cancel_delayed_work(&hdev->power_off);
2431
2432         /* After this call it is guaranteed that the setup procedure
2433          * has finished, so error conditions like RFKILL or the lack of
2434          * a valid public or static random address can now be reported.
2435          */
2436         flush_workqueue(hdev->req_workqueue);
2437
2438         err = hci_dev_do_open(hdev);
2439
2440 done:
2441         hci_dev_put(hdev);
2442         return err;
2443 }
2444
2445 /* This function requires the caller holds hdev->lock */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447 {
2448         struct hci_conn_params *p;
2449
2450         list_for_each_entry(p, &hdev->le_conn_params, list)
2451                 list_del_init(&p->action);
2452
2453         BT_DBG("All LE pending actions cleared");
2454 }
2455
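     /* Power off a controller: flush pending work and queues, reset the
      * hardware if the quirk asks for it, notify the stack and clear
      * all non-persistent flags.
      */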
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2457 {
2458         BT_DBG("%s %p", hdev->name, hdev);
2459
2460         cancel_delayed_work(&hdev->power_off);
2461
2462         hci_req_cancel(hdev, ENODEV);
2463         hci_req_lock(hdev);
2464
2465         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466                 cancel_delayed_work_sync(&hdev->cmd_timer);
2467                 hci_req_unlock(hdev);
2468                 return 0;
2469         }
2470
2471         /* Flush RX and TX works */
2472         flush_work(&hdev->tx_work);
2473         flush_work(&hdev->rx_work);
2474
2475         if (hdev->discov_timeout > 0) {
2476                 cancel_delayed_work(&hdev->discov_off);
2477                 hdev->discov_timeout = 0;
2478                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2480         }
2481
2482         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483                 cancel_delayed_work(&hdev->service_cache);
2484
2485         cancel_delayed_work_sync(&hdev->le_scan_disable);
2486
2487         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488                 cancel_delayed_work_sync(&hdev->rpa_expired);
2489
2490         hci_dev_lock(hdev);
2491         hci_inquiry_cache_flush(hdev);
2492         hci_conn_hash_flush(hdev);
2493         hci_pend_le_actions_clear(hdev);
2494         hci_dev_unlock(hdev);
2495
2496         hci_notify(hdev, HCI_DEV_DOWN);
2497
2498         if (hdev->flush)
2499                 hdev->flush(hdev);
2500
2501         /* Reset device */
2502         skb_queue_purge(&hdev->cmd_q);
2503         atomic_set(&hdev->cmd_cnt, 1);
2504         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507                 set_bit(HCI_INIT, &hdev->flags);
2508                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509                 clear_bit(HCI_INIT, &hdev->flags);
2510         }
2511
2512         /* Flush cmd work */
2513         flush_work(&hdev->cmd_work);
2514
2515         /* Drop queues */
2516         skb_queue_purge(&hdev->rx_q);
2517         skb_queue_purge(&hdev->cmd_q);
2518         skb_queue_purge(&hdev->raw_q);
2519
2520         /* Drop last sent command */
2521         if (hdev->sent_cmd) {
2522                 cancel_delayed_work_sync(&hdev->cmd_timer);
2523                 kfree_skb(hdev->sent_cmd);
2524                 hdev->sent_cmd = NULL;
2525         }
2526
2527         kfree_skb(hdev->recv_evt);
2528         hdev->recv_evt = NULL;
2529
2530         /* After this point our queues are empty
2531          * and no tasks are scheduled. */
2532         hdev->close(hdev);
2533
2534         /* Clear flags */
2535         hdev->flags &= BIT(HCI_RAW);
2536         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
2538         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539                 if (hdev->dev_type == HCI_BREDR) {
2540                         hci_dev_lock(hdev);
2541                         mgmt_powered(hdev, 0);
2542                         hci_dev_unlock(hdev);
2543                 }
2544         }
2545
2546         /* Controller radio is available but is currently powered down */
2547         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2548
2549         memset(hdev->eir, 0, sizeof(hdev->eir));
2550         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551         bacpy(&hdev->random_addr, BDADDR_ANY);
2552
2553         hci_req_unlock(hdev);
2554
2555         hci_dev_put(hdev);
2556         return 0;
2557 }
2558
2559 int hci_dev_close(__u16 dev)
2560 {
2561         struct hci_dev *hdev;
2562         int err;
2563
2564         hdev = hci_dev_get(dev);
2565         if (!hdev)
2566                 return -ENODEV;
2567
2568         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569                 err = -EBUSY;
2570                 goto done;
2571         }
2572
2573         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574                 cancel_delayed_work(&hdev->power_off);
2575
2576         err = hci_dev_do_close(hdev);
2577
2578 done:
2579         hci_dev_put(hdev);
2580         return err;
2581 }
2582
2583 int hci_dev_reset(__u16 dev)
2584 {
2585         struct hci_dev *hdev;
2586         int ret = 0;
2587
2588         hdev = hci_dev_get(dev);
2589         if (!hdev)
2590                 return -ENODEV;
2591
2592         hci_req_lock(hdev);
2593
2594         if (!test_bit(HCI_UP, &hdev->flags)) {
2595                 ret = -ENETDOWN;
2596                 goto done;
2597         }
2598
2599         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600                 ret = -EBUSY;
2601                 goto done;
2602         }
2603
2604         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2605                 ret = -EOPNOTSUPP;
2606                 goto done;
2607         }
2608
2609         /* Drop queues */
2610         skb_queue_purge(&hdev->rx_q);
2611         skb_queue_purge(&hdev->cmd_q);
2612
2613         hci_dev_lock(hdev);
2614         hci_inquiry_cache_flush(hdev);
2615         hci_conn_hash_flush(hdev);
2616         hci_dev_unlock(hdev);
2617
2618         if (hdev->flush)
2619                 hdev->flush(hdev);
2620
2621         atomic_set(&hdev->cmd_cnt, 1);
2622         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2623
2624         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2625
2626 done:
2627         hci_req_unlock(hdev);
2628         hci_dev_put(hdev);
2629         return ret;
2630 }
2631
2632 int hci_dev_reset_stat(__u16 dev)
2633 {
2634         struct hci_dev *hdev;
2635         int ret = 0;
2636
2637         hdev = hci_dev_get(dev);
2638         if (!hdev)
2639                 return -ENODEV;
2640
2641         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642                 ret = -EBUSY;
2643                 goto done;
2644         }
2645
2646         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2647                 ret = -EOPNOTSUPP;
2648                 goto done;
2649         }
2650
2651         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
2653 done:
2654         hci_dev_put(hdev);
2655         return ret;
2656 }
2657
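     /* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with
      * a scan mode changed through the legacy HCISETSCAN ioctl, and let
      * the management interface know about the resulting settings.
      */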
2658 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2659 {
2660         bool conn_changed, discov_changed;
2661
2662         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2663
2664         if ((scan & SCAN_PAGE))
2665                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2666                                                  &hdev->dev_flags);
2667         else
2668                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2669                                                   &hdev->dev_flags);
2670
2671         if ((scan & SCAN_INQUIRY)) {
2672                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2673                                                    &hdev->dev_flags);
2674         } else {
2675                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2677                                                     &hdev->dev_flags);
2678         }
2679
2680         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2681                 return;
2682
2683         if (conn_changed || discov_changed) {
2684                 /* In case this was disabled through mgmt */
2685                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2686
2687                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688                         mgmt_update_adv_data(hdev);
2689
2690                 mgmt_new_settings(hdev);
2691         }
2692 }
2693
2694 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2695 {
2696         struct hci_dev *hdev;
2697         struct hci_dev_req dr;
2698         int err = 0;
2699
2700         if (copy_from_user(&dr, arg, sizeof(dr)))
2701                 return -EFAULT;
2702
2703         hdev = hci_dev_get(dr.dev_id);
2704         if (!hdev)
2705                 return -ENODEV;
2706
2707         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2708                 err = -EBUSY;
2709                 goto done;
2710         }
2711
2712         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2713                 err = -EOPNOTSUPP;
2714                 goto done;
2715         }
2716
2717         if (hdev->dev_type != HCI_BREDR) {
2718                 err = -EOPNOTSUPP;
2719                 goto done;
2720         }
2721
2722         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2723                 err = -EOPNOTSUPP;
2724                 goto done;
2725         }
2726
2727         switch (cmd) {
2728         case HCISETAUTH:
2729                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2730                                    HCI_INIT_TIMEOUT);
2731                 break;
2732
2733         case HCISETENCRYPT:
2734                 if (!lmp_encrypt_capable(hdev)) {
2735                         err = -EOPNOTSUPP;
2736                         break;
2737                 }
2738
2739                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740                         /* Auth must be enabled first */
2741                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2742                                            HCI_INIT_TIMEOUT);
2743                         if (err)
2744                                 break;
2745                 }
2746
2747                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2748                                    HCI_INIT_TIMEOUT);
2749                 break;
2750
2751         case HCISETSCAN:
2752                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2753                                    HCI_INIT_TIMEOUT);
2754
2755                 /* Ensure that the connectable and discoverable states
2756                  * get correctly modified as this was a non-mgmt change.
2757                  */
2758                 if (!err)
2759                         hci_update_scan_state(hdev, dr.dev_opt);
2760                 break;
2761
2762         case HCISETLINKPOL:
2763                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2764                                    HCI_INIT_TIMEOUT);
2765                 break;
2766
2767         case HCISETLINKMODE:
2768                 hdev->link_mode = ((__u16) dr.dev_opt) &
2769                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2770                 break;
2771
2772         case HCISETPTYPE:
2773                 hdev->pkt_type = (__u16) dr.dev_opt;
2774                 break;
2775
2776         case HCISETACLMTU:
2777                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2778                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2779                 break;
2780
2781         case HCISETSCOMTU:
2782                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2783                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2784                 break;
2785
2786         default:
2787                 err = -EINVAL;
2788                 break;
2789         }
2790
2791 done:
2792         hci_dev_put(hdev);
2793         return err;
2794 }
2795
2796 int hci_get_dev_list(void __user *arg)
2797 {
2798         struct hci_dev *hdev;
2799         struct hci_dev_list_req *dl;
2800         struct hci_dev_req *dr;
2801         int n = 0, size, err;
2802         __u16 dev_num;
2803
2804         if (get_user(dev_num, (__u16 __user *) arg))
2805                 return -EFAULT;
2806
2807         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2808                 return -EINVAL;
2809
2810         size = sizeof(*dl) + dev_num * sizeof(*dr);
2811
2812         dl = kzalloc(size, GFP_KERNEL);
2813         if (!dl)
2814                 return -ENOMEM;
2815
2816         dr = dl->dev_req;
2817
2818         read_lock(&hci_dev_list_lock);
2819         list_for_each_entry(hdev, &hci_dev_list, list) {
2820                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2821                         cancel_delayed_work(&hdev->power_off);
2822
2823                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2824                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2825
2826                 (dr + n)->dev_id  = hdev->id;
2827                 (dr + n)->dev_opt = hdev->flags;
2828
2829                 if (++n >= dev_num)
2830                         break;
2831         }
2832         read_unlock(&hci_dev_list_lock);
2833
2834         dl->dev_num = n;
2835         size = sizeof(*dl) + n * sizeof(*dr);
2836
2837         err = copy_to_user(arg, dl, size);
2838         kfree(dl);
2839
2840         return err ? -EFAULT : 0;
2841 }
2842
2843 int hci_get_dev_info(void __user *arg)
2844 {
2845         struct hci_dev *hdev;
2846         struct hci_dev_info di;
2847         int err = 0;
2848
2849         if (copy_from_user(&di, arg, sizeof(di)))
2850                 return -EFAULT;
2851
2852         hdev = hci_dev_get(di.dev_id);
2853         if (!hdev)
2854                 return -ENODEV;
2855
2856         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2857                 cancel_delayed_work_sync(&hdev->power_off);
2858
2859         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2860                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2861
2862         strcpy(di.name, hdev->name);
2863         di.bdaddr   = hdev->bdaddr;
2864         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2865         di.flags    = hdev->flags;
2866         di.pkt_type = hdev->pkt_type;
2867         if (lmp_bredr_capable(hdev)) {
2868                 di.acl_mtu  = hdev->acl_mtu;
2869                 di.acl_pkts = hdev->acl_pkts;
2870                 di.sco_mtu  = hdev->sco_mtu;
2871                 di.sco_pkts = hdev->sco_pkts;
2872         } else {
2873                 di.acl_mtu  = hdev->le_mtu;
2874                 di.acl_pkts = hdev->le_pkts;
2875                 di.sco_mtu  = 0;
2876                 di.sco_pkts = 0;
2877         }
2878         di.link_policy = hdev->link_policy;
2879         di.link_mode   = hdev->link_mode;
2880
2881         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2882         memcpy(&di.features, &hdev->features, sizeof(di.features));
2883
2884         if (copy_to_user(arg, &di, sizeof(di)))
2885                 err = -EFAULT;
2886
2887         hci_dev_put(hdev);
2888
2889         return err;
2890 }
2891
2892 /* ---- Interface to HCI drivers ---- */
2893
2894 static int hci_rfkill_set_block(void *data, bool blocked)
2895 {
2896         struct hci_dev *hdev = data;
2897
2898         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2899
2900         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2901                 return -EBUSY;
2902
2903         if (blocked) {
2904                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2905                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2906                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2907                         hci_dev_do_close(hdev);
2908         } else {
2909                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2910         }
2911
2912         return 0;
2913 }
2914
2915 static const struct rfkill_ops hci_rfkill_ops = {
2916         .set_block = hci_rfkill_set_block,
2917 };
2918
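     /* Deferred power-on work. Opens the device and, depending on how
      * setup went, powers it back off, arms the auto-off timeout, or
      * announces the (possibly unconfigured) index to the management
      * interface.
      */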
2919 static void hci_power_on(struct work_struct *work)
2920 {
2921         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2922         int err;
2923
2924         BT_DBG("%s", hdev->name);
2925
2926         err = hci_dev_do_open(hdev);
2927         if (err < 0) {
2928                 mgmt_set_powered_failed(hdev, err);
2929                 return;
2930         }
2931
2932         /* During the HCI setup phase, a few error conditions are
2933          * ignored and they need to be checked now. If they are still
2934          * valid, it is important to turn the device back off.
2935          */
2936         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2937             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2938             (hdev->dev_type == HCI_BREDR &&
2939              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2940              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2941                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2942                 hci_dev_do_close(hdev);
2943         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2944                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2945                                    HCI_AUTO_OFF_TIMEOUT);
2946         }
2947
2948         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2949                 /* For unconfigured devices, set the HCI_RAW flag
2950                  * so that userspace can easily identify them.
2951                  */
2952                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2953                         set_bit(HCI_RAW, &hdev->flags);
2954
2955                 /* For fully configured devices, this will send
2956                  * the Index Added event. For unconfigured devices,
2957                  * it will send the Unconfigured Index Added event.
2958                  *
2959                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2960                  * and no event will be sent.
2961                  */
2962                 mgmt_index_added(hdev);
2963         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2964                 /* Now that the controller is configured, it is
2965                  * important to clear the HCI_RAW flag.
2966                  */
2967                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2968                         clear_bit(HCI_RAW, &hdev->flags);
2969
2970                 /* Powering on the controller with HCI_CONFIG set only
2971                  * happens with the transition from unconfigured to
2972                  * configured. This will send the Index Added event.
2973                  */
2974                 mgmt_index_added(hdev);
2975         }
2976 }
2977
2978 static void hci_power_off(struct work_struct *work)
2979 {
2980         struct hci_dev *hdev = container_of(work, struct hci_dev,
2981                                             power_off.work);
2982
2983         BT_DBG("%s", hdev->name);
2984
2985         hci_dev_do_close(hdev);
2986 }
2987
2988 static void hci_discov_off(struct work_struct *work)
2989 {
2990         struct hci_dev *hdev;
2991
2992         hdev = container_of(work, struct hci_dev, discov_off.work);
2993
2994         BT_DBG("%s", hdev->name);
2995
2996         mgmt_discoverable_timeout(hdev);
2997 }
2998
2999 void hci_uuids_clear(struct hci_dev *hdev)
3000 {
3001         struct bt_uuid *uuid, *tmp;
3002
3003         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3004                 list_del(&uuid->list);
3005                 kfree(uuid);
3006         }
3007 }
3008
3009 void hci_link_keys_clear(struct hci_dev *hdev)
3010 {
3011         struct list_head *p, *n;
3012
3013         list_for_each_safe(p, n, &hdev->link_keys) {
3014                 struct link_key *key;
3015
3016                 key = list_entry(p, struct link_key, list);
3017
3018                 list_del(p);
3019                 kfree(key);
3020         }
3021 }
3022
3023 void hci_smp_ltks_clear(struct hci_dev *hdev)
3024 {
3025         struct smp_ltk *k, *tmp;
3026
3027         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3028                 list_del(&k->list);
3029                 kfree(k);
3030         }
3031 }
3032
3033 void hci_smp_irks_clear(struct hci_dev *hdev)
3034 {
3035         struct smp_irk *k, *tmp;
3036
3037         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3038                 list_del(&k->list);
3039                 kfree(k);
3040         }
3041 }
3042
3043 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3044 {
3045         struct link_key *k;
3046
3047         list_for_each_entry(k, &hdev->link_keys, list)
3048                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3049                         return k;
3050
3051         return NULL;
3052 }
3053
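     /* Decide whether a link key should be stored persistently, based
      * on the key type and the bonding requirements negotiated on the
      * connection (if one exists).
      */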
3054 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3055                                u8 key_type, u8 old_key_type)
3056 {
3057         /* Legacy key */
3058         if (key_type < 0x03)
3059                 return true;
3060
3061         /* Debug keys are insecure so don't store them persistently */
3062         if (key_type == HCI_LK_DEBUG_COMBINATION)
3063                 return false;
3064
3065         /* Changed combination key and there's no previous one */
3066         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3067                 return false;
3068
3069         /* Security mode 3 case */
3070         if (!conn)
3071                 return true;
3072
3073         /* Neither local nor remote side had no-bonding as a requirement */
3074         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3075                 return true;
3076
3077         /* Local side had dedicated bonding as a requirement */
3078         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3079                 return true;
3080
3081         /* Remote side had dedicated bonding as a requirement */
3082         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3083                 return true;
3084
3085         /* If none of the above criteria match, then don't store the key
3086          * persistently */
3087         return false;
3088 }
3089
3090 static bool ltk_type_master(u8 type)
3091 {
3092         return (type == SMP_LTK);
3093 }
3094
3095 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3096                              bool master)
3097 {
3098         struct smp_ltk *k;
3099
3100         list_for_each_entry(k, &hdev->long_term_keys, list) {
3101                 if (k->ediv != ediv || k->rand != rand)
3102                         continue;
3103
3104                 if (ltk_type_master(k->type) != master)
3105                         continue;
3106
3107                 return k;
3108         }
3109
3110         return NULL;
3111 }
3112
3113 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3114                                      u8 addr_type, bool master)
3115 {
3116         struct smp_ltk *k;
3117
3118         list_for_each_entry(k, &hdev->long_term_keys, list)
3119                 if (addr_type == k->bdaddr_type &&
3120                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3121                     ltk_type_master(k->type) == master)
3122                         return k;
3123
3124         return NULL;
3125 }
3126
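     /* Resolve a Resolvable Private Address to its IRK. The cached RPA
      * of each key is tried first; failing that, the more expensive
      * cryptographic match is run and the RPA cached on success.
      */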
3127 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3128 {
3129         struct smp_irk *irk;
3130
3131         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3132                 if (!bacmp(&irk->rpa, rpa))
3133                         return irk;
3134         }
3135
3136         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3137                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3138                         bacpy(&irk->rpa, rpa);
3139                         return irk;
3140                 }
3141         }
3142
3143         return NULL;
3144 }
3145
3146 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3147                                      u8 addr_type)
3148 {
3149         struct smp_irk *irk;
3150
3151         /* Identity Address must be public or static random */
3152         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3153                 return NULL;
3154
3155         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3156                 if (addr_type == irk->addr_type &&
3157                     bacmp(bdaddr, &irk->bdaddr) == 0)
3158                         return irk;
3159         }
3160
3161         return NULL;
3162 }
3163
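     /* Store a new BR/EDR link key for bdaddr, or update an existing
      * one. If persistent is non-NULL it is set to whether the key
      * should survive a power cycle, as decided by hci_persistent_key().
      */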
3164 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3165                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3166                                   u8 pin_len, bool *persistent)
3167 {
3168         struct link_key *key, *old_key;
3169         u8 old_key_type;
3170
3171         old_key = hci_find_link_key(hdev, bdaddr);
3172         if (old_key) {
3173                 old_key_type = old_key->type;
3174                 key = old_key;
3175         } else {
3176                 old_key_type = conn ? conn->key_type : 0xff;
3177                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3178                 if (!key)
3179                         return NULL;
3180                 list_add(&key->list, &hdev->link_keys);
3181         }
3182
3183         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3184
3185         /* Some buggy controller combinations generate a changed
3186          * combination key for legacy pairing even when there's no
3187          * previous key */
3188         if (type == HCI_LK_CHANGED_COMBINATION &&
3189             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3190                 type = HCI_LK_COMBINATION;
3191                 if (conn)
3192                         conn->key_type = type;
3193         }
3194
3195         bacpy(&key->bdaddr, bdaddr);
3196         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3197         key->pin_len = pin_len;
3198
3199         if (type == HCI_LK_CHANGED_COMBINATION)
3200                 key->type = old_key_type;
3201         else
3202                 key->type = type;
3203
3204         if (persistent)
3205                 *persistent = hci_persistent_key(hdev, conn, type,
3206                                                  old_key_type);
3207
3208         return key;
3209 }
3210
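     /* Store a new SMP Long Term Key for the given identity address, or
      * update an existing one. Master and slave keys are kept as
      * separate entries for the same address.
      */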
3211 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3212                             u8 addr_type, u8 type, u8 authenticated,
3213                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3214 {
3215         struct smp_ltk *key, *old_key;
3216         bool master = ltk_type_master(type);
3217
3218         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3219         if (old_key)
3220                 key = old_key;
3221         else {
3222                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3223                 if (!key)
3224                         return NULL;
3225                 list_add(&key->list, &hdev->long_term_keys);
3226         }
3227
3228         bacpy(&key->bdaddr, bdaddr);
3229         key->bdaddr_type = addr_type;
3230         memcpy(key->val, tk, sizeof(key->val));
3231         key->authenticated = authenticated;
3232         key->ediv = ediv;
3233         key->rand = rand;
3234         key->enc_size = enc_size;
3235         key->type = type;
3236
3237         return key;
3238 }
3239
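/* Store an Identity Resolving Key for the given identity address,
 * updating the key value and cached RPA of any existing entry.
 */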
3240 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3241                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3242 {
3243         struct smp_irk *irk;
3244
3245         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3246         if (!irk) {
3247                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3248                 if (!irk)
3249                         return NULL;
3250
3251                 bacpy(&irk->bdaddr, bdaddr);
3252                 irk->addr_type = addr_type;
3253
3254                 list_add(&irk->list, &hdev->identity_resolving_keys);
3255         }
3256
3257         memcpy(irk->val, val, 16);
3258         bacpy(&irk->rpa, rpa);
3259
3260         return irk;
3261 }
3262
3263 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3264 {
3265         struct link_key *key;
3266
3267         key = hci_find_link_key(hdev, bdaddr);
3268         if (!key)
3269                 return -ENOENT;
3270
3271         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3272
3273         list_del(&key->list);
3274         kfree(key);
3275
3276         return 0;
3277 }
3278
3279 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3280 {
3281         struct smp_ltk *k, *tmp;
3282         int removed = 0;
3283
3284         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3285                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3286                         continue;
3287
3288                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3289
3290                 list_del(&k->list);
3291                 kfree(k);
3292                 removed++;
3293         }
3294
3295         return removed ? 0 : -ENOENT;
3296 }
3297
3298 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3299 {
3300         struct smp_irk *k, *tmp;
3301
3302         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3303                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3304                         continue;
3305
3306                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3307
3308                 list_del(&k->list);
3309                 kfree(k);
3310         }
3311 }
3312
3313 /* HCI command timer function */
3314 static void hci_cmd_timeout(struct work_struct *work)
3315 {
3316         struct hci_dev *hdev = container_of(work, struct hci_dev,
3317                                             cmd_timer.work);
3318
3319         if (hdev->sent_cmd) {
3320                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3321                 u16 opcode = __le16_to_cpu(sent->opcode);
3322
3323                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3324         } else {
3325                 BT_ERR("%s command tx timeout", hdev->name);
3326         }
3327
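        /* Allow the next queued command to be sent by resetting the
         * outstanding command credit and kicking the command work.
         */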
3328         atomic_set(&hdev->cmd_cnt, 1);
3329         queue_work(hdev->workqueue, &hdev->cmd_work);
3330 }
3331
3332 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3333                                           bdaddr_t *bdaddr)
3334 {
3335         struct oob_data *data;
3336
3337         list_for_each_entry(data, &hdev->remote_oob_data, list)
3338                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3339                         return data;
3340
3341         return NULL;
3342 }
3343
3344 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3345 {
3346         struct oob_data *data;
3347
3348         data = hci_find_remote_oob_data(hdev, bdaddr);
3349         if (!data)
3350                 return -ENOENT;
3351
3352         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3353
3354         list_del(&data->list);
3355         kfree(data);
3356
3357         return 0;
3358 }
3359
3360 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3361 {
3362         struct oob_data *data, *n;
3363
3364         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3365                 list_del(&data->list);
3366                 kfree(data);
3367         }
3368 }
3369
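/* Store legacy (P-192 only) OOB data for a remote device. The Secure
 * Connections (P-256) values are cleared since this variant does not
 * provide them.
 */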
3370 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3371                             u8 *hash, u8 *randomizer)
3372 {
3373         struct oob_data *data;
3374
3375         data = hci_find_remote_oob_data(hdev, bdaddr);
3376         if (!data) {
3377                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3378                 if (!data)
3379                         return -ENOMEM;
3380
3381                 bacpy(&data->bdaddr, bdaddr);
3382                 list_add(&data->list, &hdev->remote_oob_data);
3383         }
3384
3385         memcpy(data->hash192, hash, sizeof(data->hash192));
3386         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3387
3388         memset(data->hash256, 0, sizeof(data->hash256));
3389         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3390
3391         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3392
3393         return 0;
3394 }
3395
3396 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3397                                 u8 *hash192, u8 *randomizer192,
3398                                 u8 *hash256, u8 *randomizer256)
3399 {
3400         struct oob_data *data;
3401
3402         data = hci_find_remote_oob_data(hdev, bdaddr);
3403         if (!data) {
3404                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3405                 if (!data)
3406                         return -ENOMEM;
3407
3408                 bacpy(&data->bdaddr, bdaddr);
3409                 list_add(&data->list, &hdev->remote_oob_data);
3410         }
3411
3412         memcpy(data->hash192, hash192, sizeof(data->hash192));
3413         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3414
3415         memcpy(data->hash256, hash256, sizeof(data->hash256));
3416         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3417
3418         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3419
3420         return 0;
3421 }
3422
3423 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3424                                            bdaddr_t *bdaddr, u8 type)
3425 {
3426         struct bdaddr_list *b;
3427
3428         list_for_each_entry(b, bdaddr_list, list) {
3429                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3430                         return b;
3431         }
3432
3433         return NULL;
3434 }
3435
3436 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3437 {
3438         struct list_head *p, *n;
3439
3440         list_for_each_safe(p, n, bdaddr_list) {
3441                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3442
3443                 list_del(p);
3444                 kfree(b);
3445         }
3446 }
3447
3448 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3449 {
3450         struct bdaddr_list *entry;
3451
3452         if (!bacmp(bdaddr, BDADDR_ANY))
3453                 return -EBADF;
3454
3455         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3456                 return -EEXIST;
3457
3458         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3459         if (!entry)
3460                 return -ENOMEM;
3461
3462         bacpy(&entry->bdaddr, bdaddr);
3463         entry->bdaddr_type = type;
3464
3465         list_add(&entry->list, list);
3466
3467         return 0;
3468 }
3469
3470 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3471 {
3472         struct bdaddr_list *entry;
3473
3474         if (!bacmp(bdaddr, BDADDR_ANY)) {
3475                 hci_bdaddr_list_clear(list);
3476                 return 0;
3477         }
3478
3479         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3480         if (!entry)
3481                 return -ENOENT;
3482
3483         list_del(&entry->list);
3484         kfree(entry);
3485
3486         return 0;
3487 }
3488
3489 /* This function requires the caller holds hdev->lock */
3490 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3491                                                bdaddr_t *addr, u8 addr_type)
3492 {
3493         struct hci_conn_params *params;
3494
3495         /* The conn params list only contains identity addresses */
3496         if (!hci_is_identity_address(addr, addr_type))
3497                 return NULL;
3498
3499         list_for_each_entry(params, &hdev->le_conn_params, list) {
3500                 if (bacmp(&params->addr, addr) == 0 &&
3501                     params->addr_type == addr_type) {
3502                         return params;
3503                 }
3504         }
3505
3506         return NULL;
3507 }
3508
3509 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3510 {
3511         struct hci_conn *conn;
3512
3513         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3514         if (!conn)
3515                 return false;
3516
3517         if (conn->dst_type != type)
3518                 return false;
3519
3520         if (conn->state != BT_CONNECTED)
3521                 return false;
3522
3523         return true;
3524 }
3525
3526 /* This function requires the caller holds hdev->lock */
3527 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3528                                                   bdaddr_t *addr, u8 addr_type)
3529 {
3530         struct hci_conn_params *param;
3531
3532         /* The list only contains identity addresses */
3533         if (!hci_is_identity_address(addr, addr_type))
3534                 return NULL;
3535
3536         list_for_each_entry(param, list, action) {
3537                 if (bacmp(&param->addr, addr) == 0 &&
3538                     param->addr_type == addr_type)
3539                         return param;
3540         }
3541
3542         return NULL;
3543 }
3544
3545 /* This function requires the caller holds hdev->lock */
3546 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3547                                             bdaddr_t *addr, u8 addr_type)
3548 {
3549         struct hci_conn_params *params;
3550
3551         if (!hci_is_identity_address(addr, addr_type))
3552                 return NULL;
3553
3554         params = hci_conn_params_lookup(hdev, addr, addr_type);
3555         if (params)
3556                 return params;
3557
3558         params = kzalloc(sizeof(*params), GFP_KERNEL);
3559         if (!params) {
3560                 BT_ERR("Out of memory");
3561                 return NULL;
3562         }
3563
3564         bacpy(&params->addr, addr);
3565         params->addr_type = addr_type;
3566
3567         list_add(&params->list, &hdev->le_conn_params);
3568         INIT_LIST_HEAD(&params->action);
3569
3570         params->conn_min_interval = hdev->le_conn_min_interval;
3571         params->conn_max_interval = hdev->le_conn_max_interval;
3572         params->conn_latency = hdev->le_conn_latency;
3573         params->supervision_timeout = hdev->le_supv_timeout;
3574         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3575
3576         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3577
3578         return params;
3579 }
3580
3581 /* This function requires the caller holds hdev->lock */
3582 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3583                         u8 auto_connect)
3584 {
3585         struct hci_conn_params *params;
3586
3587         params = hci_conn_params_add(hdev, addr, addr_type);
3588         if (!params)
3589                 return -EIO;
3590
3591         if (params->auto_connect == auto_connect)
3592                 return 0;
3593
3594         list_del_init(&params->action);
3595
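        /* Depending on the new policy, place the entry on the
         * pend_le_reports or pend_le_conns action list and update
         * the background scanning state accordingly.
         */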
3596         switch (auto_connect) {
3597         case HCI_AUTO_CONN_DISABLED:
3598         case HCI_AUTO_CONN_LINK_LOSS:
3599                 hci_update_background_scan(hdev);
3600                 break;
3601         case HCI_AUTO_CONN_REPORT:
3602                 list_add(&params->action, &hdev->pend_le_reports);
3603                 hci_update_background_scan(hdev);
3604                 break;
3605         case HCI_AUTO_CONN_ALWAYS:
3606                 if (!is_connected(hdev, addr, addr_type)) {
3607                         list_add(&params->action, &hdev->pend_le_conns);
3608                         hci_update_background_scan(hdev);
3609                 }
3610                 break;
3611         }
3612
3613         params->auto_connect = auto_connect;
3614
3615         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3616                auto_connect);
3617
3618         return 0;
3619 }
3620
3621 /* This function requires the caller holds hdev->lock */
3622 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3623 {
3624         struct hci_conn_params *params;
3625
3626         params = hci_conn_params_lookup(hdev, addr, addr_type);
3627         if (!params)
3628                 return;
3629
3630         list_del(&params->action);
3631         list_del(&params->list);
3632         kfree(params);
3633
3634         hci_update_background_scan(hdev);
3635
3636         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3637 }
3638
3639 /* This function requires the caller holds hdev->lock */
3640 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3641 {
3642         struct hci_conn_params *params, *tmp;
3643
3644         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3645                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3646                         continue;
3647                 list_del(&params->list);
3648                 kfree(params);
3649         }
3650
3651         BT_DBG("All LE disabled connection parameters were removed");
3652 }
3653
3654 /* This function requires the caller holds hdev->lock */
3655 void hci_conn_params_clear_all(struct hci_dev *hdev)
3656 {
3657         struct hci_conn_params *params, *tmp;
3658
3659         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3660                 list_del(&params->action);
3661                 list_del(&params->list);
3662                 kfree(params);
3663         }
3664
3665         hci_update_background_scan(hdev);
3666
3667         BT_DBG("All LE connection parameters were removed");
3668 }
3669
3670 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3671 {
3672         if (status) {
3673                 BT_ERR("Failed to start inquiry: status %d", status);
3674
3675                 hci_dev_lock(hdev);
3676                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3677                 hci_dev_unlock(hdev);
3679         }
3680 }
3681
3682 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3683 {
3684         /* General inquiry access code (GIAC) */
3685         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3686         struct hci_request req;
3687         struct hci_cp_inquiry cp;
3688         int err;
3689
3690         if (status) {
3691                 BT_ERR("Failed to disable LE scanning: status %d", status);
3692                 return;
3693         }
3694
3695         switch (hdev->discovery.type) {
3696         case DISCOV_TYPE_LE:
3697                 hci_dev_lock(hdev);
3698                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3699                 hci_dev_unlock(hdev);
3700                 break;
3701
3702         case DISCOV_TYPE_INTERLEAVED:
3703                 hci_req_init(&req, hdev);
3704
3705                 memset(&cp, 0, sizeof(cp));
3706                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3707                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3708                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3709
3710                 hci_dev_lock(hdev);
3711
3712                 hci_inquiry_cache_flush(hdev);
3713
3714                 err = hci_req_run(&req, inquiry_complete);
3715                 if (err) {
3716                         BT_ERR("Inquiry request failed: err %d", err);
3717                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3718                 }
3719
3720                 hci_dev_unlock(hdev);
3721                 break;
3722         }
3723 }
3724
3725 static void le_scan_disable_work(struct work_struct *work)
3726 {
3727         struct hci_dev *hdev = container_of(work, struct hci_dev,
3728                                             le_scan_disable.work);
3729         struct hci_request req;
3730         int err;
3731
3732         BT_DBG("%s", hdev->name);
3733
3734         hci_req_init(&req, hdev);
3735
3736         hci_req_add_le_scan_disable(&req);
3737
3738         err = hci_req_run(&req, le_scan_disable_work_complete);
3739         if (err)
3740                 BT_ERR("Disable LE scanning request failed: err %d", err);
3741 }
3742
3743 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3744 {
3745         struct hci_dev *hdev = req->hdev;
3746
3747         /* If we're advertising or initiating an LE connection we can't
3748          * go ahead and change the random address at this time. This is
3749          * because the eventual initiator address used for the
3750          * subsequently created connection will be undefined (some
3751          * controllers use the new address and others the one we had
3752          * when the operation started).
3753          *
3754          * In this kind of scenario skip the update and let the random
3755          * address be updated at the next cycle.
3756          */
3757         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3758             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3759                 BT_DBG("Deferring random address update");
3760                 return;
3761         }
3762
3763         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3764 }
3765
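/* Select the own address type for an upcoming LE operation and, if
 * needed, queue a command to update the controller's random address.
 * Preference order: a resolvable private address when privacy is
 * enabled, an unresolvable private address when privacy is required,
 * the static address when no public address is available (or its use
 * is forced), and the public address otherwise.
 */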
3766 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3767                               u8 *own_addr_type)
3768 {
3769         struct hci_dev *hdev = req->hdev;
3770         int err;
3771
3772         /* If privacy is enabled use a resolvable private address. If
3773          * the current RPA has expired or there is something other than
3774          * the current RPA in use, then generate a new one.
3775          */
3776         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3777                 int to;
3778
3779                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3780
3781                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3782                     !bacmp(&hdev->random_addr, &hdev->rpa))
3783                         return 0;
3784
3785                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3786                 if (err < 0) {
3787                         BT_ERR("%s failed to generate new RPA", hdev->name);
3788                         return err;
3789                 }
3790
3791                 set_random_addr(req, &hdev->rpa);
3792
3793                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3794                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3795
3796                 return 0;
3797         }
3798
3799         /* In case of required privacy without resolvable private address,
3800          * use an unresolvable private address. This is useful for active
3801          * scanning and non-connectable advertising.
3802          */
3803         if (require_privacy) {
3804                 bdaddr_t urpa;
3805
3806                 get_random_bytes(&urpa, 6);
3807                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3808
3809                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3810                 set_random_addr(req, &urpa);
3811                 return 0;
3812         }
3813
3814         /* If forcing static address is in use or there is no public
3815          * address use the static address as random address (but skip
3816          * the HCI command if the current random address is already the
3817          * static one).
3818          */
3819         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3820             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3821                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3822                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3823                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3824                                     &hdev->static_addr);
3825                 return 0;
3826         }
3827
3828         /* Neither privacy nor static address is being used so use a
3829          * public address.
3830          */
3831         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3832
3833         return 0;
3834 }
3835
3836 /* Copy the Identity Address of the controller.
3837  *
3838  * If the controller has a public BD_ADDR, then by default use that one.
3839  * If this is an LE-only controller without a public address, default to
3840  * the static random address.
3841  *
3842  * For debugging purposes it is possible to force controllers with a
3843  * public address to use the static random address instead.
3844  */
3845 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3846                                u8 *bdaddr_type)
3847 {
3848         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3849             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3850                 bacpy(bdaddr, &hdev->static_addr);
3851                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3852         } else {
3853                 bacpy(bdaddr, &hdev->bdaddr);
3854                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3855         }
3856 }
3857
3858 /* Alloc HCI device */
3859 struct hci_dev *hci_alloc_dev(void)
3860 {
3861         struct hci_dev *hdev;
3862
3863         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3864         if (!hdev)
3865                 return NULL;
3866
3867         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3868         hdev->esco_type = (ESCO_HV1);
3869         hdev->link_mode = (HCI_LM_ACCEPT);
3870         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3871         hdev->io_capability = 0x03;     /* No Input No Output */
3872         hdev->manufacturer = 0xffff;    /* Default to internal use */
3873         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3874         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3875
3876         hdev->sniff_max_interval = 800;
3877         hdev->sniff_min_interval = 80;
3878
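        /* Default LE parameters: scan interval/window are in units of
         * 0.625 ms, the connection interval in units of 1.25 ms and
         * the supervision timeout in units of 10 ms.
         */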
3879         hdev->le_adv_channel_map = 0x07;
3880         hdev->le_scan_interval = 0x0060;
3881         hdev->le_scan_window = 0x0030;
3882         hdev->le_conn_min_interval = 0x0028;
3883         hdev->le_conn_max_interval = 0x0038;
3884         hdev->le_conn_latency = 0x0000;
3885         hdev->le_supv_timeout = 0x002a;
3886
3887         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3888         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3889         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3890         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3891
3892         mutex_init(&hdev->lock);
3893         mutex_init(&hdev->req_lock);
3894
3895         INIT_LIST_HEAD(&hdev->mgmt_pending);
3896         INIT_LIST_HEAD(&hdev->blacklist);
3897         INIT_LIST_HEAD(&hdev->whitelist);
3898         INIT_LIST_HEAD(&hdev->uuids);
3899         INIT_LIST_HEAD(&hdev->link_keys);
3900         INIT_LIST_HEAD(&hdev->long_term_keys);
3901         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3902         INIT_LIST_HEAD(&hdev->remote_oob_data);
3903         INIT_LIST_HEAD(&hdev->le_white_list);
3904         INIT_LIST_HEAD(&hdev->le_conn_params);
3905         INIT_LIST_HEAD(&hdev->pend_le_conns);
3906         INIT_LIST_HEAD(&hdev->pend_le_reports);
3907         INIT_LIST_HEAD(&hdev->conn_hash.list);
3908
3909         INIT_WORK(&hdev->rx_work, hci_rx_work);
3910         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3911         INIT_WORK(&hdev->tx_work, hci_tx_work);
3912         INIT_WORK(&hdev->power_on, hci_power_on);
3913
3914         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3915         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3916         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3917
3918         skb_queue_head_init(&hdev->rx_q);
3919         skb_queue_head_init(&hdev->cmd_q);
3920         skb_queue_head_init(&hdev->raw_q);
3921
3922         init_waitqueue_head(&hdev->req_wait_q);
3923
3924         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3925
3926         hci_init_sysfs(hdev);
3927         discovery_init(hdev);
3928
3929         return hdev;
3930 }
3931 EXPORT_SYMBOL(hci_alloc_dev);
3932
3933 /* Free HCI device */
3934 void hci_free_dev(struct hci_dev *hdev)
3935 {
3936         /* Will be freed via the device release callback */
3937         put_device(&hdev->dev);
3938 }
3939 EXPORT_SYMBOL(hci_free_dev);
3940
3941 /* Register HCI device */
3942 int hci_register_dev(struct hci_dev *hdev)
3943 {
3944         int id, error;
3945
3946         if (!hdev->open || !hdev->close || !hdev->send)
3947                 return -EINVAL;
3948
3949         /* Do not allow HCI_AMP devices to register at index 0,
3950          * so the index can be used as the AMP controller ID.
3951          */
3952         switch (hdev->dev_type) {
3953         case HCI_BREDR:
3954                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3955                 break;
3956         case HCI_AMP:
3957                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3958                 break;
3959         default:
3960                 return -EINVAL;
3961         }
3962
3963         if (id < 0)
3964                 return id;
3965
3966         sprintf(hdev->name, "hci%d", id);
3967         hdev->id = id;
3968
3969         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3970
3971         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3972                                           WQ_MEM_RECLAIM, 1, hdev->name);
3973         if (!hdev->workqueue) {
3974                 error = -ENOMEM;
3975                 goto err;
3976         }
3977
3978         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3979                                               WQ_MEM_RECLAIM, 1, hdev->name);
3980         if (!hdev->req_workqueue) {
3981                 destroy_workqueue(hdev->workqueue);
3982                 error = -ENOMEM;
3983                 goto err;
3984         }
3985
3986         if (!IS_ERR_OR_NULL(bt_debugfs))
3987                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3988
3989         dev_set_name(&hdev->dev, "%s", hdev->name);
3990
3991         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3992                                                CRYPTO_ALG_ASYNC);
3993         if (IS_ERR(hdev->tfm_aes)) {
3994                 BT_ERR("Unable to create crypto context");
3995                 error = PTR_ERR(hdev->tfm_aes);
3996                 hdev->tfm_aes = NULL;
3997                 goto err_wqueue;
3998         }
3999
4000         error = device_add(&hdev->dev);
4001         if (error < 0)
4002                 goto err_tfm;
4003
4004         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4005                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4006                                     hdev);
4007         if (hdev->rfkill) {
4008                 if (rfkill_register(hdev->rfkill) < 0) {
4009                         rfkill_destroy(hdev->rfkill);
4010                         hdev->rfkill = NULL;
4011                 }
4012         }
4013
4014         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4015                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4016
4017         set_bit(HCI_SETUP, &hdev->dev_flags);
4018         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4019
4020         if (hdev->dev_type == HCI_BREDR) {
4021                 /* Assume BR/EDR support until proven otherwise (such as
4022                  * through reading supported features during init).
4023                  */
4024                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4025         }
4026
4027         write_lock(&hci_dev_list_lock);
4028         list_add(&hdev->list, &hci_dev_list);
4029         write_unlock(&hci_dev_list_lock);
4030
4031         /* Devices that are marked for raw-only usage are unconfigured
4032          * and should not be included in normal operation.
4033          */
4034         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4035                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4036
4037         hci_notify(hdev, HCI_DEV_REG);
4038         hci_dev_hold(hdev);
4039
4040         queue_work(hdev->req_workqueue, &hdev->power_on);
4041
4042         return id;
4043
4044 err_tfm:
4045         crypto_free_blkcipher(hdev->tfm_aes);
4046 err_wqueue:
4047         destroy_workqueue(hdev->workqueue);
4048         destroy_workqueue(hdev->req_workqueue);
4049 err:
4050         ida_simple_remove(&hci_index_ida, hdev->id);
4051
4052         return error;
4053 }
4054 EXPORT_SYMBOL(hci_register_dev);
4055
4056 /* Unregister HCI device */
4057 void hci_unregister_dev(struct hci_dev *hdev)
4058 {
4059         int i, id;
4060
4061         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4062
4063         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4064
4065         id = hdev->id;
4066
4067         write_lock(&hci_dev_list_lock);
4068         list_del(&hdev->list);
4069         write_unlock(&hci_dev_list_lock);
4070
4071         hci_dev_do_close(hdev);
4072
4073         for (i = 0; i < NUM_REASSEMBLY; i++)
4074                 kfree_skb(hdev->reassembly[i]);
4075
4076         cancel_work_sync(&hdev->power_on);
4077
4078         if (!test_bit(HCI_INIT, &hdev->flags) &&
4079             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4080             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4081                 hci_dev_lock(hdev);
4082                 mgmt_index_removed(hdev);
4083                 hci_dev_unlock(hdev);
4084         }
4085
4086         /* mgmt_index_removed should take care of emptying the
4087          * pending list.
         */
4088         BUG_ON(!list_empty(&hdev->mgmt_pending));
4089
4090         hci_notify(hdev, HCI_DEV_UNREG);
4091
4092         if (hdev->rfkill) {
4093                 rfkill_unregister(hdev->rfkill);
4094                 rfkill_destroy(hdev->rfkill);
4095         }
4096
4097         if (hdev->tfm_aes)
4098                 crypto_free_blkcipher(hdev->tfm_aes);
4099
4100         device_del(&hdev->dev);
4101
4102         debugfs_remove_recursive(hdev->debugfs);
4103
4104         destroy_workqueue(hdev->workqueue);
4105         destroy_workqueue(hdev->req_workqueue);
4106
4107         hci_dev_lock(hdev);
4108         hci_bdaddr_list_clear(&hdev->blacklist);
4109         hci_bdaddr_list_clear(&hdev->whitelist);
4110         hci_uuids_clear(hdev);
4111         hci_link_keys_clear(hdev);
4112         hci_smp_ltks_clear(hdev);
4113         hci_smp_irks_clear(hdev);
4114         hci_remote_oob_data_clear(hdev);
4115         hci_bdaddr_list_clear(&hdev->le_white_list);
4116         hci_conn_params_clear_all(hdev);
4117         hci_dev_unlock(hdev);
4118
4119         hci_dev_put(hdev);
4120
4121         ida_simple_remove(&hci_index_ida, id);
4122 }
4123 EXPORT_SYMBOL(hci_unregister_dev);
4124
4125 /* Suspend HCI device */
4126 int hci_suspend_dev(struct hci_dev *hdev)
4127 {
4128         hci_notify(hdev, HCI_DEV_SUSPEND);
4129         return 0;
4130 }
4131 EXPORT_SYMBOL(hci_suspend_dev);
4132
4133 /* Resume HCI device */
4134 int hci_resume_dev(struct hci_dev *hdev)
4135 {
4136         hci_notify(hdev, HCI_DEV_RESUME);
4137         return 0;
4138 }
4139 EXPORT_SYMBOL(hci_resume_dev);
4140
4141 /* Receive frame from HCI drivers */
4142 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4143 {
4144         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4145                       !test_bit(HCI_INIT, &hdev->flags))) {
4146                 kfree_skb(skb);
4147                 return -ENXIO;
4148         }
4149
4150         /* Incoming skb */
4151         bt_cb(skb)->incoming = 1;
4152
4153         /* Time stamp */
4154         __net_timestamp(skb);
4155
4156         skb_queue_tail(&hdev->rx_q, skb);
4157         queue_work(hdev->workqueue, &hdev->rx_work);
4158
4159         return 0;
4160 }
4161 EXPORT_SYMBOL(hci_recv_frame);
4162
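/* Reassemble a fragmented HCI packet of the given type in the
 * per-device reassembly slot @index. Once the header has arrived, the
 * expected payload length is read from it; a complete packet is
 * handed to hci_recv_frame(). Returns the number of input bytes left
 * unconsumed, or a negative error.
 */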
4163 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4164                           int count, __u8 index)
4165 {
4166         int len = 0;
4167         int hlen = 0;
4168         int remain = count;
4169         struct sk_buff *skb;
4170         struct bt_skb_cb *scb;
4171
4172         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4173             index >= NUM_REASSEMBLY)
4174                 return -EILSEQ;
4175
4176         skb = hdev->reassembly[index];
4177
4178         if (!skb) {
4179                 switch (type) {
4180                 case HCI_ACLDATA_PKT:
4181                         len = HCI_MAX_FRAME_SIZE;
4182                         hlen = HCI_ACL_HDR_SIZE;
4183                         break;
4184                 case HCI_EVENT_PKT:
4185                         len = HCI_MAX_EVENT_SIZE;
4186                         hlen = HCI_EVENT_HDR_SIZE;
4187                         break;
4188                 case HCI_SCODATA_PKT:
4189                         len = HCI_MAX_SCO_SIZE;
4190                         hlen = HCI_SCO_HDR_SIZE;
4191                         break;
4192                 }
4193
4194                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4195                 if (!skb)
4196                         return -ENOMEM;
4197
4198                 scb = (void *) skb->cb;
4199                 scb->expect = hlen;
4200                 scb->pkt_type = type;
4201
4202                 hdev->reassembly[index] = skb;
4203         }
4204
4205         while (count) {
4206                 scb = (void *) skb->cb;
4207                 len = min_t(uint, scb->expect, count);
4208
4209                 memcpy(skb_put(skb, len), data, len);
4210
4211                 count -= len;
4212                 data += len;
4213                 scb->expect -= len;
4214                 remain = count;
4215
4216                 switch (type) {
4217                 case HCI_EVENT_PKT:
4218                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4219                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4220                                 scb->expect = h->plen;
4221
4222                                 if (skb_tailroom(skb) < scb->expect) {
4223                                         kfree_skb(skb);
4224                                         hdev->reassembly[index] = NULL;
4225                                         return -ENOMEM;
4226                                 }
4227                         }
4228                         break;
4229
4230                 case HCI_ACLDATA_PKT:
4231                         if (skb->len == HCI_ACL_HDR_SIZE) {
4232                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4233                                 scb->expect = __le16_to_cpu(h->dlen);
4234
4235                                 if (skb_tailroom(skb) < scb->expect) {
4236                                         kfree_skb(skb);
4237                                         hdev->reassembly[index] = NULL;
4238                                         return -ENOMEM;
4239                                 }
4240                         }
4241                         break;
4242
4243                 case HCI_SCODATA_PKT:
4244                         if (skb->len == HCI_SCO_HDR_SIZE) {
4245                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4246                                 scb->expect = h->dlen;
4247
4248                                 if (skb_tailroom(skb) < scb->expect) {
4249                                         kfree_skb(skb);
4250                                         hdev->reassembly[index] = NULL;
4251                                         return -ENOMEM;
4252                                 }
4253                         }
4254                         break;
4255                 }
4256
4257                 if (scb->expect == 0) {
4258                         /* Complete frame */
4259
4260                         bt_cb(skb)->pkt_type = type;
4261                         hci_recv_frame(hdev, skb);
4262
4263                         hdev->reassembly[index] = NULL;
4264                         return remain;
4265                 }
4266         }
4267
4268         return remain;
4269 }
4270
4271 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4272 {
4273         int rem = 0;
4274
4275         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4276                 return -EILSEQ;
4277
4278         while (count) {
4279                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4280                 if (rem < 0)
4281                         return rem;
4282
4283                 data += (count - rem);
4284                 count = rem;
4285         }
4286
4287         return rem;
4288 }
4289 EXPORT_SYMBOL(hci_recv_fragment);
4290
4291 #define STREAM_REASSEMBLY 0
4292
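/* Reassemble packets from a byte stream where each packet is prefixed
 * with a one-byte packet type indicator (as in the UART H:4 transport).
 */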
4293 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4294 {
4295         int type;
4296         int rem = 0;
4297
4298         while (count) {
4299                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4300
4301                 if (!skb) {
4302                         struct { char type; } *pkt;
4303
4304                         /* Start of the frame */
4305                         pkt = data;
4306                         type = pkt->type;
4307
4308                         data++;
4309                         count--;
4310                 } else {
4311                         type = bt_cb(skb)->pkt_type;
                }
4312
4313                 rem = hci_reassembly(hdev, type, data, count,
4314                                      STREAM_REASSEMBLY);
4315                 if (rem < 0)
4316                         return rem;
4317
4318                 data += (count - rem);
4319                 count = rem;
4320         }
4321
4322         return rem;
4323 }
4324 EXPORT_SYMBOL(hci_recv_stream_fragment);
4325
4326 /* ---- Interface to upper protocols ---- */
4327
4328 int hci_register_cb(struct hci_cb *cb)
4329 {
4330         BT_DBG("%p name %s", cb, cb->name);
4331
4332         write_lock(&hci_cb_list_lock);
4333         list_add(&cb->list, &hci_cb_list);
4334         write_unlock(&hci_cb_list_lock);
4335
4336         return 0;
4337 }
4338 EXPORT_SYMBOL(hci_register_cb);
4339
4340 int hci_unregister_cb(struct hci_cb *cb)
4341 {
4342         BT_DBG("%p name %s", cb, cb->name);
4343
4344         write_lock(&hci_cb_list_lock);
4345         list_del(&cb->list);
4346         write_unlock(&hci_cb_list_lock);
4347
4348         return 0;
4349 }
4350 EXPORT_SYMBOL(hci_unregister_cb);
4351
4352 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4353 {
4354         int err;
4355
4356         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4357
4358         /* Time stamp */
4359         __net_timestamp(skb);
4360
4361         /* Send copy to monitor */
4362         hci_send_to_monitor(hdev, skb);
4363
4364         if (atomic_read(&hdev->promisc)) {
4365                 /* Send copy to the sockets */
4366                 hci_send_to_sock(hdev, skb);
4367         }
4368
4369         /* Get rid of skb owner, prior to sending to the driver. */
4370         skb_orphan(skb);
4371
4372         err = hdev->send(hdev, skb);
4373         if (err < 0) {
4374                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4375                 kfree_skb(skb);
4376         }
4377 }
4378
4379 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4380 {
4381         skb_queue_head_init(&req->cmd_q);
4382         req->hdev = hdev;
4383         req->err = 0;
4384 }
4385
4386 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4387 {
4388         struct hci_dev *hdev = req->hdev;
4389         struct sk_buff *skb;
4390         unsigned long flags;
4391
4392         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4393
4394         /* If an error occurred during request building, remove all HCI
4395          * commands queued on the HCI request queue.
4396          */
4397         if (req->err) {
4398                 skb_queue_purge(&req->cmd_q);
4399                 return req->err;
4400         }
4401
4402         /* Do not allow empty requests */
4403         if (skb_queue_empty(&req->cmd_q))
4404                 return -ENODATA;
4405
4406         skb = skb_peek_tail(&req->cmd_q);
4407         bt_cb(skb)->req.complete = complete;
4408
4409         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4410         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4411         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4412
4413         queue_work(hdev->workqueue, &hdev->cmd_work);
4414
4415         return 0;
4416 }
4417
4418 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4419                                        u32 plen, const void *param)
4420 {
4421         int len = HCI_COMMAND_HDR_SIZE + plen;
4422         struct hci_command_hdr *hdr;
4423         struct sk_buff *skb;
4424
4425         skb = bt_skb_alloc(len, GFP_ATOMIC);
4426         if (!skb)
4427                 return NULL;
4428
4429         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4430         hdr->opcode = cpu_to_le16(opcode);
4431         hdr->plen   = plen;
4432
4433         if (plen)
4434                 memcpy(skb_put(skb, plen), param, plen);
4435
4436         BT_DBG("skb len %d", skb->len);
4437
4438         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4439
4440         return skb;
4441 }
4442
4443 /* Send HCI command */
4444 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4445                  const void *param)
4446 {
4447         struct sk_buff *skb;
4448
4449         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4450
4451         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4452         if (!skb) {
4453                 BT_ERR("%s no memory for command", hdev->name);
4454                 return -ENOMEM;
4455         }
4456
4457         /* Stand-alone HCI commands must be flagged as
4458          * single-command requests.
4459          */
4460         bt_cb(skb)->req.start = true;
4461
4462         skb_queue_tail(&hdev->cmd_q, skb);
4463         queue_work(hdev->workqueue, &hdev->cmd_work);
4464
4465         return 0;
4466 }
4467
4468 /* Queue a command to an asynchronous HCI request */
4469 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4470                     const void *param, u8 event)
4471 {
4472         struct hci_dev *hdev = req->hdev;
4473         struct sk_buff *skb;
4474
4475         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4476
4477         /* If an error occurred during request building, there is no point in
4478          * queueing the HCI command. We can simply return.
4479          */
4480         if (req->err)
4481                 return;
4482
4483         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4484         if (!skb) {
4485                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4486                        hdev->name, opcode);
4487                 req->err = -ENOMEM;
4488                 return;
4489         }
4490
4491         if (skb_queue_empty(&req->cmd_q))
4492                 bt_cb(skb)->req.start = true;
4493
4494         bt_cb(skb)->req.event = event;
4495
4496         skb_queue_tail(&req->cmd_q, skb);
4497 }
4498
4499 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4500                  const void *param)
4501 {
4502         hci_req_add_ev(req, opcode, plen, param, 0);
4503 }
4504
4505 /* Get data from the previously sent command */
4506 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4507 {
4508         struct hci_command_hdr *hdr;
4509
4510         if (!hdev->sent_cmd)
4511                 return NULL;
4512
4513         hdr = (void *) hdev->sent_cmd->data;
4514
4515         if (hdr->opcode != cpu_to_le16(opcode))
4516                 return NULL;
4517
4518         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4519
4520         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4521 }
4522
4523 /* Send ACL data */
4524 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4525 {
4526         struct hci_acl_hdr *hdr;
4527         int len = skb->len;
4528
4529         skb_push(skb, HCI_ACL_HDR_SIZE);
4530         skb_reset_transport_header(skb);
4531         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4532         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4533         hdr->dlen   = cpu_to_le16(len);
4534 }
4535
4536 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4537                           struct sk_buff *skb, __u16 flags)
4538 {
4539         struct hci_conn *conn = chan->conn;
4540         struct hci_dev *hdev = conn->hdev;
4541         struct sk_buff *list;
4542
4543         skb->len = skb_headlen(skb);
4544         skb->data_len = 0;
4545
4546         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4547
4548         switch (hdev->dev_type) {
4549         case HCI_BREDR:
4550                 hci_add_acl_hdr(skb, conn->handle, flags);
4551                 break;
4552         case HCI_AMP:
4553                 hci_add_acl_hdr(skb, chan->handle, flags);
4554                 break;
4555         default:
4556                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4557                 return;
4558         }
4559
4560         list = skb_shinfo(skb)->frag_list;
4561         if (!list) {
4562                 /* Non-fragmented */
4563                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4564
4565                 skb_queue_tail(queue, skb);
4566         } else {
4567                 /* Fragmented */
4568                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4569
4570                 skb_shinfo(skb)->frag_list = NULL;
4571
4572                 /* Queue all fragments atomically */
4573                 spin_lock(&queue->lock);
4574
4575                 __skb_queue_tail(queue, skb);
4576
4577                 flags &= ~ACL_START;
4578                 flags |= ACL_CONT;
4579                 do {
4580                         skb = list;
                        list = list->next;
4581
4582                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4583                         hci_add_acl_hdr(skb, conn->handle, flags);
4584
4585                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4586
4587                         __skb_queue_tail(queue, skb);
4588                 } while (list);
4589
4590                 spin_unlock(&queue->lock);
4591         }
4592 }
4593
4594 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4595 {
4596         struct hci_dev *hdev = chan->conn->hdev;
4597
4598         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4599
4600         hci_queue_acl(chan, &chan->data_q, skb, flags);
4601
4602         queue_work(hdev->workqueue, &hdev->tx_work);
4603 }
4604
4605 /* Send SCO data */
4606 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4607 {
4608         struct hci_dev *hdev = conn->hdev;
4609         struct hci_sco_hdr hdr;
4610
4611         BT_DBG("%s len %d", hdev->name, skb->len);
4612
4613         hdr.handle = cpu_to_le16(conn->handle);
4614         hdr.dlen   = skb->len;
4615
4616         skb_push(skb, HCI_SCO_HDR_SIZE);
4617         skb_reset_transport_header(skb);
4618         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4619
4620         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4621
4622         skb_queue_tail(&conn->data_q, skb);
4623         queue_work(hdev->workqueue, &hdev->tx_work);
4624 }
4625
4626 /* ---- HCI TX task (outgoing data) ---- */
4627
4628 /* HCI Connection scheduler */
4629 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4630                                      int *quote)
4631 {
4632         struct hci_conn_hash *h = &hdev->conn_hash;
4633         struct hci_conn *conn = NULL, *c;
4634         unsigned int num = 0, min = ~0;
4635
4636         /* We don't have to lock device here. Connections are always
4637          * added and removed with TX task disabled.
         */
4638
4639         rcu_read_lock();
4640
4641         list_for_each_entry_rcu(c, &h->list, list) {
4642                 if (c->type != type || skb_queue_empty(&c->data_q))
4643                         continue;
4644
4645                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4646                         continue;
4647
4648                 num++;
4649
4650                 if (c->sent < min) {
4651                         min  = c->sent;
4652                         conn = c;
4653                 }
4654
4655                 if (hci_conn_num(hdev, type) == num)
4656                         break;
4657         }
4658
4659         rcu_read_unlock();
4660
4661         if (conn) {
4662                 int cnt, q;
4663
4664                 switch (conn->type) {
4665                 case ACL_LINK:
4666                         cnt = hdev->acl_cnt;
4667                         break;
4668                 case SCO_LINK:
4669                 case ESCO_LINK:
4670                         cnt = hdev->sco_cnt;
4671                         break;
4672                 case LE_LINK:
4673                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4674                         break;
4675                 default:
4676                         cnt = 0;
4677                         BT_ERR("Unknown link type");
4678                 }
4679
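                /* Split the free controller buffers evenly among the
                 * connections with data pending, always granting at
                 * least one.
                 */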
4680                 q = cnt / num;
4681                 *quote = q ? q : 1;
4682         } else {
4683                 *quote = 0;
        }
4684
4685         BT_DBG("conn %p quote %d", conn, *quote);
4686         return conn;
4687 }
4688
4689 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4690 {
4691         struct hci_conn_hash *h = &hdev->conn_hash;
4692         struct hci_conn *c;
4693
4694         BT_ERR("%s link tx timeout", hdev->name);
4695
4696         rcu_read_lock();
4697
4698         /* Kill stalled connections */
4699         list_for_each_entry_rcu(c, &h->list, list) {
4700                 if (c->type == type && c->sent) {
4701                         BT_ERR("%s killing stalled connection %pMR",
4702                                hdev->name, &c->dst);
4703                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4704                 }
4705         }
4706
4707         rcu_read_unlock();
4708 }
4709
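/* Pick the channel to service next for the given link type: among the
 * channels holding queued data of the highest priority, choose the one
 * whose connection has the fewest packets in flight, and compute a
 * fair quote of controller buffers for it.
 */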
4710 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4711                                       int *quote)
4712 {
4713         struct hci_conn_hash *h = &hdev->conn_hash;
4714         struct hci_chan *chan = NULL;
4715         unsigned int num = 0, min = ~0, cur_prio = 0;
4716         struct hci_conn *conn;
4717         int cnt, q, conn_num = 0;
4718
4719         BT_DBG("%s", hdev->name);
4720
4721         rcu_read_lock();
4722
4723         list_for_each_entry_rcu(conn, &h->list, list) {
4724                 struct hci_chan *tmp;
4725
4726                 if (conn->type != type)
4727                         continue;
4728
4729                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4730                         continue;
4731
4732                 conn_num++;
4733
4734                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4735                         struct sk_buff *skb;
4736
4737                         if (skb_queue_empty(&tmp->data_q))
4738                                 continue;
4739
4740                         skb = skb_peek(&tmp->data_q);
4741                         if (skb->priority < cur_prio)
4742                                 continue;
4743
4744                         if (skb->priority > cur_prio) {
4745                                 num = 0;
4746                                 min = ~0;
4747                                 cur_prio = skb->priority;
4748                         }
4749
4750                         num++;
4751
4752                         if (conn->sent < min) {
4753                                 min  = conn->sent;
4754                                 chan = tmp;
4755                         }
4756                 }
4757
4758                 if (hci_conn_num(hdev, type) == conn_num)
4759                         break;
4760         }
4761
4762         rcu_read_unlock();
4763
4764         if (!chan)
4765                 return NULL;
4766
4767         switch (chan->conn->type) {
4768         case ACL_LINK:
4769                 cnt = hdev->acl_cnt;
4770                 break;
4771         case AMP_LINK:
4772                 cnt = hdev->block_cnt;
4773                 break;
4774         case SCO_LINK:
4775         case ESCO_LINK:
4776                 cnt = hdev->sco_cnt;
4777                 break;
4778         case LE_LINK:
4779                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4780                 break;
4781         default:
4782                 cnt = 0;
4783                 BT_ERR("Unknown link type");
4784         }
4785
4786         q = cnt / num;
4787         *quote = q ? q : 1;
4788         BT_DBG("chan %p quote %d", chan, *quote);
4789         return chan;
4790 }
4791
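/* Promote queued packets of channels that got no service in the last
 * scheduling round to just below the maximum priority, preventing
 * starvation by higher-priority traffic.
 */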
4792 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4793 {
4794         struct hci_conn_hash *h = &hdev->conn_hash;
4795         struct hci_conn *conn;
4796         int num = 0;
4797
4798         BT_DBG("%s", hdev->name);
4799
4800         rcu_read_lock();
4801
4802         list_for_each_entry_rcu(conn, &h->list, list) {
4803                 struct hci_chan *chan;
4804
4805                 if (conn->type != type)
4806                         continue;
4807
4808                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4809                         continue;
4810
4811                 num++;
4812
4813                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4814                         struct sk_buff *skb;
4815
4816                         if (chan->sent) {
4817                                 chan->sent = 0;
4818                                 continue;
4819                         }
4820
4821                         if (skb_queue_empty(&chan->data_q))
4822                                 continue;
4823
4824                         skb = skb_peek(&chan->data_q);
4825                         if (skb->priority >= HCI_PRIO_MAX - 1)
4826                                 continue;
4827
4828                         skb->priority = HCI_PRIO_MAX - 1;
4829
4830                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4831                                skb->priority);
4832                 }
4833
4834                 if (hci_conn_num(hdev, type) == num)
4835                         break;
4836         }
4837
4838         rcu_read_unlock();
4840 }
4841
4842 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4843 {
4844         /* Calculate count of blocks used by this packet */
4845         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4846 }
4847
4848 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4849 {
4850         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4851                 /* ACL tx timeout must be longer than maximum
4852                  * link supervision timeout (40.9 seconds).
                 */
4853                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4854                                        HCI_ACL_TX_TIMEOUT))
4855                         hci_link_tx_to(hdev, ACL_LINK);
4856         }
4857 }
4858
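/* ACL scheduling for packet-based flow control: pick the next channel
 * to service, send packets from it while quota (hdev->acl_cnt) lasts
 * and the head skb keeps its priority, then recalculate priorities if
 * anything was sent.
 */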
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

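/* ACL scheduling for block-based flow control: same structure as
 * hci_sched_acl_pkt(), but quota is accounted in controller buffer
 * blocks (hdev->block_cnt), and AMP controllers schedule AMP_LINK
 * traffic instead of ACL_LINK.
 */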
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

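/* Dispatch to the packet-based or block-based ACL scheduler according
 * to the controller's flow control mode.
 */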
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

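/* Schedule eSCO; same as hci_sched_sco() but for ESCO_LINK
 * connections.
 */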
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

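/* Schedule LE data. Controllers without a separate LE buffer pool
 * (hdev->le_pkts == 0) share the ACL quota, so the remaining count is
 * written back to le_cnt or acl_cnt as appropriate.
 */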
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

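/* TX work item: run the per-link-type schedulers unless the device is
 * in user channel mode (a single socket then owns the device), and
 * finally flush any queued raw packets.
 */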
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send pending frames to the HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

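/* A request is complete when the command queue is empty or when the
 * command at the head of the queue starts a new request.
 */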
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

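/* Put a clone of the last sent command back at the head of the
 * command queue so that it gets sent again. HCI_OP_RESET is
 * deliberately never resent.
 */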
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

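/* Called on command completion: decide whether the request that the
 * command belongs to is now finished and, if so, invoke the request's
 * complete callback exactly once. On error, the remaining queued
 * commands of the same request are discarded.
 */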
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, it needs special handling.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init, so any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

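/* RX work item: drain hdev->rx_q, mirroring each frame to the monitor
 * and, in promiscuous mode, to raw sockets, before dispatching it to
 * the event, ACL data or SCO data handler.
 */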
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

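/* CMD work item: send the next queued command once the controller has
 * room (cmd_cnt > 0), keep a clone in hdev->sent_cmd for matching the
 * eventual command complete/status event, and arm the command
 * timeout.
 */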
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

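/* Append an LE scan disable command to the given request. */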
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

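/* Append the commands for configuring and enabling an LE passive scan
 * to the given request.
 */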
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

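/* Completion callback for hci_update_background_scan(); failures are
 * only logged.
 */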
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan and
		 * connect at the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to duplicates
		 * filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}