Bluetooth: Mark controller is down when HCI_AUTO_OFF is set
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

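/* Writing a boolean value toggles Device Under Test mode. Enabling it
 * sends the Enable Device Under Test Mode command; disabling issues an
 * HCI Reset, which is how a controller leaves test mode.
 *
 * Example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */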
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

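        /* A value of 0 disables the idle timer; otherwise the timeout
         * is in milliseconds and must lie between 500 ms and one hour
         * (3600000 ms).
         */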
        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

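        /* Sniff intervals are expressed in baseband slots of 0.625 ms;
         * the controller requires an even slot count, and the minimum
         * may not exceed the configured maximum interval.
         */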
        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

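        /* Lower bound, in milliseconds, for how long cached connection
         * information (RSSI and TX power) stays valid; it must not
         * exceed the configured maximum age.
         */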
        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

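/* Unlike the "whitelist" entry above, which dumps hdev->whitelist,
 * this file shows hdev->le_white_list, the host's mirror of the LE
 * white list that gets programmed into the controller.
 */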
static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

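        /* LE connection intervals are in units of 1.25 ms, so the
         * allowed range 0x0006-0x0c80 corresponds to 7.5 ms - 4 s.
         */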
        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

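        /* The specification caps the connection (slave) latency at
         * 0x01f3 (499) connection events.
         */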
        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

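        /* The LE supervision timeout is in units of 10 ms, so the
         * valid range 0x000a-0x0c80 corresponds to 100 ms - 32 s.
         */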
        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

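        /* The advertising channel map is a bitmask: bit 0 enables
         * channel 37, bit 1 channel 38 and bit 2 channel 39, so at
         * least one of the three bits must be set.
         */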
        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

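/* Take ownership of the last received event skb and check that it is
 * what a synchronous command is waiting for: either the explicitly
 * requested event, or a Command Complete whose opcode matches the
 * command that was sent. Anything else is freed and reported as
 * -ENODATA.
 */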
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

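/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires, then hand the resulting event skb (or an
 * ERR_PTR) back to the caller. Callers are expected to serialize these
 * calls, e.g. via hci_req_lock() as dut_mode_write() above does.
 */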
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 slots * 0.625 ms = ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

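/* Pick the best inquiry result format the controller can deliver:
 * 0x02 for results with extended response, 0x01 for results with RSSI
 * and 0x00 for the standard format. The manufacturer/revision special
 * cases cover controllers known to handle RSSI results despite not
 * advertising the corresponding LMP feature.
 */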
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

1686 static int __hci_init(struct hci_dev *hdev)
1687 {
1688         int err;
1689
1690         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691         if (err < 0)
1692                 return err;
1693
1694         /* The Device Under Test (DUT) mode is special and available for
1695          * all controller types. So just create it early on.
1696          */
1697         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699                                     &dut_mode_fops);
1700         }
1701
1702         /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1703          * BR/EDR/LE controllers. AMP controllers only need the
1704          * first stage init.
1705          */
1706         if (hdev->dev_type != HCI_BREDR)
1707                 return 0;
1708
1709         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710         if (err < 0)
1711                 return err;
1712
1713         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714         if (err < 0)
1715                 return err;
1716
1717         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718         if (err < 0)
1719                 return err;
1720
1721         /* Only create debugfs entries during the initial setup
1722          * phase and not every time the controller gets powered on.
1723          */
1724         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725                 return 0;
1726
1727         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728                             &features_fops);
1729         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730                            &hdev->manufacturer);
1731         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734                             &blacklist_fops);
1735         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736                             &whitelist_fops);
1737         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
1739         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740                             &conn_info_min_age_fops);
1741         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742                             &conn_info_max_age_fops);
1743
1744         if (lmp_bredr_capable(hdev)) {
1745                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746                                     hdev, &inquiry_cache_fops);
1747                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748                                     hdev, &link_keys_fops);
1749                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750                                     hdev, &dev_class_fops);
1751                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752                                     hdev, &voice_setting_fops);
1753         }
1754
1755         if (lmp_ssp_capable(hdev)) {
1756                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757                                     hdev, &auto_accept_delay_fops);
1758                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759                                     hdev, &force_sc_support_fops);
1760                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761                                     hdev, &sc_only_mode_fops);
1762         }
1763
1764         if (lmp_sniff_capable(hdev)) {
1765                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766                                     hdev, &idle_timeout_fops);
1767                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768                                     hdev, &sniff_min_interval_fops);
1769                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770                                     hdev, &sniff_max_interval_fops);
1771         }
1772
1773         if (lmp_le_capable(hdev)) {
1774                 debugfs_create_file("identity", 0400, hdev->debugfs,
1775                                     hdev, &identity_fops);
1776                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777                                     hdev, &rpa_timeout_fops);
1778                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779                                     hdev, &random_address_fops);
1780                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781                                     hdev, &static_address_fops);
1782
1783                 /* For controllers with a public address, provide a debug
1784                  * option to force the usage of the configured static
1785                  * address. By default the public address is used.
1786                  */
1787                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788                         debugfs_create_file("force_static_address", 0644,
1789                                             hdev->debugfs, hdev,
1790                                             &force_static_address_fops);
1791
1792                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793                                   &hdev->le_white_list_size);
1794                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795                                     &white_list_fops);
1796                 debugfs_create_file("identity_resolving_keys", 0400,
1797                                     hdev->debugfs, hdev,
1798                                     &identity_resolving_keys_fops);
1799                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800                                     hdev, &long_term_keys_fops);
1801                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802                                     hdev, &conn_min_interval_fops);
1803                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804                                     hdev, &conn_max_interval_fops);
1805                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806                                     hdev, &conn_latency_fops);
1807                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808                                     hdev, &supervision_timeout_fops);
1809                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810                                     hdev, &adv_channel_map_fops);
1811                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812                                     &device_list_fops);
1813                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814                                    hdev->debugfs,
1815                                    &hdev->discov_interleaved_timeout);
1816         }
1817
1818         return 0;
1819 }
1820
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822 {
1823         struct hci_dev *hdev = req->hdev;
1824
1825         BT_DBG("%s %ld", hdev->name, opt);
1826
1827         /* Reset */
1828         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829                 hci_reset_req(req, 0);
1830
1831         /* Read Local Version */
1832         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834         /* Read BD Address */
1835         if (hdev->set_bdaddr)
1836                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837 }
1838
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1840 {
1841         int err;
1842
1843         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844                 return 0;
1845
1846         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847         if (err < 0)
1848                 return err;
1849
1850         return 0;
1851 }
1852
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __u8 scan = opt;
1856
1857         BT_DBG("%s %x", req->hdev->name, scan);
1858
1859         /* Inquiry and Page scans */
1860         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1861 }
1862
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1864 {
1865         __u8 auth = opt;
1866
1867         BT_DBG("%s %x", req->hdev->name, auth);
1868
1869         /* Authentication */
1870         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1871 }
1872
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1874 {
1875         __u8 encrypt = opt;
1876
1877         BT_DBG("%s %x", req->hdev->name, encrypt);
1878
1879         /* Encryption */
1880         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1881 }
1882
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1884 {
1885         __le16 policy = cpu_to_le16(opt);
1886
1887         BT_DBG("%s %x", req->hdev->name, policy);
1888
1889         /* Default link policy */
1890         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1891 }
1892
1893 /* Get HCI device by index.
1894  * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1896 {
1897         struct hci_dev *hdev = NULL, *d;
1898
1899         BT_DBG("%d", index);
1900
1901         if (index < 0)
1902                 return NULL;
1903
1904         read_lock(&hci_dev_list_lock);
1905         list_for_each_entry(d, &hci_dev_list, list) {
1906                 if (d->id == index) {
1907                         hdev = hci_dev_hold(d);
1908                         break;
1909                 }
1910         }
1911         read_unlock(&hci_dev_list_lock);
1912         return hdev;
1913 }
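
/* Usage sketch (illustrative): every successful hci_dev_get() must be
 * balanced with hci_dev_put() once the caller is done with the device:
 *
 *        struct hci_dev *hdev = hci_dev_get(0);
 *
 *        if (!hdev)
 *                return -ENODEV;
 *        // ... use hdev ...
 *        hci_dev_put(hdev);
 */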
1914
1915 /* ---- Inquiry support ---- */
1916
1917 bool hci_discovery_active(struct hci_dev *hdev)
1918 {
1919         struct discovery_state *discov = &hdev->discovery;
1920
1921         switch (discov->state) {
1922         case DISCOVERY_FINDING:
1923         case DISCOVERY_RESOLVING:
1924                 return true;
1925
1926         default:
1927                 return false;
1928         }
1929 }
1930
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932 {
1933         int old_state = hdev->discovery.state;
1934
1935         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
1937         if (old_state == state)
1938                 return;
1939
1940         hdev->discovery.state = state;
1941
1942         switch (state) {
1943         case DISCOVERY_STOPPED:
1944                 hci_update_background_scan(hdev);
1945
1946                 if (old_state != DISCOVERY_STARTING)
1947                         mgmt_discovering(hdev, 0);
1948                 break;
1949         case DISCOVERY_STARTING:
1950                 break;
1951         case DISCOVERY_FINDING:
1952                 mgmt_discovering(hdev, 1);
1953                 break;
1954         case DISCOVERY_RESOLVING:
1955                 break;
1956         case DISCOVERY_STOPPING:
1957                 break;
1958         }
1959 }
1960
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1962 {
1963         struct discovery_state *cache = &hdev->discovery;
1964         struct inquiry_entry *p, *n;
1965
1966         list_for_each_entry_safe(p, n, &cache->all, all) {
1967                 list_del(&p->all);
1968                 kfree(p);
1969         }
1970
1971         INIT_LIST_HEAD(&cache->unknown);
1972         INIT_LIST_HEAD(&cache->resolve);
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976                                                bdaddr_t *bdaddr)
1977 {
1978         struct discovery_state *cache = &hdev->discovery;
1979         struct inquiry_entry *e;
1980
1981         BT_DBG("cache %p, %pMR", cache, bdaddr);
1982
1983         list_for_each_entry(e, &cache->all, all) {
1984                 if (!bacmp(&e->data.bdaddr, bdaddr))
1985                         return e;
1986         }
1987
1988         return NULL;
1989 }
1990
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1992                                                        bdaddr_t *bdaddr)
1993 {
1994         struct discovery_state *cache = &hdev->discovery;
1995         struct inquiry_entry *e;
1996
1997         BT_DBG("cache %p, %pMR", cache, bdaddr);
1998
1999         list_for_each_entry(e, &cache->unknown, list) {
2000                 if (!bacmp(&e->data.bdaddr, bdaddr))
2001                         return e;
2002         }
2003
2004         return NULL;
2005 }
2006
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2008                                                        bdaddr_t *bdaddr,
2009                                                        int state)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2015
2016         list_for_each_entry(e, &cache->resolve, list) {
2017                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018                         return e;
2019                 if (!bacmp(&e->data.bdaddr, bdaddr))
2020                         return e;
2021         }
2022
2023         return NULL;
2024 }
2025
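/* Re-insert @ie into the resolve list so that name resolution happens
 * in order of signal strength: the entry is placed before the first
 * entry that is not already NAME_PENDING and has a weaker or equal
 * signal (a larger |RSSI|), so the closest devices get their names
 * resolved first.
 */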
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027                                       struct inquiry_entry *ie)
2028 {
2029         struct discovery_state *cache = &hdev->discovery;
2030         struct list_head *pos = &cache->resolve;
2031         struct inquiry_entry *p;
2032
2033         list_del(&ie->list);
2034
2035         list_for_each_entry(p, &cache->resolve, list) {
2036                 if (p->name_state != NAME_PENDING &&
2037                     abs(p->data.rssi) >= abs(ie->data.rssi))
2038                         break;
2039                 pos = &p->list;
2040         }
2041
2042         list_add(&ie->list, pos);
2043 }
2044
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046                              bool name_known)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *ie;
2050         u32 flags = 0;
2051
2052         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2053
2054         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
2056         if (!data->ssp_mode)
2057                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2058
2059         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2060         if (ie) {
2061                 if (!ie->data.ssp_mode)
2062                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2063
2064                 if (ie->name_state == NAME_NEEDED &&
2065                     data->rssi != ie->data.rssi) {
2066                         ie->data.rssi = data->rssi;
2067                         hci_inquiry_cache_update_resolve(hdev, ie);
2068                 }
2069
2070                 goto update;
2071         }
2072
2073         /* Entry not in the cache. Add new one. */
2074         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2075         if (!ie) {
2076                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077                 goto done;
2078         }
2079
2080         list_add(&ie->all, &cache->all);
2081
2082         if (name_known) {
2083                 ie->name_state = NAME_KNOWN;
2084         } else {
2085                 ie->name_state = NAME_NOT_KNOWN;
2086                 list_add(&ie->list, &cache->unknown);
2087         }
2088
2089 update:
2090         if (name_known && ie->name_state != NAME_KNOWN &&
2091             ie->name_state != NAME_PENDING) {
2092                 ie->name_state = NAME_KNOWN;
2093                 list_del(&ie->list);
2094         }
2095
2096         memcpy(&ie->data, data, sizeof(*data));
2097         ie->timestamp = jiffies;
2098         cache->timestamp = jiffies;
2099
2100         if (ie->name_state == NAME_NOT_KNOWN)
2101                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2102
2103 done:
2104         return flags;
2105 }
2106
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_info *info = (struct inquiry_info *) buf;
2111         struct inquiry_entry *e;
2112         int copied = 0;
2113
2114         list_for_each_entry(e, &cache->all, all) {
2115                 struct inquiry_data *data = &e->data;
2116
2117                 if (copied >= num)
2118                         break;
2119
2120                 bacpy(&info->bdaddr, &data->bdaddr);
2121                 info->pscan_rep_mode    = data->pscan_rep_mode;
2122                 info->pscan_period_mode = data->pscan_period_mode;
2123                 info->pscan_mode        = data->pscan_mode;
2124                 memcpy(info->dev_class, data->dev_class, 3);
2125                 info->clock_offset      = data->clock_offset;
2126
2127                 info++;
2128                 copied++;
2129         }
2130
2131         BT_DBG("cache %p, copied %d", cache, copied);
2132         return copied;
2133 }
2134
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2136 {
2137         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138         struct hci_dev *hdev = req->hdev;
2139         struct hci_cp_inquiry cp;
2140
2141         BT_DBG("%s", hdev->name);
2142
2143         if (test_bit(HCI_INQUIRY, &hdev->flags))
2144                 return;
2145
2146         /* Start Inquiry */
2147         memcpy(&cp.lap, &ir->lap, 3);
2148         cp.length  = ir->length;
2149         cp.num_rsp = ir->num_rsp;
2150         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2151 }
2152
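/* wait_on_bit() action used by hci_inquiry() below: give up the CPU
 * until the HCI_INQUIRY flag is cleared by the Inquiry Complete event,
 * aborting the wait early if a signal is pending.
 */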
2153 static int wait_inquiry(void *word)
2154 {
2155         schedule();
2156         return signal_pending(current);
2157 }
2158
2159 int hci_inquiry(void __user *arg)
2160 {
2161         __u8 __user *ptr = arg;
2162         struct hci_inquiry_req ir;
2163         struct hci_dev *hdev;
2164         int err = 0, do_inquiry = 0, max_rsp;
2165         long timeo;
2166         __u8 *buf;
2167
2168         if (copy_from_user(&ir, ptr, sizeof(ir)))
2169                 return -EFAULT;
2170
2171         hdev = hci_dev_get(ir.dev_id);
2172         if (!hdev)
2173                 return -ENODEV;
2174
2175         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176                 err = -EBUSY;
2177                 goto done;
2178         }
2179
2180         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2181                 err = -EOPNOTSUPP;
2182                 goto done;
2183         }
2184
2185         if (hdev->dev_type != HCI_BREDR) {
2186                 err = -EOPNOTSUPP;
2187                 goto done;
2188         }
2189
2190         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191                 err = -EOPNOTSUPP;
2192                 goto done;
2193         }
2194
2195         hci_dev_lock(hdev);
2196         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198                 hci_inquiry_cache_flush(hdev);
2199                 do_inquiry = 1;
2200         }
2201         hci_dev_unlock(hdev);
2202
2203         timeo = ir.length * msecs_to_jiffies(2000);
2204
2205         if (do_inquiry) {
2206                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207                                    timeo);
2208                 if (err < 0)
2209                         goto done;
2210
2211                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212                  * cleared). If it is interrupted by a signal, return -EINTR.
2213                  */
2214                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215                                 TASK_INTERRUPTIBLE))
2216                         return -EINTR;
2217         }
2218
2219         /* For an unlimited number of responses, use a buffer with
2220          * 255 entries
2221          */
2222         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
2224         /* cache_dump can't sleep. Therefore we allocate a temporary
2225          * buffer and then copy it to user space.
2226          */
2227         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2228         if (!buf) {
2229                 err = -ENOMEM;
2230                 goto done;
2231         }
2232
2233         hci_dev_lock(hdev);
2234         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235         hci_dev_unlock(hdev);
2236
2237         BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240                 ptr += sizeof(ir);
2241                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2242                                  ir.num_rsp))
2243                         err = -EFAULT;
2244         } else
2245                 err = -EFAULT;
2246
2247         kfree(buf);
2248
2249 done:
2250         hci_dev_put(hdev);
2251         return err;
2252 }
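
/* Example (illustrative userspace sketch, not part of this file): the
 * ioctl counterpart of hci_inquiry() above. The buffer layout mirrors
 * the kernel side: the request header is followed by up to 255
 * inquiry_info entries, and num_rsp == 0 means "unlimited" (255).
 * Built separately against the BlueZ headers with -lbluetooth.
 *
 *        #include <stdio.h>
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/ioctl.h>
 *        #include <sys/socket.h>
 *        #include <bluetooth/bluetooth.h>
 *        #include <bluetooth/hci.h>
 *
 *        int main(void)
 *        {
 *                unsigned char buf[sizeof(struct hci_inquiry_req) +
 *                                  255 * sizeof(struct inquiry_info)];
 *                struct hci_inquiry_req *ir = (void *) buf;
 *                struct inquiry_info *info = (void *) (ir + 1);
 *                int dd, i;
 *
 *                dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *                if (dd < 0)
 *                        return 1;
 *
 *                memset(buf, 0, sizeof(buf));
 *                ir->dev_id  = 0;                        // hci0
 *                ir->flags   = IREQ_CACHE_FLUSH;         // fresh inquiry
 *                ir->lap[0]  = 0x33;                     // GIAC 0x9e8b33
 *                ir->lap[1]  = 0x8b;
 *                ir->lap[2]  = 0x9e;
 *                ir->length  = 8;                        // 8 * 1.28 seconds
 *                ir->num_rsp = 0;                        // unlimited -> 255
 *
 *                if (ioctl(dd, HCIINQUIRY, buf) < 0) {
 *                        close(dd);
 *                        return 1;
 *                }
 *
 *                for (i = 0; i < ir->num_rsp; i++) {
 *                        char addr[18];
 *
 *                        ba2str(&info[i].bdaddr, addr);
 *                        printf("%s\n", addr);
 *                }
 *
 *                close(dd);
 *                return 0;
 *        }
 */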
2253
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2255 {
2256         int ret = 0;
2257
2258         BT_DBG("%s %p", hdev->name, hdev);
2259
2260         hci_req_lock(hdev);
2261
2262         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263                 ret = -ENODEV;
2264                 goto done;
2265         }
2266
2267         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269                 /* Check for rfkill but allow the HCI setup stage to
2270                  * proceed (which in itself doesn't cause any RF activity).
2271                  */
2272                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273                         ret = -ERFKILL;
2274                         goto done;
2275                 }
2276
2277                 /* Check for valid public address or a configured static
2278                  * random address, but let the HCI setup proceed to
2279                  * be able to determine if there is a public address
2280                  * or not.
2281                  *
2282                  * In case of user channel usage, it is not important
2283                  * if a public address or static random address is
2284                  * available.
2285                  *
2286                  * This check is only valid for BR/EDR controllers
2287                  * since AMP controllers do not have an address.
2288                  */
2289                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290                     hdev->dev_type == HCI_BREDR &&
2291                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293                         ret = -EADDRNOTAVAIL;
2294                         goto done;
2295                 }
2296         }
2297
2298         if (test_bit(HCI_UP, &hdev->flags)) {
2299                 ret = -EALREADY;
2300                 goto done;
2301         }
2302
2303         if (hdev->open(hdev)) {
2304                 ret = -EIO;
2305                 goto done;
2306         }
2307
2308         atomic_set(&hdev->cmd_cnt, 1);
2309         set_bit(HCI_INIT, &hdev->flags);
2310
2311         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312                 if (hdev->setup)
2313                         ret = hdev->setup(hdev);
2314
2315                 /* The transport driver can set these quirks before
2316                  * creating the HCI device or in its setup callback.
2317                  *
2318                  * In case any of them is set, the controller has to
2319                  * start up as unconfigured.
2320                  */
2321                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2324
2325                 /* For an unconfigured controller it is required to
2326                  * read at least the version information provided by
2327                  * the Read Local Version Information command.
2328                  *
2329                  * If the set_bdaddr driver callback is provided, then
2330                  * also the original Bluetooth public device address
2331                  * will be read using the Read BD Address command.
2332                  */
2333                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334                         ret = __hci_unconf_init(hdev);
2335         }
2336
2337         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338                 /* If public address change is configured, ensure that
2339                  * the address gets programmed. If the driver does not
2340                  * support changing the public address, fail the power
2341                  * on procedure.
2342                  */
2343                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344                     hdev->set_bdaddr)
2345                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346                 else
2347                         ret = -EADDRNOTAVAIL;
2348         }
2349
2350         if (!ret) {
2351                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353                         ret = __hci_init(hdev);
2354         }
2355
2356         clear_bit(HCI_INIT, &hdev->flags);
2357
2358         if (!ret) {
2359                 hci_dev_hold(hdev);
2360                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361                 set_bit(HCI_UP, &hdev->flags);
2362                 hci_notify(hdev, HCI_DEV_UP);
2363                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367                     hdev->dev_type == HCI_BREDR) {
2368                         hci_dev_lock(hdev);
2369                         mgmt_powered(hdev, 1);
2370                         hci_dev_unlock(hdev);
2371                 }
2372         } else {
2373                 /* Init failed, cleanup */
2374                 flush_work(&hdev->tx_work);
2375                 flush_work(&hdev->cmd_work);
2376                 flush_work(&hdev->rx_work);
2377
2378                 skb_queue_purge(&hdev->cmd_q);
2379                 skb_queue_purge(&hdev->rx_q);
2380
2381                 if (hdev->flush)
2382                         hdev->flush(hdev);
2383
2384                 if (hdev->sent_cmd) {
2385                         kfree_skb(hdev->sent_cmd);
2386                         hdev->sent_cmd = NULL;
2387                 }
2388
2389                 hdev->close(hdev);
2390                 hdev->flags &= BIT(HCI_RAW);
2391         }
2392
2393 done:
2394         hci_req_unlock(hdev);
2395         return ret;
2396 }
2397
2398 /* ---- HCI ioctl helpers ---- */
2399
2400 int hci_dev_open(__u16 dev)
2401 {
2402         struct hci_dev *hdev;
2403         int err;
2404
2405         hdev = hci_dev_get(dev);
2406         if (!hdev)
2407                 return -ENODEV;
2408
2409         /* Devices that are marked as unconfigured can only be powered
2410          * up as user channel. Trying to bring them up as normal devices
2411          * will result in a failure. Only user channel operation is
2412          * possible.
2413          *
2414          * When this function is called for a user channel, the flag
2415          * HCI_USER_CHANNEL will be set first before attempting to
2416          * open the device.
2417          */
2418         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420                 err = -EOPNOTSUPP;
2421                 goto done;
2422         }
2423
2424         /* We need to ensure that no other power on/off work is pending
2425          * before proceeding to call hci_dev_do_open. This is
2426          * particularly important if the setup procedure has not yet
2427          * completed.
2428          */
2429         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430                 cancel_delayed_work(&hdev->power_off);
2431
2432         /* After this call it is guaranteed that the setup procedure
2433          * has finished. This means that error conditions like RFKILL
2434          * or no valid public or static random address apply.
2435          */
2436         flush_workqueue(hdev->req_workqueue);
2437
2438         err = hci_dev_do_open(hdev);
2439
2440 done:
2441         hci_dev_put(hdev);
2442         return err;
2443 }
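
/* Example (illustrative userspace sketch, not part of this file):
 * hci_dev_open() is reached through the HCIDEVUP ioctl on a raw HCI
 * socket; CAP_NET_ADMIN is required, and EALREADY means the device was
 * already up.
 *
 *        #include <errno.h>
 *        #include <stdio.h>
 *        #include <unistd.h>
 *        #include <sys/ioctl.h>
 *        #include <sys/socket.h>
 *        #include <bluetooth/bluetooth.h>
 *        #include <bluetooth/hci.h>
 *
 *        int main(void)
 *        {
 *                int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *                if (ctl < 0)
 *                        return 1;
 *
 *                if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *                        perror("HCIDEVUP");     // bring hci0 up
 *
 *                close(ctl);
 *                return 0;
 *        }
 */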
2444
2445 /* This function requires the caller holds hdev->lock */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447 {
2448         struct hci_conn_params *p;
2449
2450         list_for_each_entry(p, &hdev->le_conn_params, list)
2451                 list_del_init(&p->action);
2452
2453         BT_DBG("All LE pending actions cleared");
2454 }
2455
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2457 {
2458         BT_DBG("%s %p", hdev->name, hdev);
2459
2460         cancel_delayed_work(&hdev->power_off);
2461
2462         hci_req_cancel(hdev, ENODEV);
2463         hci_req_lock(hdev);
2464
2465         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466                 cancel_delayed_work_sync(&hdev->cmd_timer);
2467                 hci_req_unlock(hdev);
2468                 return 0;
2469         }
2470
2471         /* Flush RX and TX works */
2472         flush_work(&hdev->tx_work);
2473         flush_work(&hdev->rx_work);
2474
2475         if (hdev->discov_timeout > 0) {
2476                 cancel_delayed_work(&hdev->discov_off);
2477                 hdev->discov_timeout = 0;
2478                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2480         }
2481
2482         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483                 cancel_delayed_work(&hdev->service_cache);
2484
2485         cancel_delayed_work_sync(&hdev->le_scan_disable);
2486
2487         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488                 cancel_delayed_work_sync(&hdev->rpa_expired);
2489
2490         hci_dev_lock(hdev);
2491         hci_inquiry_cache_flush(hdev);
2492         hci_conn_hash_flush(hdev);
2493         hci_pend_le_actions_clear(hdev);
2494         hci_dev_unlock(hdev);
2495
2496         hci_notify(hdev, HCI_DEV_DOWN);
2497
2498         if (hdev->flush)
2499                 hdev->flush(hdev);
2500
2501         /* Reset device */
2502         skb_queue_purge(&hdev->cmd_q);
2503         atomic_set(&hdev->cmd_cnt, 1);
2504         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507                 set_bit(HCI_INIT, &hdev->flags);
2508                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509                 clear_bit(HCI_INIT, &hdev->flags);
2510         }
2511
2512         /* Flush cmd work */
2513         flush_work(&hdev->cmd_work);
2514
2515         /* Drop queues */
2516         skb_queue_purge(&hdev->rx_q);
2517         skb_queue_purge(&hdev->cmd_q);
2518         skb_queue_purge(&hdev->raw_q);
2519
2520         /* Drop last sent command */
2521         if (hdev->sent_cmd) {
2522                 cancel_delayed_work_sync(&hdev->cmd_timer);
2523                 kfree_skb(hdev->sent_cmd);
2524                 hdev->sent_cmd = NULL;
2525         }
2526
2527         kfree_skb(hdev->recv_evt);
2528         hdev->recv_evt = NULL;
2529
2530         /* After this point our queues are empty
2531          * and no tasks are scheduled. */
2532         hdev->close(hdev);
2533
2534         /* Clear flags */
2535         hdev->flags &= BIT(HCI_RAW);
2536         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
2538         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539                 if (hdev->dev_type == HCI_BREDR) {
2540                         hci_dev_lock(hdev);
2541                         mgmt_powered(hdev, 0);
2542                         hci_dev_unlock(hdev);
2543                 }
2544         }
2545
2546         /* Controller radio is available but is currently powered down */
2547         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2548
2549         memset(hdev->eir, 0, sizeof(hdev->eir));
2550         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551         bacpy(&hdev->random_addr, BDADDR_ANY);
2552
2553         hci_req_unlock(hdev);
2554
2555         hci_dev_put(hdev);
2556         return 0;
2557 }
2558
2559 int hci_dev_close(__u16 dev)
2560 {
2561         struct hci_dev *hdev;
2562         int err;
2563
2564         hdev = hci_dev_get(dev);
2565         if (!hdev)
2566                 return -ENODEV;
2567
2568         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569                 err = -EBUSY;
2570                 goto done;
2571         }
2572
2573         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574                 cancel_delayed_work(&hdev->power_off);
2575
2576         err = hci_dev_do_close(hdev);
2577
2578 done:
2579         hci_dev_put(hdev);
2580         return err;
2581 }
2582
2583 int hci_dev_reset(__u16 dev)
2584 {
2585         struct hci_dev *hdev;
2586         int ret = 0;
2587
2588         hdev = hci_dev_get(dev);
2589         if (!hdev)
2590                 return -ENODEV;
2591
2592         hci_req_lock(hdev);
2593
2594         if (!test_bit(HCI_UP, &hdev->flags)) {
2595                 ret = -ENETDOWN;
2596                 goto done;
2597         }
2598
2599         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600                 ret = -EBUSY;
2601                 goto done;
2602         }
2603
2604         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2605                 ret = -EOPNOTSUPP;
2606                 goto done;
2607         }
2608
2609         /* Drop queues */
2610         skb_queue_purge(&hdev->rx_q);
2611         skb_queue_purge(&hdev->cmd_q);
2612
2613         hci_dev_lock(hdev);
2614         hci_inquiry_cache_flush(hdev);
2615         hci_conn_hash_flush(hdev);
2616         hci_dev_unlock(hdev);
2617
2618         if (hdev->flush)
2619                 hdev->flush(hdev);
2620
2621         atomic_set(&hdev->cmd_cnt, 1);
2622         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2623
2624         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2625
2626 done:
2627         hci_req_unlock(hdev);
2628         hci_dev_put(hdev);
2629         return ret;
2630 }
2631
2632 int hci_dev_reset_stat(__u16 dev)
2633 {
2634         struct hci_dev *hdev;
2635         int ret = 0;
2636
2637         hdev = hci_dev_get(dev);
2638         if (!hdev)
2639                 return -ENODEV;
2640
2641         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642                 ret = -EBUSY;
2643                 goto done;
2644         }
2645
2646         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2647                 ret = -EOPNOTSUPP;
2648                 goto done;
2649         }
2650
2651         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
2653 done:
2654         hci_dev_put(hdev);
2655         return ret;
2656 }
2657
2658 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2659 {
2660         bool conn_changed, discov_changed;
2661
2662         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2663
2664         if ((scan & SCAN_PAGE))
2665                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2666                                                  &hdev->dev_flags);
2667         else
2668                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2669                                                   &hdev->dev_flags);
2670
2671         if ((scan & SCAN_INQUIRY)) {
2672                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2673                                                    &hdev->dev_flags);
2674         } else {
2675                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2677                                                     &hdev->dev_flags);
2678         }
2679
2680         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2681                 return;
2682
2683         if (conn_changed || discov_changed) {
2684                 /* In case this was disabled through mgmt */
2685                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2686
2687                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688                         mgmt_update_adv_data(hdev);
2689
2690                 mgmt_new_settings(hdev);
2691         }
2692 }
2693
2694 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2695 {
2696         struct hci_dev *hdev;
2697         struct hci_dev_req dr;
2698         int err = 0;
2699
2700         if (copy_from_user(&dr, arg, sizeof(dr)))
2701                 return -EFAULT;
2702
2703         hdev = hci_dev_get(dr.dev_id);
2704         if (!hdev)
2705                 return -ENODEV;
2706
2707         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2708                 err = -EBUSY;
2709                 goto done;
2710         }
2711
2712         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2713                 err = -EOPNOTSUPP;
2714                 goto done;
2715         }
2716
2717         if (hdev->dev_type != HCI_BREDR) {
2718                 err = -EOPNOTSUPP;
2719                 goto done;
2720         }
2721
2722         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2723                 err = -EOPNOTSUPP;
2724                 goto done;
2725         }
2726
2727         switch (cmd) {
2728         case HCISETAUTH:
2729                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2730                                    HCI_INIT_TIMEOUT);
2731                 break;
2732
2733         case HCISETENCRYPT:
2734                 if (!lmp_encrypt_capable(hdev)) {
2735                         err = -EOPNOTSUPP;
2736                         break;
2737                 }
2738
2739                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740                         /* Auth must be enabled first */
2741                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2742                                            HCI_INIT_TIMEOUT);
2743                         if (err)
2744                                 break;
2745                 }
2746
2747                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2748                                    HCI_INIT_TIMEOUT);
2749                 break;
2750
2751         case HCISETSCAN:
2752                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2753                                    HCI_INIT_TIMEOUT);
2754
2755                 /* Ensure that the connectable and discoverable states
2756                  * get correctly modified as this was a non-mgmt change.
2757                  */
2758                 if (!err)
2759                         hci_update_scan_state(hdev, dr.dev_opt);
2760                 break;
2761
2762         case HCISETLINKPOL:
2763                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2764                                    HCI_INIT_TIMEOUT);
2765                 break;
2766
2767         case HCISETLINKMODE:
2768                 hdev->link_mode = ((__u16) dr.dev_opt) &
2769                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2770                 break;
2771
2772         case HCISETPTYPE:
2773                 hdev->pkt_type = (__u16) dr.dev_opt;
2774                 break;
2775
2776         case HCISETACLMTU:
2777                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2778                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2779                 break;
2780
2781         case HCISETSCOMTU:
2782                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2783                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2784                 break;
2785
2786         default:
2787                 err = -EINVAL;
2788                 break;
2789         }
2790
2791 done:
2792         hci_dev_put(hdev);
2793         return err;
2794 }
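
/* Example (illustrative userspace sketch, not part of this file): the
 * HCISETSCAN branch above, making hci0 both connectable and
 * discoverable; hci_update_scan_state() then mirrors the change into
 * HCI_CONNECTABLE and HCI_DISCOVERABLE. Assumes ctl is a raw HCI
 * socket opened as in the HCIDEVUP sketch above.
 *
 *        struct hci_dev_req dr;
 *
 *        dr.dev_id  = 0;                               // hci0
 *        dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;        // page + inquiry scan
 *
 *        if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *                perror("HCISETSCAN");
 */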
2795
2796 int hci_get_dev_list(void __user *arg)
2797 {
2798         struct hci_dev *hdev;
2799         struct hci_dev_list_req *dl;
2800         struct hci_dev_req *dr;
2801         int n = 0, size, err;
2802         __u16 dev_num;
2803
2804         if (get_user(dev_num, (__u16 __user *) arg))
2805                 return -EFAULT;
2806
2807         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2808                 return -EINVAL;
2809
2810         size = sizeof(*dl) + dev_num * sizeof(*dr);
2811
2812         dl = kzalloc(size, GFP_KERNEL);
2813         if (!dl)
2814                 return -ENOMEM;
2815
2816         dr = dl->dev_req;
2817
2818         read_lock(&hci_dev_list_lock);
2819         list_for_each_entry(hdev, &hci_dev_list, list) {
2820                 unsigned long flags = hdev->flags;
2821
2822                 /* When auto-off is configured, it means the transport
2823                  * is running, but we still indicate that the device
2824                  * is actually down.
2825                  */
2826                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2827                         flags &= ~BIT(HCI_UP);
2828
2829                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2831
2832                 (dr + n)->dev_id  = hdev->id;
2833                 (dr + n)->dev_opt = flags;
2834
2835                 if (++n >= dev_num)
2836                         break;
2837         }
2838         read_unlock(&hci_dev_list_lock);
2839
2840         dl->dev_num = n;
2841         size = sizeof(*dl) + n * sizeof(*dr);
2842
2843         err = copy_to_user(arg, dl, size);
2844         kfree(dl);
2845
2846         return err ? -EFAULT : 0;
2847 }
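
/* Example (illustrative userspace sketch, not part of this file):
 * reading the list built above. Note the HCI_AUTO_OFF handling: a
 * controller still in its auto-power-off grace period reports the
 * HCI_UP bit as cleared here even though the transport is running.
 * Assumes ctl is a raw HCI socket as in the sketches above, plus
 * <stdlib.h> for calloc()/free().
 *
 *        struct hci_dev_list_req *dl;
 *        struct hci_dev_req *dr;
 *        int i;
 *
 *        dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *        if (!dl)
 *                return 1;
 *
 *        dl->dev_num = HCI_MAX_DEV;
 *        dr = dl->dev_req;
 *
 *        if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0) {
 *                for (i = 0; i < dl->dev_num; i++)
 *                        printf("hci%u %s\n", dr[i].dev_id,
 *                               (dr[i].dev_opt & (1 << HCI_UP)) ?
 *                               "up" : "down");
 *        }
 *
 *        free(dl);
 */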
2848
2849 int hci_get_dev_info(void __user *arg)
2850 {
2851         struct hci_dev *hdev;
2852         struct hci_dev_info di;
2853         unsigned long flags;
2854         int err = 0;
2855
2856         if (copy_from_user(&di, arg, sizeof(di)))
2857                 return -EFAULT;
2858
2859         hdev = hci_dev_get(di.dev_id);
2860         if (!hdev)
2861                 return -ENODEV;
2862
2863         /* When auto-off is configured, it means the transport
2864          * is running, but we still indicate that the device
2865          * is actually down.
2866          */
2867         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2868                 flags = hdev->flags & ~BIT(HCI_UP);
2869         else
2870                 flags = hdev->flags;
2871
2872         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2873                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2874
2875         strcpy(di.name, hdev->name);
2876         di.bdaddr   = hdev->bdaddr;
2877         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2878         di.flags    = flags;
2879         di.pkt_type = hdev->pkt_type;
2880         if (lmp_bredr_capable(hdev)) {
2881                 di.acl_mtu  = hdev->acl_mtu;
2882                 di.acl_pkts = hdev->acl_pkts;
2883                 di.sco_mtu  = hdev->sco_mtu;
2884                 di.sco_pkts = hdev->sco_pkts;
2885         } else {
2886                 di.acl_mtu  = hdev->le_mtu;
2887                 di.acl_pkts = hdev->le_pkts;
2888                 di.sco_mtu  = 0;
2889                 di.sco_pkts = 0;
2890         }
2891         di.link_policy = hdev->link_policy;
2892         di.link_mode   = hdev->link_mode;
2893
2894         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2895         memcpy(&di.features, &hdev->features, sizeof(di.features));
2896
2897         if (copy_to_user(arg, &di, sizeof(di)))
2898                 err = -EFAULT;
2899
2900         hci_dev_put(hdev);
2901
2902         return err;
2903 }
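
/* Example (illustrative userspace sketch, not part of this file):
 * querying a single device with HCIGETDEVINFO; as above, HCI_AUTO_OFF
 * makes the HCI_UP bit read as 0 until the device is really powered
 * on. Assumes ctl is a raw HCI socket as in the sketches above.
 *
 *        struct hci_dev_info di;
 *
 *        memset(&di, 0, sizeof(di));
 *        di.dev_id = 0;                                // hci0
 *
 *        if (ioctl(ctl, HCIGETDEVINFO, (unsigned long) &di) == 0)
 *                printf("hci%u flags 0x%8.8x %s\n", di.dev_id, di.flags,
 *                       (di.flags & (1 << HCI_UP)) ? "up" : "down");
 */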
2904
2905 /* ---- Interface to HCI drivers ---- */
2906
2907 static int hci_rfkill_set_block(void *data, bool blocked)
2908 {
2909         struct hci_dev *hdev = data;
2910
2911         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2912
2913         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2914                 return -EBUSY;
2915
2916         if (blocked) {
2917                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2918                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2919                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2920                         hci_dev_do_close(hdev);
2921         } else {
2922                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2923         }
2924
2925         return 0;
2926 }
2927
2928 static const struct rfkill_ops hci_rfkill_ops = {
2929         .set_block = hci_rfkill_set_block,
2930 };
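
/* Illustrative sketch (not part of this section): these ops are wired
 * up through the rfkill core during hci_register_dev(), roughly as
 * follows, so that a soft-block lands in hci_rfkill_set_block() above:
 *
 *        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                    RFKILL_TYPE_BLUETOOTH,
 *                                    &hci_rfkill_ops, hdev);
 *        if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *                rfkill_destroy(hdev->rfkill);
 *                hdev->rfkill = NULL;
 *        }
 */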
2931
2932 static void hci_power_on(struct work_struct *work)
2933 {
2934         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2935         int err;
2936
2937         BT_DBG("%s", hdev->name);
2938
2939         err = hci_dev_do_open(hdev);
2940         if (err < 0) {
2941                 mgmt_set_powered_failed(hdev, err);
2942                 return;
2943         }
2944
2945         /* During the HCI setup phase, a few error conditions are
2946          * ignored and they need to be checked now. If they are still
2947          * valid, it is important to turn the device back off.
2948          */
2949         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2950             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2951             (hdev->dev_type == HCI_BREDR &&
2952              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2953              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2954                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2955                 hci_dev_do_close(hdev);
2956         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2957                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2958                                    HCI_AUTO_OFF_TIMEOUT);
2959         }
2960
2961         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2962                 /* For unconfigured devices, set the HCI_RAW flag
2963                  * so that userspace can easily identify them.
2964                  */
2965                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2966                         set_bit(HCI_RAW, &hdev->flags);
2967
2968                 /* For fully configured devices, this will send
2969                  * the Index Added event. For unconfigured devices,
2970                  * it will send an Unconfigured Index Added event.
2971                  *
2972                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2973                  * and no event will be sent.
2974                  */
2975                 mgmt_index_added(hdev);
2976         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2977                 /* Once the controller is configured, it is
2978                  * important to clear the HCI_RAW flag.
2979                  */
2980                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2981                         clear_bit(HCI_RAW, &hdev->flags);
2982
2983                 /* Powering on the controller with HCI_CONFIG set only
2984                  * happens with the transition from unconfigured to
2985                  * configured. This will send the Index Added event.
2986                  */
2987                 mgmt_index_added(hdev);
2988         }
2989 }
2990
2991 static void hci_power_off(struct work_struct *work)
2992 {
2993         struct hci_dev *hdev = container_of(work, struct hci_dev,
2994                                             power_off.work);
2995
2996         BT_DBG("%s", hdev->name);
2997
2998         hci_dev_do_close(hdev);
2999 }
3000
3001 static void hci_discov_off(struct work_struct *work)
3002 {
3003         struct hci_dev *hdev;
3004
3005         hdev = container_of(work, struct hci_dev, discov_off.work);
3006
3007         BT_DBG("%s", hdev->name);
3008
3009         mgmt_discoverable_timeout(hdev);
3010 }
3011
3012 void hci_uuids_clear(struct hci_dev *hdev)
3013 {
3014         struct bt_uuid *uuid, *tmp;
3015
3016         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3017                 list_del(&uuid->list);
3018                 kfree(uuid);
3019         }
3020 }
3021
3022 void hci_link_keys_clear(struct hci_dev *hdev)
3023 {
3024         struct list_head *p, *n;
3025
3026         list_for_each_safe(p, n, &hdev->link_keys) {
3027                 struct link_key *key;
3028
3029                 key = list_entry(p, struct link_key, list);
3030
3031                 list_del(p);
3032                 kfree(key);
3033         }
3034 }
3035
3036 void hci_smp_ltks_clear(struct hci_dev *hdev)
3037 {
3038         struct smp_ltk *k, *tmp;
3039
3040         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3041                 list_del(&k->list);
3042                 kfree(k);
3043         }
3044 }
3045
3046 void hci_smp_irks_clear(struct hci_dev *hdev)
3047 {
3048         struct smp_irk *k, *tmp;
3049
3050         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3051                 list_del(&k->list);
3052                 kfree(k);
3053         }
3054 }
3055
3056 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3057 {
3058         struct link_key *k;
3059
3060         list_for_each_entry(k, &hdev->link_keys, list)
3061                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3062                         return k;
3063
3064         return NULL;
3065 }
3066
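/* Decide whether a BR/EDR link key should be stored persistently. In
 * short (see the individual checks below): legacy keys are always
 * kept, debug keys never are, and a changed combination key with no
 * previous key is dropped. For the remaining cases the result depends
 * on the authentication requirements: the key is kept when both sides
 * requested some form of bonding or either side used dedicated
 * bonding, and dropped when one side paired with no-bonding while
 * neither side used dedicated bonding.
 */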
3067 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3068                                u8 key_type, u8 old_key_type)
3069 {
3070         /* Legacy key */
3071         if (key_type < 0x03)
3072                 return true;
3073
3074         /* Debug keys are insecure so don't store them persistently */
3075         if (key_type == HCI_LK_DEBUG_COMBINATION)
3076                 return false;
3077
3078         /* Changed combination key and there's no previous one */
3079         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3080                 return false;
3081
3082         /* Security mode 3 case */
3083         if (!conn)
3084                 return true;
3085
3086         /* Neither local nor remote side had no-bonding as a requirement */
3087         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3088                 return true;
3089
3090         /* Local side had dedicated bonding as requirement */
3091         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3092                 return true;
3093
3094         /* Remote side had dedicated bonding as requirement */
3095         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3096                 return true;
3097
3098         /* If none of the above criteria match, then don't store the key
3099          * persistently */
3100         return false;
3101 }
3102
3103 static bool ltk_type_master(u8 type)
3104 {
3105         return (type == SMP_LTK);
3106 }
3107
3108 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3109                              bool master)
3110 {
3111         struct smp_ltk *k;
3112
3113         list_for_each_entry(k, &hdev->long_term_keys, list) {
3114                 if (k->ediv != ediv || k->rand != rand)
3115                         continue;
3116
3117                 if (ltk_type_master(k->type) != master)
3118                         continue;
3119
3120                 return k;
3121         }
3122
3123         return NULL;
3124 }
3125
3126 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3127                                      u8 addr_type, bool master)
3128 {
3129         struct smp_ltk *k;
3130
3131         list_for_each_entry(k, &hdev->long_term_keys, list)
3132                 if (addr_type == k->bdaddr_type &&
3133                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3134                     ltk_type_master(k->type) == master)
3135                         return k;
3136
3137         return NULL;
3138 }
3139
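/* Resolve a Resolvable Private Address (RPA). Per the Core spec an RPA
 * consists of a 24-bit prand (top two bits 01) and a 24-bit hash with
 * hash = ah(IRK, prand). The lookup below first checks the cached
 * ->rpa values and then lets smp_irk_matches() recompute the hash with
 * each stored IRK; on a match the cached RPA is refreshed via bacpy().
 */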
3140 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3141 {
3142         struct smp_irk *irk;
3143
3144         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3145                 if (!bacmp(&irk->rpa, rpa))
3146                         return irk;
3147         }
3148
3149         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3150                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3151                         bacpy(&irk->rpa, rpa);
3152                         return irk;
3153                 }
3154         }
3155
3156         return NULL;
3157 }
3158
3159 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3160                                      u8 addr_type)
3161 {
3162         struct smp_irk *irk;
3163
3164         /* Identity Address must be public or static random (two MSBs set) */
3165         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3166                 return NULL;
3167
3168         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169                 if (addr_type == irk->addr_type &&
3170                     bacmp(bdaddr, &irk->bdaddr) == 0)
3171                         return irk;
3172         }
3173
3174         return NULL;
3175 }
3176
3177 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3178                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3179                                   u8 pin_len, bool *persistent)
3180 {
3181         struct link_key *key, *old_key;
3182         u8 old_key_type;
3183
3184         old_key = hci_find_link_key(hdev, bdaddr);
3185         if (old_key) {
3186                 old_key_type = old_key->type;
3187                 key = old_key;
3188         } else {
3189                 old_key_type = conn ? conn->key_type : 0xff;
3190                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3191                 if (!key)
3192                         return NULL;
3193                 list_add(&key->list, &hdev->link_keys);
3194         }
3195
3196         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3197
3198         /* Some buggy controller combinations generate a changed
3199          * combination key for legacy pairing even when there's no
3200          * previous key */
3201         if (type == HCI_LK_CHANGED_COMBINATION &&
3202             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3203                 type = HCI_LK_COMBINATION;
3204                 if (conn)
3205                         conn->key_type = type;
3206         }
3207
3208         bacpy(&key->bdaddr, bdaddr);
3209         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3210         key->pin_len = pin_len;
3211
3212         if (type == HCI_LK_CHANGED_COMBINATION)
3213                 key->type = old_key_type;
3214         else
3215                 key->type = type;
3216
3217         if (persistent)
3218                 *persistent = hci_persistent_key(hdev, conn, type,
3219                                                  old_key_type);
3220
3221         return key;
3222 }
3223
3224 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3225                             u8 addr_type, u8 type, u8 authenticated,
3226                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3227 {
3228         struct smp_ltk *key, *old_key;
3229         bool master = ltk_type_master(type);
3230
3231         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3232         if (old_key)
3233                 key = old_key;
3234         else {
3235                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3236                 if (!key)
3237                         return NULL;
3238                 list_add(&key->list, &hdev->long_term_keys);
3239         }
3240
3241         bacpy(&key->bdaddr, bdaddr);
3242         key->bdaddr_type = addr_type;
3243         memcpy(key->val, tk, sizeof(key->val));
3244         key->authenticated = authenticated;
3245         key->ediv = ediv;
3246         key->rand = rand;
3247         key->enc_size = enc_size;
3248         key->type = type;
3249
3250         return key;
3251 }
3252
3253 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3255 {
3256         struct smp_irk *irk;
3257
3258         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3259         if (!irk) {
3260                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3261                 if (!irk)
3262                         return NULL;
3263
3264                 bacpy(&irk->bdaddr, bdaddr);
3265                 irk->addr_type = addr_type;
3266
3267                 list_add(&irk->list, &hdev->identity_resolving_keys);
3268         }
3269
3270         memcpy(irk->val, val, 16);
3271         bacpy(&irk->rpa, rpa);
3272
3273         return irk;
3274 }
3275
3276 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3277 {
3278         struct link_key *key;
3279
3280         key = hci_find_link_key(hdev, bdaddr);
3281         if (!key)
3282                 return -ENOENT;
3283
3284         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3285
3286         list_del(&key->list);
3287         kfree(key);
3288
3289         return 0;
3290 }
3291
3292 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3293 {
3294         struct smp_ltk *k, *tmp;
3295         int removed = 0;
3296
3297         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3298                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3299                         continue;
3300
3301                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3302
3303                 list_del(&k->list);
3304                 kfree(k);
3305                 removed++;
3306         }
3307
3308         return removed ? 0 : -ENOENT;
3309 }
3310
3311 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3312 {
3313         struct smp_irk *k, *tmp;
3314
3315         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3316                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3317                         continue;
3318
3319                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3320
3321                 list_del(&k->list);
3322                 kfree(k);
3323         }
3324 }
3325
3326 /* HCI command timer function */
3327 static void hci_cmd_timeout(struct work_struct *work)
3328 {
3329         struct hci_dev *hdev = container_of(work, struct hci_dev,
3330                                             cmd_timer.work);
3331
3332         if (hdev->sent_cmd) {
3333                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3334                 u16 opcode = __le16_to_cpu(sent->opcode);
3335
3336                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3337         } else {
3338                 BT_ERR("%s command tx timeout", hdev->name);
3339         }
3340
3341         atomic_set(&hdev->cmd_cnt, 1);
3342         queue_work(hdev->workqueue, &hdev->cmd_work);
3343 }
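
/* Illustrative sketch (hypothetical helper, not part of the original
 * file): the watchdog above only fires if it was armed. In this file
 * hci_cmd_work arms it each time a command is handed to the driver,
 * which a driver-eye view could approximate as:
 */
static inline void example_arm_cmd_watchdog(struct hci_dev *hdev)
{
	/* Re-arm the per-command deadline; the timer is cancelled again
	 * once the matching Command Complete/Status event is processed.
	 */
	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
}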
3344
3345 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3346                                           bdaddr_t *bdaddr)
3347 {
3348         struct oob_data *data;
3349
3350         list_for_each_entry(data, &hdev->remote_oob_data, list)
3351                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3352                         return data;
3353
3354         return NULL;
3355 }
3356
3357 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3358 {
3359         struct oob_data *data;
3360
3361         data = hci_find_remote_oob_data(hdev, bdaddr);
3362         if (!data)
3363                 return -ENOENT;
3364
3365         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3366
3367         list_del(&data->list);
3368         kfree(data);
3369
3370         return 0;
3371 }
3372
3373 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3374 {
3375         struct oob_data *data, *n;
3376
3377         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3378                 list_del(&data->list);
3379                 kfree(data);
3380         }
3381 }
3382
3383 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3384                             u8 *hash, u8 *randomizer)
3385 {
3386         struct oob_data *data;
3387
3388         data = hci_find_remote_oob_data(hdev, bdaddr);
3389         if (!data) {
3390                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3391                 if (!data)
3392                         return -ENOMEM;
3393
3394                 bacpy(&data->bdaddr, bdaddr);
3395                 list_add(&data->list, &hdev->remote_oob_data);
3396         }
3397
3398         memcpy(data->hash192, hash, sizeof(data->hash192));
3399         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3400
3401         memset(data->hash256, 0, sizeof(data->hash256));
3402         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3403
3404         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3405
3406         return 0;
3407 }
3408
3409 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3410                                 u8 *hash192, u8 *randomizer192,
3411                                 u8 *hash256, u8 *randomizer256)
3412 {
3413         struct oob_data *data;
3414
3415         data = hci_find_remote_oob_data(hdev, bdaddr);
3416         if (!data) {
3417                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3418                 if (!data)
3419                         return -ENOMEM;
3420
3421                 bacpy(&data->bdaddr, bdaddr);
3422                 list_add(&data->list, &hdev->remote_oob_data);
3423         }
3424
3425         memcpy(data->hash192, hash192, sizeof(data->hash192));
3426         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3427
3428         memcpy(data->hash256, hash256, sizeof(data->hash256));
3429         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3430
3431         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3432
3433         return 0;
3434 }
3435
3436 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3437                                          bdaddr_t *bdaddr, u8 type)
3438 {
3439         struct bdaddr_list *b;
3440
3441         list_for_each_entry(b, bdaddr_list, list) {
3442                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3443                         return b;
3444         }
3445
3446         return NULL;
3447 }
3448
3449 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3450 {
3451         struct list_head *p, *n;
3452
3453         list_for_each_safe(p, n, bdaddr_list) {
3454                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3455
3456                 list_del(p);
3457                 kfree(b);
3458         }
3459 }
3460
3461 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3462 {
3463         struct bdaddr_list *entry;
3464
3465         if (!bacmp(bdaddr, BDADDR_ANY))
3466                 return -EBADF;
3467
3468         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3469                 return -EEXIST;
3470
3471         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3472         if (!entry)
3473                 return -ENOMEM;
3474
3475         bacpy(&entry->bdaddr, bdaddr);
3476         entry->bdaddr_type = type;
3477
3478         list_add(&entry->list, list);
3479
3480         return 0;
3481 }
3482
3483 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3484 {
3485         struct bdaddr_list *entry;
3486
3487         if (!bacmp(bdaddr, BDADDR_ANY)) {
3488                 hci_bdaddr_list_clear(list);
3489                 return 0;
3490         }
3491
3492         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3493         if (!entry)
3494                 return -ENOENT;
3495
3496         list_del(&entry->list);
3497         kfree(entry);
3498
3499         return 0;
3500 }
3501
3502 /* This function requires the caller holds hdev->lock */
3503 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3504                                                bdaddr_t *addr, u8 addr_type)
3505 {
3506         struct hci_conn_params *params;
3507
3508         /* The conn params list only contains identity addresses */
3509         if (!hci_is_identity_address(addr, addr_type))
3510                 return NULL;
3511
3512         list_for_each_entry(params, &hdev->le_conn_params, list) {
3513                 if (bacmp(&params->addr, addr) == 0 &&
3514                     params->addr_type == addr_type) {
3515                         return params;
3516                 }
3517         }
3518
3519         return NULL;
3520 }
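
/* Illustrative usage (editor's sketch, hypothetical caller): since the
 * lookup above is only safe while hdev->lock is held, a typical caller
 * brackets it like this.
 */
static void example_conn_params_usage(struct hci_dev *hdev,
				      bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		BT_DBG("conn interval 0x%4.4x-0x%4.4x",
		       params->conn_min_interval, params->conn_max_interval);

	hci_dev_unlock(hdev);
}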
3521
3522 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3523 {
3524         struct hci_conn *conn;
3525
3526         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3527         if (!conn)
3528                 return false;
3529
3530         if (conn->dst_type != type)
3531                 return false;
3532
3533         if (conn->state != BT_CONNECTED)
3534                 return false;
3535
3536         return true;
3537 }
3538
3539 /* This function requires the caller holds hdev->lock */
3540 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3541                                                   bdaddr_t *addr, u8 addr_type)
3542 {
3543         struct hci_conn_params *param;
3544
3545         /* The list only contains identity addresses */
3546         if (!hci_is_identity_address(addr, addr_type))
3547                 return NULL;
3548
3549         list_for_each_entry(param, list, action) {
3550                 if (bacmp(&param->addr, addr) == 0 &&
3551                     param->addr_type == addr_type)
3552                         return param;
3553         }
3554
3555         return NULL;
3556 }
3557
3558 /* This function requires the caller holds hdev->lock */
3559 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3560                                             bdaddr_t *addr, u8 addr_type)
3561 {
3562         struct hci_conn_params *params;
3563
3564         if (!hci_is_identity_address(addr, addr_type))
3565                 return NULL;
3566
3567         params = hci_conn_params_lookup(hdev, addr, addr_type);
3568         if (params)
3569                 return params;
3570
3571         params = kzalloc(sizeof(*params), GFP_KERNEL);
3572         if (!params) {
3573                 BT_ERR("Out of memory");
3574                 return NULL;
3575         }
3576
3577         bacpy(&params->addr, addr);
3578         params->addr_type = addr_type;
3579
3580         list_add(&params->list, &hdev->le_conn_params);
3581         INIT_LIST_HEAD(&params->action);
3582
3583         params->conn_min_interval = hdev->le_conn_min_interval;
3584         params->conn_max_interval = hdev->le_conn_max_interval;
3585         params->conn_latency = hdev->le_conn_latency;
3586         params->supervision_timeout = hdev->le_supv_timeout;
3587         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3588
3589         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3590
3591         return params;
3592 }
3593
3594 /* This function requires the caller holds hdev->lock */
3595 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3596                         u8 auto_connect)
3597 {
3598         struct hci_conn_params *params;
3599
3600         params = hci_conn_params_add(hdev, addr, addr_type);
3601         if (!params)
3602                 return -EIO;
3603
3604         if (params->auto_connect == auto_connect)
3605                 return 0;
3606
3607         list_del_init(&params->action);
3608
3609         switch (auto_connect) {
3610         case HCI_AUTO_CONN_DISABLED:
3611         case HCI_AUTO_CONN_LINK_LOSS:
3612                 hci_update_background_scan(hdev);
3613                 break;
3614         case HCI_AUTO_CONN_REPORT:
3615                 list_add(&params->action, &hdev->pend_le_reports);
3616                 hci_update_background_scan(hdev);
3617                 break;
3618         case HCI_AUTO_CONN_ALWAYS:
3619                 if (!is_connected(hdev, addr, addr_type)) {
3620                         list_add(&params->action, &hdev->pend_le_conns);
3621                         hci_update_background_scan(hdev);
3622                 }
3623                 break;
3624         }
3625
3626         params->auto_connect = auto_connect;
3627
3628         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3629                auto_connect);
3630
3631         return 0;
3632 }
3633
3634 /* This function requires the caller holds hdev->lock */
3635 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3636 {
3637         struct hci_conn_params *params;
3638
3639         params = hci_conn_params_lookup(hdev, addr, addr_type);
3640         if (!params)
3641                 return;
3642
3643         list_del(&params->action);
3644         list_del(&params->list);
3645         kfree(params);
3646
3647         hci_update_background_scan(hdev);
3648
3649         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3650 }
3651
3652 /* This function requires the caller holds hdev->lock */
3653 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3654 {
3655         struct hci_conn_params *params, *tmp;
3656
3657         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3658                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3659                         continue;
3660                 list_del(&params->list);
3661                 kfree(params);
3662         }
3663
3664         BT_DBG("All disabled LE connection parameters were removed");
3665 }
3666
3667 /* This function requires the caller holds hdev->lock */
3668 void hci_conn_params_clear_all(struct hci_dev *hdev)
3669 {
3670         struct hci_conn_params *params, *tmp;
3671
3672         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3673                 list_del(&params->action);
3674                 list_del(&params->list);
3675                 kfree(params);
3676         }
3677
3678         hci_update_background_scan(hdev);
3679
3680         BT_DBG("All LE connection parameters were removed");
3681 }
3682
3683 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3684 {
3685         if (status) {
3686                 BT_ERR("Failed to start inquiry: status %d", status);
3687
3688                 hci_dev_lock(hdev);
3689                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3690                 hci_dev_unlock(hdev);
3691                 return;
3692         }
3693 }
3694
3695 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3696 {
3697         /* General inquiry access code (GIAC) */
3698         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3699         struct hci_request req;
3700         struct hci_cp_inquiry cp;
3701         int err;
3702
3703         if (status) {
3704                 BT_ERR("Failed to disable LE scanning: status %d", status);
3705                 return;
3706         }
3707
3708         switch (hdev->discovery.type) {
3709         case DISCOV_TYPE_LE:
3710                 hci_dev_lock(hdev);
3711                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3712                 hci_dev_unlock(hdev);
3713                 break;
3714
3715         case DISCOV_TYPE_INTERLEAVED:
3716                 hci_req_init(&req, hdev);
3717
3718                 memset(&cp, 0, sizeof(cp));
3719                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3720                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3721                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3722
3723                 hci_dev_lock(hdev);
3724
3725                 hci_inquiry_cache_flush(hdev);
3726
3727                 err = hci_req_run(&req, inquiry_complete);
3728                 if (err) {
3729                         BT_ERR("Inquiry request failed: err %d", err);
3730                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3731                 }
3732
3733                 hci_dev_unlock(hdev);
3734                 break;
3735         }
3736 }
3737
3738 static void le_scan_disable_work(struct work_struct *work)
3739 {
3740         struct hci_dev *hdev = container_of(work, struct hci_dev,
3741                                             le_scan_disable.work);
3742         struct hci_request req;
3743         int err;
3744
3745         BT_DBG("%s", hdev->name);
3746
3747         hci_req_init(&req, hdev);
3748
3749         hci_req_add_le_scan_disable(&req);
3750
3751         err = hci_req_run(&req, le_scan_disable_work_complete);
3752         if (err)
3753                 BT_ERR("Disable LE scanning request failed: err %d", err);
3754 }
3755
3756 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3757 {
3758         struct hci_dev *hdev = req->hdev;
3759
3760         /* If we're advertising or initiating an LE connection we can't
3761          * go ahead and change the random address at this time. This is
3762          * because the eventual initiator address used for the
3763          * subsequently created connection will be undefined (some
3764          * controllers use the new address and others the one we had
3765          * when the operation started).
3766          *
3767          * In this kind of scenario skip the update and let the random
3768          * address be updated at the next cycle.
3769          */
3770         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3771             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3772                 BT_DBG("Deferring random address update");
3773                 return;
3774         }
3775
3776         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3777 }
3778
3779 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3780                               u8 *own_addr_type)
3781 {
3782         struct hci_dev *hdev = req->hdev;
3783         int err;
3784
3785         /* If privacy is enabled use a resolvable private address. If
3786          * the current RPA has expired or something other than the
3787          * current RPA is in use, then generate a new one.
3788          */
3789         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3790                 int to;
3791
3792                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3793
3794                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3795                     !bacmp(&hdev->random_addr, &hdev->rpa))
3796                         return 0;
3797
3798                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3799                 if (err < 0) {
3800                         BT_ERR("%s failed to generate new RPA", hdev->name);
3801                         return err;
3802                 }
3803
3804                 set_random_addr(req, &hdev->rpa);
3805
3806                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3807                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3808
3809                 return 0;
3810         }
3811
3812         /* If privacy is required but a resolvable private address is
3813          * not available, use a non-resolvable private address. This is
3814          * useful for active scanning and non-connectable advertising.
3815          */
3816         if (require_privacy) {
3817                 bdaddr_t urpa;
3818
3819                 get_random_bytes(&urpa, 6);
3820                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3821
3822                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3823                 set_random_addr(req, &urpa);
3824                 return 0;
3825         }
3826
3827         /* If forcing static address is in use or there is no public
3828          * address, use the static address as the random address (but
3829          * skip the HCI command if the current random address is
3830          * already the static one).
3831          */
3832         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3833             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3834                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3835                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3836                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3837                                     &hdev->static_addr);
3838                 return 0;
3839         }
3840
3841         /* Neither privacy nor static address is being used so use a
3842          * public address.
3843          */
3844         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3845
3846         return 0;
3847 }
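
/* Illustrative note (editor's sketch, per the Bluetooth Core spec): the
 * two most significant bits of a random address select its sub-type,
 * which is why the code above clears them (& 0x3f) for a non-resolvable
 * address and why hci_find_irk_by_addr() requires 0xc0 for a static
 * address:
 *
 *	11xxxxxx....  static random address
 *	01xxxxxx....  resolvable private address (RPA)
 *	00xxxxxx....  non-resolvable private address
 *
 * bdaddr_t is stored little-endian, so these bits live in b[5].
 */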
3848
3849 /* Copy the Identity Address of the controller.
3850  *
3851  * If the controller has a public BD_ADDR, then by default use that one.
3852  * If this is an LE-only controller without a public address, default to
3853  * the static random address.
3854  *
3855  * For debugging purposes it is possible to force controllers with a
3856  * public address to use the static random address instead.
3857  */
3858 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3859                                u8 *bdaddr_type)
3860 {
3861         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3862             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3863                 bacpy(bdaddr, &hdev->static_addr);
3864                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3865         } else {
3866                 bacpy(bdaddr, &hdev->bdaddr);
3867                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3868         }
3869 }
3870
3871 /* Alloc HCI device */
3872 struct hci_dev *hci_alloc_dev(void)
3873 {
3874         struct hci_dev *hdev;
3875
3876         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3877         if (!hdev)
3878                 return NULL;
3879
3880         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3881         hdev->esco_type = (ESCO_HV1);
3882         hdev->link_mode = (HCI_LM_ACCEPT);
3883         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3884         hdev->io_capability = 0x03;     /* No Input No Output */
3885         hdev->manufacturer = 0xffff;    /* Default to internal use */
3886         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3887         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3888
3889         hdev->sniff_max_interval = 800;
3890         hdev->sniff_min_interval = 80;
3891
3892         hdev->le_adv_channel_map = 0x07;
3893         hdev->le_scan_interval = 0x0060;
3894         hdev->le_scan_window = 0x0030;
3895         hdev->le_conn_min_interval = 0x0028;
3896         hdev->le_conn_max_interval = 0x0038;
3897         hdev->le_conn_latency = 0x0000;
3898         hdev->le_supv_timeout = 0x002a;
3899
3900         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3901         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3902         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3903         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3904
3905         mutex_init(&hdev->lock);
3906         mutex_init(&hdev->req_lock);
3907
3908         INIT_LIST_HEAD(&hdev->mgmt_pending);
3909         INIT_LIST_HEAD(&hdev->blacklist);
3910         INIT_LIST_HEAD(&hdev->whitelist);
3911         INIT_LIST_HEAD(&hdev->uuids);
3912         INIT_LIST_HEAD(&hdev->link_keys);
3913         INIT_LIST_HEAD(&hdev->long_term_keys);
3914         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3915         INIT_LIST_HEAD(&hdev->remote_oob_data);
3916         INIT_LIST_HEAD(&hdev->le_white_list);
3917         INIT_LIST_HEAD(&hdev->le_conn_params);
3918         INIT_LIST_HEAD(&hdev->pend_le_conns);
3919         INIT_LIST_HEAD(&hdev->pend_le_reports);
3920         INIT_LIST_HEAD(&hdev->conn_hash.list);
3921
3922         INIT_WORK(&hdev->rx_work, hci_rx_work);
3923         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3924         INIT_WORK(&hdev->tx_work, hci_tx_work);
3925         INIT_WORK(&hdev->power_on, hci_power_on);
3926
3927         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3928         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3929         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3930
3931         skb_queue_head_init(&hdev->rx_q);
3932         skb_queue_head_init(&hdev->cmd_q);
3933         skb_queue_head_init(&hdev->raw_q);
3934
3935         init_waitqueue_head(&hdev->req_wait_q);
3936
3937         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3938
3939         hci_init_sysfs(hdev);
3940         discovery_init(hdev);
3941
3942         return hdev;
3943 }
3944 EXPORT_SYMBOL(hci_alloc_dev);
3945
3946 /* Free HCI device */
3947 void hci_free_dev(struct hci_dev *hdev)
3948 {
3949         /* Freed via the device release function */
3950         put_device(&hdev->dev);
3951 }
3952 EXPORT_SYMBOL(hci_free_dev);
3953
3954 /* Register HCI device */
3955 int hci_register_dev(struct hci_dev *hdev)
3956 {
3957         int id, error;
3958
3959         if (!hdev->open || !hdev->close || !hdev->send)
3960                 return -EINVAL;
3961
3962         /* Do not allow HCI_AMP devices to register at index 0,
3963          * so the index can be used as the AMP controller ID.
3964          */
3965         switch (hdev->dev_type) {
3966         case HCI_BREDR:
3967                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3968                 break;
3969         case HCI_AMP:
3970                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3971                 break;
3972         default:
3973                 return -EINVAL;
3974         }
3975
3976         if (id < 0)
3977                 return id;
3978
3979         sprintf(hdev->name, "hci%d", id);
3980         hdev->id = id;
3981
3982         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3983
3984         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3985                                           WQ_MEM_RECLAIM, 1, hdev->name);
3986         if (!hdev->workqueue) {
3987                 error = -ENOMEM;
3988                 goto err;
3989         }
3990
3991         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3992                                               WQ_MEM_RECLAIM, 1, hdev->name);
3993         if (!hdev->req_workqueue) {
3994                 destroy_workqueue(hdev->workqueue);
3995                 error = -ENOMEM;
3996                 goto err;
3997         }
3998
3999         if (!IS_ERR_OR_NULL(bt_debugfs))
4000                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4001
4002         dev_set_name(&hdev->dev, "%s", hdev->name);
4003
4004         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4005                                                CRYPTO_ALG_ASYNC);
4006         if (IS_ERR(hdev->tfm_aes)) {
4007                 BT_ERR("Unable to create crypto context");
4008                 error = PTR_ERR(hdev->tfm_aes);
4009                 hdev->tfm_aes = NULL;
4010                 goto err_wqueue;
4011         }
4012
4013         error = device_add(&hdev->dev);
4014         if (error < 0)
4015                 goto err_tfm;
4016
4017         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4018                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4019                                     hdev);
4020         if (hdev->rfkill) {
4021                 if (rfkill_register(hdev->rfkill) < 0) {
4022                         rfkill_destroy(hdev->rfkill);
4023                         hdev->rfkill = NULL;
4024                 }
4025         }
4026
4027         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4028                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4029
4030         set_bit(HCI_SETUP, &hdev->dev_flags);
4031         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4032
4033         if (hdev->dev_type == HCI_BREDR) {
4034                 /* Assume BR/EDR support until proven otherwise (such as
4035                  * through reading supported features during init).
4036                  */
4037                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4038         }
4039
4040         write_lock(&hci_dev_list_lock);
4041         list_add(&hdev->list, &hci_dev_list);
4042         write_unlock(&hci_dev_list_lock);
4043
4044         /* Devices that are marked for raw-only usage are unconfigured
4045          * and should not be included in normal operation.
4046          */
4047         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4048                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4049
4050         hci_notify(hdev, HCI_DEV_REG);
4051         hci_dev_hold(hdev);
4052
4053         queue_work(hdev->req_workqueue, &hdev->power_on);
4054
4055         return id;
4056
4057 err_tfm:
4058         crypto_free_blkcipher(hdev->tfm_aes);
4059 err_wqueue:
4060         destroy_workqueue(hdev->workqueue);
4061         destroy_workqueue(hdev->req_workqueue);
4062 err:
4063         ida_simple_remove(&hci_index_ida, hdev->id);
4064
4065         return error;
4066 }
4067 EXPORT_SYMBOL(hci_register_dev);
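
/* Illustrative sketch (hypothetical driver, all names made up): a
 * minimal transport registers by allocating an hci_dev, wiring up the
 * three mandatory callbacks checked above, and calling
 * hci_register_dev(); on failure the device is released again with
 * hci_free_dev().
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}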
4068
4069 /* Unregister HCI device */
4070 void hci_unregister_dev(struct hci_dev *hdev)
4071 {
4072         int i, id;
4073
4074         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4075
4076         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4077
4078         id = hdev->id;
4079
4080         write_lock(&hci_dev_list_lock);
4081         list_del(&hdev->list);
4082         write_unlock(&hci_dev_list_lock);
4083
4084         hci_dev_do_close(hdev);
4085
4086         for (i = 0; i < NUM_REASSEMBLY; i++)
4087                 kfree_skb(hdev->reassembly[i]);
4088
4089         cancel_work_sync(&hdev->power_on);
4090
4091         if (!test_bit(HCI_INIT, &hdev->flags) &&
4092             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4093             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4094                 hci_dev_lock(hdev);
4095                 mgmt_index_removed(hdev);
4096                 hci_dev_unlock(hdev);
4097         }
4098
4099         /* mgmt_index_removed should take care of emptying the
4100          * pending list */
4101         BUG_ON(!list_empty(&hdev->mgmt_pending));
4102
4103         hci_notify(hdev, HCI_DEV_UNREG);
4104
4105         if (hdev->rfkill) {
4106                 rfkill_unregister(hdev->rfkill);
4107                 rfkill_destroy(hdev->rfkill);
4108         }
4109
4110         if (hdev->tfm_aes)
4111                 crypto_free_blkcipher(hdev->tfm_aes);
4112
4113         device_del(&hdev->dev);
4114
4115         debugfs_remove_recursive(hdev->debugfs);
4116
4117         destroy_workqueue(hdev->workqueue);
4118         destroy_workqueue(hdev->req_workqueue);
4119
4120         hci_dev_lock(hdev);
4121         hci_bdaddr_list_clear(&hdev->blacklist);
4122         hci_bdaddr_list_clear(&hdev->whitelist);
4123         hci_uuids_clear(hdev);
4124         hci_link_keys_clear(hdev);
4125         hci_smp_ltks_clear(hdev);
4126         hci_smp_irks_clear(hdev);
4127         hci_remote_oob_data_clear(hdev);
4128         hci_bdaddr_list_clear(&hdev->le_white_list);
4129         hci_conn_params_clear_all(hdev);
4130         hci_dev_unlock(hdev);
4131
4132         hci_dev_put(hdev);
4133
4134         ida_simple_remove(&hci_index_ida, id);
4135 }
4136 EXPORT_SYMBOL(hci_unregister_dev);
4137
4138 /* Suspend HCI device */
4139 int hci_suspend_dev(struct hci_dev *hdev)
4140 {
4141         hci_notify(hdev, HCI_DEV_SUSPEND);
4142         return 0;
4143 }
4144 EXPORT_SYMBOL(hci_suspend_dev);
4145
4146 /* Resume HCI device */
4147 int hci_resume_dev(struct hci_dev *hdev)
4148 {
4149         hci_notify(hdev, HCI_DEV_RESUME);
4150         return 0;
4151 }
4152 EXPORT_SYMBOL(hci_resume_dev);
4153
4154 /* Receive frame from HCI drivers */
4155 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4156 {
4157         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4158                       !test_bit(HCI_INIT, &hdev->flags))) {
4159                 kfree_skb(skb);
4160                 return -ENXIO;
4161         }
4162
4163         /* Incoming skb */
4164         bt_cb(skb)->incoming = 1;
4165
4166         /* Time stamp */
4167         __net_timestamp(skb);
4168
4169         skb_queue_tail(&hdev->rx_q, skb);
4170         queue_work(hdev->workqueue, &hdev->rx_work);
4171
4172         return 0;
4173 }
4174 EXPORT_SYMBOL(hci_recv_frame);
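
/* Illustrative sketch (hypothetical driver path, names made up): a
 * transport that already holds a complete HCI event in a flat buffer
 * wraps it in an skb, tags the packet type and feeds it to
 * hci_recv_frame(), which queues it for hci_rx_work.
 */
static int example_deliver_event(struct hci_dev *hdev,
				 const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	/* On error hci_recv_frame() consumes the skb itself */
	return hci_recv_frame(hdev, skb);
}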
4175
4176 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4177                           int count, __u8 index)
4178 {
4179         int len = 0;
4180         int hlen = 0;
4181         int remain = count;
4182         struct sk_buff *skb;
4183         struct bt_skb_cb *scb;
4184
4185         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4186             index >= NUM_REASSEMBLY)
4187                 return -EILSEQ;
4188
4189         skb = hdev->reassembly[index];
4190
4191         if (!skb) {
4192                 switch (type) {
4193                 case HCI_ACLDATA_PKT:
4194                         len = HCI_MAX_FRAME_SIZE;
4195                         hlen = HCI_ACL_HDR_SIZE;
4196                         break;
4197                 case HCI_EVENT_PKT:
4198                         len = HCI_MAX_EVENT_SIZE;
4199                         hlen = HCI_EVENT_HDR_SIZE;
4200                         break;
4201                 case HCI_SCODATA_PKT:
4202                         len = HCI_MAX_SCO_SIZE;
4203                         hlen = HCI_SCO_HDR_SIZE;
4204                         break;
4205                 }
4206
4207                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4208                 if (!skb)
4209                         return -ENOMEM;
4210
4211                 scb = (void *) skb->cb;
4212                 scb->expect = hlen;
4213                 scb->pkt_type = type;
4214
4215                 hdev->reassembly[index] = skb;
4216         }
4217
4218         while (count) {
4219                 scb = (void *) skb->cb;
4220                 len = min_t(uint, scb->expect, count);
4221
4222                 memcpy(skb_put(skb, len), data, len);
4223
4224                 count -= len;
4225                 data += len;
4226                 scb->expect -= len;
4227                 remain = count;
4228
4229                 switch (type) {
4230                 case HCI_EVENT_PKT:
4231                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4232                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4233                                 scb->expect = h->plen;
4234
4235                                 if (skb_tailroom(skb) < scb->expect) {
4236                                         kfree_skb(skb);
4237                                         hdev->reassembly[index] = NULL;
4238                                         return -ENOMEM;
4239                                 }
4240                         }
4241                         break;
4242
4243                 case HCI_ACLDATA_PKT:
4244                         if (skb->len == HCI_ACL_HDR_SIZE) {
4245                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4246                                 scb->expect = __le16_to_cpu(h->dlen);
4247
4248                                 if (skb_tailroom(skb) < scb->expect) {
4249                                         kfree_skb(skb);
4250                                         hdev->reassembly[index] = NULL;
4251                                         return -ENOMEM;
4252                                 }
4253                         }
4254                         break;
4255
4256                 case HCI_SCODATA_PKT:
4257                         if (skb->len == HCI_SCO_HDR_SIZE) {
4258                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4259                                 scb->expect = h->dlen;
4260
4261                                 if (skb_tailroom(skb) < scb->expect) {
4262                                         kfree_skb(skb);
4263                                         hdev->reassembly[index] = NULL;
4264                                         return -ENOMEM;
4265                                 }
4266                         }
4267                         break;
4268                 }
4269
4270                 if (scb->expect == 0) {
4271                         /* Complete frame */
4272
4273                         bt_cb(skb)->pkt_type = type;
4274                         hci_recv_frame(hdev, skb);
4275
4276                         hdev->reassembly[index] = NULL;
4277                         return remain;
4278                 }
4279         }
4280
4281         return remain;
4282 }
4283
4284 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4285 {
4286         int rem = 0;
4287
4288         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4289                 return -EILSEQ;
4290
4291         while (count) {
4292                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4293                 if (rem < 0)
4294                         return rem;
4295
4296                 data += (count - rem);
4297                 count = rem;
4298         }
4299
4300         return rem;
4301 }
4302 EXPORT_SYMBOL(hci_recv_fragment);
4303
4304 #define STREAM_REASSEMBLY 0
4305
4306 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4307 {
4308         int type;
4309         int rem = 0;
4310
4311         while (count) {
4312                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4313
4314                 if (!skb) {
4315                         struct { char type; } *pkt;
4316
4317                         /* Start of the frame */
4318                         pkt = data;
4319                         type = pkt->type;
4320
4321                         data++;
4322                         count--;
4323                 } else
4324                         type = bt_cb(skb)->pkt_type;
4325
4326                 rem = hci_reassembly(hdev, type, data, count,
4327                                      STREAM_REASSEMBLY);
4328                 if (rem < 0)
4329                         return rem;
4330
4331                 data += (count - rem);
4332                 count = rem;
4333         }
4334
4335         return rem;
4336 }
4337 EXPORT_SYMBOL(hci_recv_stream_fragment);
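
/* Illustrative note (editor's sketch): hci_recv_stream_fragment()
 * implements H:4-style framing, where every frame on the wire is
 * prefixed with a one-byte packet type. A UART transport would call it
 * straight from its receive path, e.g. (hypothetical driver code):
 *
 *	hci_recv_stream_fragment(hdev, data, count);
 *
 * with partially received frames carried across calls in
 * hdev->reassembly[STREAM_REASSEMBLY].
 */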
4338
4339 /* ---- Interface to upper protocols ---- */
4340
4341 int hci_register_cb(struct hci_cb *cb)
4342 {
4343         BT_DBG("%p name %s", cb, cb->name);
4344
4345         write_lock(&hci_cb_list_lock);
4346         list_add(&cb->list, &hci_cb_list);
4347         write_unlock(&hci_cb_list_lock);
4348
4349         return 0;
4350 }
4351 EXPORT_SYMBOL(hci_register_cb);
4352
4353 int hci_unregister_cb(struct hci_cb *cb)
4354 {
4355         BT_DBG("%p name %s", cb, cb->name);
4356
4357         write_lock(&hci_cb_list_lock);
4358         list_del(&cb->list);
4359         write_unlock(&hci_cb_list_lock);
4360
4361         return 0;
4362 }
4363 EXPORT_SYMBOL(hci_unregister_cb);
4364
4365 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4366 {
4367         int err;
4368
4369         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4370
4371         /* Time stamp */
4372         __net_timestamp(skb);
4373
4374         /* Send copy to monitor */
4375         hci_send_to_monitor(hdev, skb);
4376
4377         if (atomic_read(&hdev->promisc)) {
4378                 /* Send copy to the sockets */
4379                 hci_send_to_sock(hdev, skb);
4380         }
4381
4382         /* Get rid of skb owner, prior to sending to the driver. */
4383         skb_orphan(skb);
4384
4385         err = hdev->send(hdev, skb);
4386         if (err < 0) {
4387                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4388                 kfree_skb(skb);
4389         }
4390 }
4391
4392 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4393 {
4394         skb_queue_head_init(&req->cmd_q);
4395         req->hdev = hdev;
4396         req->err = 0;
4397 }
4398
4399 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4400 {
4401         struct hci_dev *hdev = req->hdev;
4402         struct sk_buff *skb;
4403         unsigned long flags;
4404
4405         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4406
4407         /* If an error occurred during request building, remove all HCI
4408          * commands queued on the HCI request queue.
4409          */
4410         if (req->err) {
4411                 skb_queue_purge(&req->cmd_q);
4412                 return req->err;
4413         }
4414
4415         /* Do not allow empty requests */
4416         if (skb_queue_empty(&req->cmd_q))
4417                 return -ENODATA;
4418
4419         skb = skb_peek_tail(&req->cmd_q);
4420         bt_cb(skb)->req.complete = complete;
4421
4422         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4423         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4424         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4425
4426         queue_work(hdev->workqueue, &hdev->cmd_work);
4427
4428         return 0;
4429 }
4430
4431 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4432                                        u32 plen, const void *param)
4433 {
4434         int len = HCI_COMMAND_HDR_SIZE + plen;
4435         struct hci_command_hdr *hdr;
4436         struct sk_buff *skb;
4437
4438         skb = bt_skb_alloc(len, GFP_ATOMIC);
4439         if (!skb)
4440                 return NULL;
4441
4442         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4443         hdr->opcode = cpu_to_le16(opcode);
4444         hdr->plen   = plen;
4445
4446         if (plen)
4447                 memcpy(skb_put(skb, plen), param, plen);
4448
4449         BT_DBG("skb len %d", skb->len);
4450
4451         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4452
4453         return skb;
4454 }
4455
4456 /* Send HCI command */
4457 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4458                  const void *param)
4459 {
4460         struct sk_buff *skb;
4461
4462         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4463
4464         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4465         if (!skb) {
4466                 BT_ERR("%s no memory for command", hdev->name);
4467                 return -ENOMEM;
4468         }
4469
4470         /* Stand-alone HCI commands must be flagged as
4471          * single-command requests.
4472          */
4473         bt_cb(skb)->req.start = true;
4474
4475         skb_queue_tail(&hdev->cmd_q, skb);
4476         queue_work(hdev->workqueue, &hdev->cmd_work);
4477
4478         return 0;
4479 }
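
/* Illustrative usage (editor's sketch): a parameterless command such as
 * HCI Reset goes out as hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); a
 * command with a parameter block passes the block and its length, as in
 * this hypothetical helper:
 */
static int example_write_scan_enable(struct hci_dev *hdev, __u8 scan)
{
	/* HCI_OP_WRITE_SCAN_ENABLE takes a single parameter byte */
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}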
4480
4481 /* Queue a command to an asynchronous HCI request */
4482 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4483                     const void *param, u8 event)
4484 {
4485         struct hci_dev *hdev = req->hdev;
4486         struct sk_buff *skb;
4487
4488         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4489
4490         /* If an error occurred during request building, there is no point in
4491          * queueing the HCI command. We can simply return.
4492          */
4493         if (req->err)
4494                 return;
4495
4496         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4497         if (!skb) {
4498                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4499                        hdev->name, opcode);
4500                 req->err = -ENOMEM;
4501                 return;
4502         }
4503
4504         if (skb_queue_empty(&req->cmd_q))
4505                 bt_cb(skb)->req.start = true;
4506
4507         bt_cb(skb)->req.event = event;
4508
4509         skb_queue_tail(&req->cmd_q, skb);
4510 }
4511
4512 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4513                  const void *param)
4514 {
4515         hci_req_add_ev(req, opcode, plen, param, 0);
4516 }
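
/* Illustrative sketch (hypothetical request): the request API batches
 * several commands and runs a single completion callback for the whole
 * transaction, mirroring what le_scan_disable_work() does above.
 */
static int example_run_request(struct hci_dev *hdev,
			       hci_req_complete_t complete)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Returns -ENODATA for empty requests, req.err on build errors */
	return hci_req_run(&req, complete);
}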
4517
4518 /* Get data from the previously sent command */
4519 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4520 {
4521         struct hci_command_hdr *hdr;
4522
4523         if (!hdev->sent_cmd)
4524                 return NULL;
4525
4526         hdr = (void *) hdev->sent_cmd->data;
4527
4528         if (hdr->opcode != cpu_to_le16(opcode))
4529                 return NULL;
4530
4531         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4532
4533         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4534 }
4535
4536 /* Send ACL data */
4537 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4538 {
4539         struct hci_acl_hdr *hdr;
4540         int len = skb->len;
4541
4542         skb_push(skb, HCI_ACL_HDR_SIZE);
4543         skb_reset_transport_header(skb);
4544         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4545         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4546         hdr->dlen   = cpu_to_le16(len);
4547 }
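
/* Illustrative note (editor's sketch): hci_handle_pack() above folds
 * the 4-bit packet boundary/broadcast flags into the top of the 16-bit
 * handle field, roughly:
 *
 *	field = (handle & 0x0fff) | (flags << 12);
 *
 * so handle 0x002a with ACL_START becomes the little-endian encoding
 * of 0x202a.
 */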
4548
4549 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4550                           struct sk_buff *skb, __u16 flags)
4551 {
4552         struct hci_conn *conn = chan->conn;
4553         struct hci_dev *hdev = conn->hdev;
4554         struct sk_buff *list;
4555
4556         skb->len = skb_headlen(skb);
4557         skb->data_len = 0;
4558
4559         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4560
4561         switch (hdev->dev_type) {
4562         case HCI_BREDR:
4563                 hci_add_acl_hdr(skb, conn->handle, flags);
4564                 break;
4565         case HCI_AMP:
4566                 hci_add_acl_hdr(skb, chan->handle, flags);
4567                 break;
4568         default:
4569                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4570                 return;
4571         }
4572
4573         list = skb_shinfo(skb)->frag_list;
4574         if (!list) {
4575                 /* Non-fragmented */
4576                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4577
4578                 skb_queue_tail(queue, skb);
4579         } else {
4580                 /* Fragmented */
4581                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4582
4583                 skb_shinfo(skb)->frag_list = NULL;
4584
4585                 /* Queue all fragments atomically */
4586                 spin_lock(&queue->lock);
4587
4588                 __skb_queue_tail(queue, skb);
4589
4590                 flags &= ~ACL_START;
4591                 flags |= ACL_CONT;
4592                 do {
4593                         skb = list; list = list->next;
4594
4595                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4596                         hci_add_acl_hdr(skb, conn->handle, flags);
4597
4598                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4599
4600                         __skb_queue_tail(queue, skb);
4601                 } while (list);
4602
4603                 spin_unlock(&queue->lock);
4604         }
4605 }
4606
4607 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4608 {
4609         struct hci_dev *hdev = chan->conn->hdev;
4610
4611         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4612
4613         hci_queue_acl(chan, &chan->data_q, skb, flags);
4614
4615         queue_work(hdev->workqueue, &hdev->tx_work);
4616 }
4617
4618 /* Send SCO data */
4619 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4620 {
4621         struct hci_dev *hdev = conn->hdev;
4622         struct hci_sco_hdr hdr;
4623
4624         BT_DBG("%s len %d", hdev->name, skb->len);
4625
4626         hdr.handle = cpu_to_le16(conn->handle);
4627         hdr.dlen   = skb->len;
4628
4629         skb_push(skb, HCI_SCO_HDR_SIZE);
4630         skb_reset_transport_header(skb);
4631         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4632
4633         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4634
4635         skb_queue_tail(&conn->data_q, skb);
4636         queue_work(hdev->workqueue, &hdev->tx_work);
4637 }
4638
4639 /* ---- HCI TX task (outgoing data) ---- */
4640
4641 /* HCI Connection scheduler */
4642 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4643                                      int *quote)
4644 {
4645         struct hci_conn_hash *h = &hdev->conn_hash;
4646         struct hci_conn *conn = NULL, *c;
4647         unsigned int num = 0, min = ~0;
4648
4649         /* We don't have to lock the device here. Connections are always
4650          * added and removed with the TX task disabled. */
4651
4652         rcu_read_lock();
4653
4654         list_for_each_entry_rcu(c, &h->list, list) {
4655                 if (c->type != type || skb_queue_empty(&c->data_q))
4656                         continue;
4657
4658                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4659                         continue;
4660
4661                 num++;
4662
4663                 if (c->sent < min) {
4664                         min  = c->sent;
4665                         conn = c;
4666                 }
4667
4668                 if (hci_conn_num(hdev, type) == num)
4669                         break;
4670         }
4671
4672         rcu_read_unlock();
4673
4674         if (conn) {
4675                 int cnt, q;
4676
4677                 switch (conn->type) {
4678                 case ACL_LINK:
4679                         cnt = hdev->acl_cnt;
4680                         break;
4681                 case SCO_LINK:
4682                 case ESCO_LINK:
4683                         cnt = hdev->sco_cnt;
4684                         break;
4685                 case LE_LINK:
4686                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4687                         break;
4688                 default:
4689                         cnt = 0;
4690                         BT_ERR("Unknown link type");
4691                 }
4692
4693                 q = cnt / num;
4694                 *quote = q ? q : 1;
4695         } else
4696                 *quote = 0;
4697
4698         BT_DBG("conn %p quote %d", conn, *quote);
4699         return conn;
4700 }
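
/* Worked example (editor's sketch): the quota above is a plain integer
 * division with a floor of one. With hdev->acl_cnt == 8 free slots and
 * num == 3 ACL connections holding queued data, the chosen (least
 * served) connection gets q = 8 / 3 = 2 packets this round; if cnt is
 * ever smaller than num, the "q ? q : 1" fallback still grants one.
 */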
4701
4702 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4703 {
4704         struct hci_conn_hash *h = &hdev->conn_hash;
4705         struct hci_conn *c;
4706
4707         BT_ERR("%s link tx timeout", hdev->name);
4708
4709         rcu_read_lock();
4710
4711         /* Kill stalled connections */
4712         list_for_each_entry_rcu(c, &h->list, list) {
4713                 if (c->type == type && c->sent) {
4714                         BT_ERR("%s killing stalled connection %pMR",
4715                                hdev->name, &c->dst);
4716                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4717                 }
4718         }
4719
4720         rcu_read_unlock();
4721 }
4722
4723 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4724                                       int *quote)
4725 {
4726         struct hci_conn_hash *h = &hdev->conn_hash;
4727         struct hci_chan *chan = NULL;
4728         unsigned int num = 0, min = ~0, cur_prio = 0;
4729         struct hci_conn *conn;
4730         int cnt, q, conn_num = 0;
4731
4732         BT_DBG("%s", hdev->name);
4733
4734         rcu_read_lock();
4735
4736         list_for_each_entry_rcu(conn, &h->list, list) {
4737                 struct hci_chan *tmp;
4738
4739                 if (conn->type != type)
4740                         continue;
4741
4742                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4743                         continue;
4744
4745                 conn_num++;
4746
4747                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4748                         struct sk_buff *skb;
4749
4750                         if (skb_queue_empty(&tmp->data_q))
4751                                 continue;
4752
4753                         skb = skb_peek(&tmp->data_q);
4754                         if (skb->priority < cur_prio)
4755                                 continue;
4756
4757                         if (skb->priority > cur_prio) {
4758                                 num = 0;
4759                                 min = ~0;
4760                                 cur_prio = skb->priority;
4761                         }
4762
4763                         num++;
4764
4765                         if (conn->sent < min) {
4766                                 min  = conn->sent;
4767                                 chan = tmp;
4768                         }
4769                 }
4770
4771                 if (hci_conn_num(hdev, type) == conn_num)
4772                         break;
4773         }
4774
4775         rcu_read_unlock();
4776
4777         if (!chan)
4778                 return NULL;
4779
4780         switch (chan->conn->type) {
4781         case ACL_LINK:
4782                 cnt = hdev->acl_cnt;
4783                 break;
4784         case AMP_LINK:
4785                 cnt = hdev->block_cnt;
4786                 break;
4787         case SCO_LINK:
4788         case ESCO_LINK:
4789                 cnt = hdev->sco_cnt;
4790                 break;
4791         case LE_LINK:
4792                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4793                 break;
4794         default:
4795                 cnt = 0;
4796                 BT_ERR("Unknown link type");
4797         }
4798
4799         q = cnt / num;
4800         *quote = q ? q : 1;
4801         BT_DBG("chan %p quote %d", chan, *quote);
4802         return chan;
4803 }
4804
4805 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4806 {
4807         struct hci_conn_hash *h = &hdev->conn_hash;
4808         struct hci_conn *conn;
4809         int num = 0;
4810
4811         BT_DBG("%s", hdev->name);
4812
4813         rcu_read_lock();
4814
4815         list_for_each_entry_rcu(conn, &h->list, list) {
4816                 struct hci_chan *chan;
4817
4818                 if (conn->type != type)
4819                         continue;
4820
4821                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4822                         continue;
4823
4824                 num++;
4825
4826                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4827                         struct sk_buff *skb;
4828
4829                         if (chan->sent) {
4830                                 chan->sent = 0;
4831                                 continue;
4832                         }
4833
4834                         if (skb_queue_empty(&chan->data_q))
4835                                 continue;
4836
4837                         skb = skb_peek(&chan->data_q);
4838                         if (skb->priority >= HCI_PRIO_MAX - 1)
4839                                 continue;
4840
4841                         skb->priority = HCI_PRIO_MAX - 1;
4842
4843                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4844                                skb->priority);
4845                 }
4846
4847                 if (hci_conn_num(hdev, type) == num)
4848                         break;
4849         }
4850
4851         rcu_read_unlock();
4852
4853 }

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

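/* ACL transmit watchdog: if the controller has returned no buffer
 * credits and nothing was sent within HCI_ACL_TX_TIMEOUT, invoke the
 * link timeout handling in hci_link_tx_to(). Skipped while the
 * controller is still unconfigured.
 */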
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

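/* Packet-based ACL scheduling: pick the best channel with
 * hci_chan_sent() and drain it until its quota, the controller's
 * buffer credits (acl_cnt) or a drop in packet priority ends the
 * inner loop.
 */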
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

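/* Block-based ACL scheduling: same structure as the packet-based
 * variant, but quotas and controller credits are accounted in buffer
 * blocks (__get_blocks() per packet) instead of whole packets. On AMP
 * controllers the traffic scheduled here is AMP_LINK.
 */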
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

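/* Dispatch ACL scheduling according to the flow control mode the
 * controller operates in.
 */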
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* A BR/EDR controller with no ACL links has nothing to schedule */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* An AMP controller with no AMP links has nothing to schedule */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

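/* Schedule eSCO */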
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

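/* Schedule LE data. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credits, which is why acl_cnt is used
 * whenever le_cnt is not available.
 */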
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

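/* TX work item: gives the ACL, SCO, eSCO and LE queues a scheduling
 * pass and then flushes the raw queue. While the device is bound to a
 * user channel only the raw queue is serviced.
 */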
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send pending frames to the HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

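/* A request is complete when the command queue is empty or when the
 * next queued command marks the start of a new request.
 */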
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

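/* Put a clone of the last sent command back at the head of the command
 * queue. Used to recover from the spontaneous reset complete events
 * described in hci_req_cmd_complete() below; an actual HCI_OP_RESET is
 * never resent.
 */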
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

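/* RX work item: drains hdev->rx_q, mirroring each frame to the monitor
 * socket (and to raw sockets in promiscuous mode) before handing it to
 * the handler matching its packet type.
 */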
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

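/* CMD work item: sends the next queued command once the controller
 * reports a free command slot (cmd_cnt). A clone is kept in
 * hdev->sent_cmd so the completion event can be matched and the
 * command resent if needed.
 */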
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

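/* Append an LE scan disable command to the given request */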
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

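/* Append the parameter and enable commands for an LE passive scan,
 * using the scan interval and window configured in hdev and an own
 * address type chosen by hci_update_random_address().
 */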
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: "
                       "status 0x%2.2x", status);
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns and hdev->pend_le_reports lists. If there are
 * pending LE connections or devices to report, we start the background
 * scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
            list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}