Bluetooth: Unify remote OOB data functions
net/bluetooth/hci_core.c (from cascardo/linux.git)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "smp.h"
41
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
45
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
49
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
53
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
56
57 /* ----- HCI requests ----- */
58
59 #define HCI_REQ_DONE      0
60 #define HCI_REQ_PEND      1
61 #define HCI_REQ_CANCELED  2
62
63 #define hci_req_lock(d)         mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
65
66 /* ---- HCI notifications ---- */
67
68 static void hci_notify(struct hci_dev *hdev, int event)
69 {
70         hci_sock_dev_event(hdev, event);
71 }
72
73 /* ---- HCI debugfs entries ---- */
74
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76                              size_t count, loff_t *ppos)
77 {
78         struct hci_dev *hdev = file->private_data;
79         char buf[3];
80
81         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82         buf[1] = '\n';
83         buf[2] = '\0';
84         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85 }
86
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88                               size_t count, loff_t *ppos)
89 {
90         struct hci_dev *hdev = file->private_data;
91         struct sk_buff *skb;
92         char buf[32];
93         size_t buf_size = min(count, (sizeof(buf)-1));
94         bool enable;
95         int err;
96
97         if (!test_bit(HCI_UP, &hdev->flags))
98                 return -ENETDOWN;
99
100         if (copy_from_user(buf, user_buf, buf_size))
101                 return -EFAULT;
102
103         buf[buf_size] = '\0';
104         if (strtobool(buf, &enable))
105                 return -EINVAL;
106
107         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108                 return -EALREADY;
109
110         hci_req_lock(hdev);
111         if (enable)
112                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113                                      HCI_CMD_TIMEOUT);
114         else
115                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116                                      HCI_CMD_TIMEOUT);
117         hci_req_unlock(hdev);
118
119         if (IS_ERR(skb))
120                 return PTR_ERR(skb);
121
122         err = -bt_to_errno(skb->data[0]);
123         kfree_skb(skb);
124
125         if (err < 0)
126                 return err;
127
128         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
129
130         return count;
131 }
132
133 static const struct file_operations dut_mode_fops = {
134         .open           = simple_open,
135         .read           = dut_mode_read,
136         .write          = dut_mode_write,
137         .llseek         = default_llseek,
138 };
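/* Usage sketch: dut_mode is a boolean debugfs attribute. Assuming debugfs is
 * mounted at /sys/kernel/debug and the controller is hci0, it can be driven
 * from userspace with something like:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' sends HCI_OP_ENABLE_DUT_MODE synchronously; writing 'N' resets
 * the controller, since Device Under Test mode can only be left via HCI Reset.
 */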
139
140 static int features_show(struct seq_file *f, void *ptr)
141 {
142         struct hci_dev *hdev = f->private;
143         u8 p;
144
145         hci_dev_lock(hdev);
146         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149                            hdev->features[p][0], hdev->features[p][1],
150                            hdev->features[p][2], hdev->features[p][3],
151                            hdev->features[p][4], hdev->features[p][5],
152                            hdev->features[p][6], hdev->features[p][7]);
153         }
154         if (lmp_le_capable(hdev))
155                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157                            hdev->le_features[0], hdev->le_features[1],
158                            hdev->le_features[2], hdev->le_features[3],
159                            hdev->le_features[4], hdev->le_features[5],
160                            hdev->le_features[6], hdev->le_features[7]);
161         hci_dev_unlock(hdev);
162
163         return 0;
164 }
165
166 static int features_open(struct inode *inode, struct file *file)
167 {
168         return single_open(file, features_show, inode->i_private);
169 }
170
171 static const struct file_operations features_fops = {
172         .open           = features_open,
173         .read           = seq_read,
174         .llseek         = seq_lseek,
175         .release        = single_release,
176 };
177
178 static int blacklist_show(struct seq_file *f, void *p)
179 {
180         struct hci_dev *hdev = f->private;
181         struct bdaddr_list *b;
182
183         hci_dev_lock(hdev);
184         list_for_each_entry(b, &hdev->blacklist, list)
185                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186         hci_dev_unlock(hdev);
187
188         return 0;
189 }
190
191 static int blacklist_open(struct inode *inode, struct file *file)
192 {
193         return single_open(file, blacklist_show, inode->i_private);
194 }
195
196 static const struct file_operations blacklist_fops = {
197         .open           = blacklist_open,
198         .read           = seq_read,
199         .llseek         = seq_lseek,
200         .release        = single_release,
201 };
202
203 static int uuids_show(struct seq_file *f, void *p)
204 {
205         struct hci_dev *hdev = f->private;
206         struct bt_uuid *uuid;
207
208         hci_dev_lock(hdev);
209         list_for_each_entry(uuid, &hdev->uuids, list) {
210                 u8 i, val[16];
211
212                 /* The Bluetooth UUID values are stored with the byte
213                  * order reversed (little endian), so convert them
214                  * into big-endian order for the %pUb modifier.
215                  */
216                 for (i = 0; i < 16; i++)
217                         val[i] = uuid->uuid[15 - i];
218
219                 seq_printf(f, "%pUb\n", val);
220         }
221         hci_dev_unlock(hdev);
222
223         return 0;
224 }
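/* Worked example of the reversal above: the 16-bit SPP UUID 0x1101 is stored
 * in hdev->uuids with the byte order reversed:
 *
 *   uuid->uuid[] = fb 34 9b 5f 80 00 00 80 00 10 00 00 01 11 00 00
 *
 * and after the copy loop %pUb prints the canonical big-endian form
 * 00001101-0000-1000-8000-00805f9b34fb.
 */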
225
226 static int uuids_open(struct inode *inode, struct file *file)
227 {
228         return single_open(file, uuids_show, inode->i_private);
229 }
230
231 static const struct file_operations uuids_fops = {
232         .open           = uuids_open,
233         .read           = seq_read,
234         .llseek         = seq_lseek,
235         .release        = single_release,
236 };
237
238 static int inquiry_cache_show(struct seq_file *f, void *p)
239 {
240         struct hci_dev *hdev = f->private;
241         struct discovery_state *cache = &hdev->discovery;
242         struct inquiry_entry *e;
243
244         hci_dev_lock(hdev);
245
246         list_for_each_entry(e, &cache->all, all) {
247                 struct inquiry_data *data = &e->data;
248                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249                            &data->bdaddr,
250                            data->pscan_rep_mode, data->pscan_period_mode,
251                            data->pscan_mode, data->dev_class[2],
252                            data->dev_class[1], data->dev_class[0],
253                            __le16_to_cpu(data->clock_offset),
254                            data->rssi, data->ssp_mode, e->timestamp);
255         }
256
257         hci_dev_unlock(hdev);
258
259         return 0;
260 }
261
262 static int inquiry_cache_open(struct inode *inode, struct file *file)
263 {
264         return single_open(file, inquiry_cache_show, inode->i_private);
265 }
266
267 static const struct file_operations inquiry_cache_fops = {
268         .open           = inquiry_cache_open,
269         .read           = seq_read,
270         .llseek         = seq_lseek,
271         .release        = single_release,
272 };
273
274 static int link_keys_show(struct seq_file *f, void *ptr)
275 {
276         struct hci_dev *hdev = f->private;
277         struct link_key *key;
278
279         rcu_read_lock();
280         list_for_each_entry_rcu(key, &hdev->link_keys, list)
281                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
283         rcu_read_unlock();
284
285         return 0;
286 }
287
288 static int link_keys_open(struct inode *inode, struct file *file)
289 {
290         return single_open(file, link_keys_show, inode->i_private);
291 }
292
293 static const struct file_operations link_keys_fops = {
294         .open           = link_keys_open,
295         .read           = seq_read,
296         .llseek         = seq_lseek,
297         .release        = single_release,
298 };
299
300 static int dev_class_show(struct seq_file *f, void *ptr)
301 {
302         struct hci_dev *hdev = f->private;
303
304         hci_dev_lock(hdev);
305         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306                    hdev->dev_class[1], hdev->dev_class[0]);
307         hci_dev_unlock(hdev);
308
309         return 0;
310 }
311
312 static int dev_class_open(struct inode *inode, struct file *file)
313 {
314         return single_open(file, dev_class_show, inode->i_private);
315 }
316
317 static const struct file_operations dev_class_fops = {
318         .open           = dev_class_open,
319         .read           = seq_read,
320         .llseek         = seq_lseek,
321         .release        = single_release,
322 };
323
324 static int voice_setting_get(void *data, u64 *val)
325 {
326         struct hci_dev *hdev = data;
327
328         hci_dev_lock(hdev);
329         *val = hdev->voice_setting;
330         hci_dev_unlock(hdev);
331
332         return 0;
333 }
334
335 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336                         NULL, "0x%4.4llx\n");
337
338 static int auto_accept_delay_set(void *data, u64 val)
339 {
340         struct hci_dev *hdev = data;
341
342         hci_dev_lock(hdev);
343         hdev->auto_accept_delay = val;
344         hci_dev_unlock(hdev);
345
346         return 0;
347 }
348
349 static int auto_accept_delay_get(void *data, u64 *val)
350 {
351         struct hci_dev *hdev = data;
352
353         hci_dev_lock(hdev);
354         *val = hdev->auto_accept_delay;
355         hci_dev_unlock(hdev);
356
357         return 0;
358 }
359
360 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361                         auto_accept_delay_set, "%llu\n");
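/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate for a
 * single u64 debugfs value: reads call the getter and print the result with
 * the given format string, writes parse a number and hand it to the setter.
 * A NULL setter, as used for voice_setting above, makes the file read-only.
 */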
362
363 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364                                      size_t count, loff_t *ppos)
365 {
366         struct hci_dev *hdev = file->private_data;
367         char buf[3];
368
369         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
370         buf[1] = '\n';
371         buf[2] = '\0';
372         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
373 }
374
375 static ssize_t force_sc_support_write(struct file *file,
376                                       const char __user *user_buf,
377                                       size_t count, loff_t *ppos)
378 {
379         struct hci_dev *hdev = file->private_data;
380         char buf[32];
381         size_t buf_size = min(count, (sizeof(buf)-1));
382         bool enable;
383
384         if (test_bit(HCI_UP, &hdev->flags))
385                 return -EBUSY;
386
387         if (copy_from_user(buf, user_buf, buf_size))
388                 return -EFAULT;
389
390         buf[buf_size] = '\0';
391         if (strtobool(buf, &enable))
392                 return -EINVAL;
393
394         if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
395                 return -EALREADY;
396
397         change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
398
399         return count;
400 }
401
402 static const struct file_operations force_sc_support_fops = {
403         .open           = simple_open,
404         .read           = force_sc_support_read,
405         .write          = force_sc_support_write,
406         .llseek         = default_llseek,
407 };
408
409 static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
410                                        size_t count, loff_t *ppos)
411 {
412         struct hci_dev *hdev = file->private_data;
413         char buf[3];
414
415         buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
416         buf[1] = '\n';
417         buf[2] = '\0';
418         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
419 }
420
421 static ssize_t force_lesc_support_write(struct file *file,
422                                         const char __user *user_buf,
423                                         size_t count, loff_t *ppos)
424 {
425         struct hci_dev *hdev = file->private_data;
426         char buf[32];
427         size_t buf_size = min(count, (sizeof(buf)-1));
428         bool enable;
429
430         if (copy_from_user(buf, user_buf, buf_size))
431                 return -EFAULT;
432
433         buf[buf_size] = '\0';
434         if (strtobool(buf, &enable))
435                 return -EINVAL;
436
437         if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
438                 return -EALREADY;
439
440         change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
441
442         return count;
443 }
444
445 static const struct file_operations force_lesc_support_fops = {
446         .open           = simple_open,
447         .read           = force_lesc_support_read,
448         .write          = force_lesc_support_write,
449         .llseek         = default_llseek,
450 };
451
452 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453                                  size_t count, loff_t *ppos)
454 {
455         struct hci_dev *hdev = file->private_data;
456         char buf[3];
457
458         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
459         buf[1] = '\n';
460         buf[2] = '\0';
461         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
462 }
463
464 static const struct file_operations sc_only_mode_fops = {
465         .open           = simple_open,
466         .read           = sc_only_mode_read,
467         .llseek         = default_llseek,
468 };
469
470 static int idle_timeout_set(void *data, u64 val)
471 {
472         struct hci_dev *hdev = data;
473
474         if (val != 0 && (val < 500 || val > 3600000))
475                 return -EINVAL;
476
477         hci_dev_lock(hdev);
478         hdev->idle_timeout = val;
479         hci_dev_unlock(hdev);
480
481         return 0;
482 }
483
484 static int idle_timeout_get(void *data, u64 *val)
485 {
486         struct hci_dev *hdev = data;
487
488         hci_dev_lock(hdev);
489         *val = hdev->idle_timeout;
490         hci_dev_unlock(hdev);
491
492         return 0;
493 }
494
495 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
496                         idle_timeout_set, "%llu\n");
497
498 static int rpa_timeout_set(void *data, u64 val)
499 {
500         struct hci_dev *hdev = data;
501
502         /* Require the RPA timeout to be at least 30 seconds and at most
503          * 24 hours.
504          */
505         if (val < 30 || val > (60 * 60 * 24))
506                 return -EINVAL;
507
508         hci_dev_lock(hdev);
509         hdev->rpa_timeout = val;
510         hci_dev_unlock(hdev);
511
512         return 0;
513 }
514
515 static int rpa_timeout_get(void *data, u64 *val)
516 {
517         struct hci_dev *hdev = data;
518
519         hci_dev_lock(hdev);
520         *val = hdev->rpa_timeout;
521         hci_dev_unlock(hdev);
522
523         return 0;
524 }
525
526 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
527                         rpa_timeout_set, "%llu\n");
528
529 static int sniff_min_interval_set(void *data, u64 val)
530 {
531         struct hci_dev *hdev = data;
532
533         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534                 return -EINVAL;
535
536         hci_dev_lock(hdev);
537         hdev->sniff_min_interval = val;
538         hci_dev_unlock(hdev);
539
540         return 0;
541 }
542
543 static int sniff_min_interval_get(void *data, u64 *val)
544 {
545         struct hci_dev *hdev = data;
546
547         hci_dev_lock(hdev);
548         *val = hdev->sniff_min_interval;
549         hci_dev_unlock(hdev);
550
551         return 0;
552 }
553
554 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
555                         sniff_min_interval_set, "%llu\n");
556
557 static int sniff_max_interval_set(void *data, u64 val)
558 {
559         struct hci_dev *hdev = data;
560
561         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562                 return -EINVAL;
563
564         hci_dev_lock(hdev);
565         hdev->sniff_max_interval = val;
566         hci_dev_unlock(hdev);
567
568         return 0;
569 }
570
571 static int sniff_max_interval_get(void *data, u64 *val)
572 {
573         struct hci_dev *hdev = data;
574
575         hci_dev_lock(hdev);
576         *val = hdev->sniff_max_interval;
577         hci_dev_unlock(hdev);
578
579         return 0;
580 }
581
582 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
583                         sniff_max_interval_set, "%llu\n");
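/* Sniff intervals are expressed in baseband slots of 0.625 ms, and the
 * specification requires an even number of slots, which is why odd values
 * are rejected above. For example, the value 800 corresponds to
 * 800 * 0.625 ms = 500 ms.
 */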
584
585 static int conn_info_min_age_set(void *data, u64 val)
586 {
587         struct hci_dev *hdev = data;
588
589         if (val == 0 || val > hdev->conn_info_max_age)
590                 return -EINVAL;
591
592         hci_dev_lock(hdev);
593         hdev->conn_info_min_age = val;
594         hci_dev_unlock(hdev);
595
596         return 0;
597 }
598
599 static int conn_info_min_age_get(void *data, u64 *val)
600 {
601         struct hci_dev *hdev = data;
602
603         hci_dev_lock(hdev);
604         *val = hdev->conn_info_min_age;
605         hci_dev_unlock(hdev);
606
607         return 0;
608 }
609
610 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
611                         conn_info_min_age_set, "%llu\n");
612
613 static int conn_info_max_age_set(void *data, u64 val)
614 {
615         struct hci_dev *hdev = data;
616
617         if (val == 0 || val < hdev->conn_info_min_age)
618                 return -EINVAL;
619
620         hci_dev_lock(hdev);
621         hdev->conn_info_max_age = val;
622         hci_dev_unlock(hdev);
623
624         return 0;
625 }
626
627 static int conn_info_max_age_get(void *data, u64 *val)
628 {
629         struct hci_dev *hdev = data;
630
631         hci_dev_lock(hdev);
632         *val = hdev->conn_info_max_age;
633         hci_dev_unlock(hdev);
634
635         return 0;
636 }
637
638 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
639                         conn_info_max_age_set, "%llu\n");
640
641 static int identity_show(struct seq_file *f, void *p)
642 {
643         struct hci_dev *hdev = f->private;
644         bdaddr_t addr;
645         u8 addr_type;
646
647         hci_dev_lock(hdev);
648
649         hci_copy_identity_address(hdev, &addr, &addr_type);
650
651         seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
652                    16, hdev->irk, &hdev->rpa);
653
654         hci_dev_unlock(hdev);
655
656         return 0;
657 }
658
659 static int identity_open(struct inode *inode, struct file *file)
660 {
661         return single_open(file, identity_show, inode->i_private);
662 }
663
664 static const struct file_operations identity_fops = {
665         .open           = identity_open,
666         .read           = seq_read,
667         .llseek         = seq_lseek,
668         .release        = single_release,
669 };
670
671 static int random_address_show(struct seq_file *f, void *p)
672 {
673         struct hci_dev *hdev = f->private;
674
675         hci_dev_lock(hdev);
676         seq_printf(f, "%pMR\n", &hdev->random_addr);
677         hci_dev_unlock(hdev);
678
679         return 0;
680 }
681
682 static int random_address_open(struct inode *inode, struct file *file)
683 {
684         return single_open(file, random_address_show, inode->i_private);
685 }
686
687 static const struct file_operations random_address_fops = {
688         .open           = random_address_open,
689         .read           = seq_read,
690         .llseek         = seq_lseek,
691         .release        = single_release,
692 };
693
694 static int static_address_show(struct seq_file *f, void *p)
695 {
696         struct hci_dev *hdev = f->private;
697
698         hci_dev_lock(hdev);
699         seq_printf(f, "%pMR\n", &hdev->static_addr);
700         hci_dev_unlock(hdev);
701
702         return 0;
703 }
704
705 static int static_address_open(struct inode *inode, struct file *file)
706 {
707         return single_open(file, static_address_show, inode->i_private);
708 }
709
710 static const struct file_operations static_address_fops = {
711         .open           = static_address_open,
712         .read           = seq_read,
713         .llseek         = seq_lseek,
714         .release        = single_release,
715 };
716
717 static ssize_t force_static_address_read(struct file *file,
718                                          char __user *user_buf,
719                                          size_t count, loff_t *ppos)
720 {
721         struct hci_dev *hdev = file->private_data;
722         char buf[3];
723
724         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
725         buf[1] = '\n';
726         buf[2] = '\0';
727         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
728 }
729
730 static ssize_t force_static_address_write(struct file *file,
731                                           const char __user *user_buf,
732                                           size_t count, loff_t *ppos)
733 {
734         struct hci_dev *hdev = file->private_data;
735         char buf[32];
736         size_t buf_size = min(count, (sizeof(buf)-1));
737         bool enable;
738
739         if (test_bit(HCI_UP, &hdev->flags))
740                 return -EBUSY;
741
742         if (copy_from_user(buf, user_buf, buf_size))
743                 return -EFAULT;
744
745         buf[buf_size] = '\0';
746         if (strtobool(buf, &enable))
747                 return -EINVAL;
748
749         if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
750                 return -EALREADY;
751
752         change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
753
754         return count;
755 }
756
757 static const struct file_operations force_static_address_fops = {
758         .open           = simple_open,
759         .read           = force_static_address_read,
760         .write          = force_static_address_write,
761         .llseek         = default_llseek,
762 };
763
764 static int white_list_show(struct seq_file *f, void *ptr)
765 {
766         struct hci_dev *hdev = f->private;
767         struct bdaddr_list *b;
768
769         hci_dev_lock(hdev);
770         list_for_each_entry(b, &hdev->le_white_list, list)
771                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772         hci_dev_unlock(hdev);
773
774         return 0;
775 }
776
777 static int white_list_open(struct inode *inode, struct file *file)
778 {
779         return single_open(file, white_list_show, inode->i_private);
780 }
781
782 static const struct file_operations white_list_fops = {
783         .open           = white_list_open,
784         .read           = seq_read,
785         .llseek         = seq_lseek,
786         .release        = single_release,
787 };
788
789 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
790 {
791         struct hci_dev *hdev = f->private;
792         struct smp_irk *irk;
793
794         rcu_read_lock();
795         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
796                 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
797                            &irk->bdaddr, irk->addr_type,
798                            16, irk->val, &irk->rpa);
799         }
800         rcu_read_unlock();
801
802         return 0;
803 }
804
805 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
806 {
807         return single_open(file, identity_resolving_keys_show,
808                            inode->i_private);
809 }
810
811 static const struct file_operations identity_resolving_keys_fops = {
812         .open           = identity_resolving_keys_open,
813         .read           = seq_read,
814         .llseek         = seq_lseek,
815         .release        = single_release,
816 };
817
818 static int long_term_keys_show(struct seq_file *f, void *ptr)
819 {
820         struct hci_dev *hdev = f->private;
821         struct smp_ltk *ltk;
822
823         rcu_read_lock();
824         list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
825                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828                            __le64_to_cpu(ltk->rand), 16, ltk->val);
829         rcu_read_unlock();
830
831         return 0;
832 }
833
834 static int long_term_keys_open(struct inode *inode, struct file *file)
835 {
836         return single_open(file, long_term_keys_show, inode->i_private);
837 }
838
839 static const struct file_operations long_term_keys_fops = {
840         .open           = long_term_keys_open,
841         .read           = seq_read,
842         .llseek         = seq_lseek,
843         .release        = single_release,
844 };
845
846 static int conn_min_interval_set(void *data, u64 val)
847 {
848         struct hci_dev *hdev = data;
849
850         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
851                 return -EINVAL;
852
853         hci_dev_lock(hdev);
854         hdev->le_conn_min_interval = val;
855         hci_dev_unlock(hdev);
856
857         return 0;
858 }
859
860 static int conn_min_interval_get(void *data, u64 *val)
861 {
862         struct hci_dev *hdev = data;
863
864         hci_dev_lock(hdev);
865         *val = hdev->le_conn_min_interval;
866         hci_dev_unlock(hdev);
867
868         return 0;
869 }
870
871 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
872                         conn_min_interval_set, "%llu\n");
873
874 static int conn_max_interval_set(void *data, u64 val)
875 {
876         struct hci_dev *hdev = data;
877
878         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
879                 return -EINVAL;
880
881         hci_dev_lock(hdev);
882         hdev->le_conn_max_interval = val;
883         hci_dev_unlock(hdev);
884
885         return 0;
886 }
887
888 static int conn_max_interval_get(void *data, u64 *val)
889 {
890         struct hci_dev *hdev = data;
891
892         hci_dev_lock(hdev);
893         *val = hdev->le_conn_max_interval;
894         hci_dev_unlock(hdev);
895
896         return 0;
897 }
898
899 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
900                         conn_max_interval_set, "%llu\n");
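/* LE connection intervals are in units of 1.25 ms, so the range checked
 * above, 0x0006 to 0x0c80, corresponds to 7.5 ms through 4 s. For example,
 * val = 0x0028 (40) means 40 * 1.25 ms = 50 ms.
 */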
901
902 static int conn_latency_set(void *data, u64 val)
903 {
904         struct hci_dev *hdev = data;
905
906         if (val > 0x01f3)
907                 return -EINVAL;
908
909         hci_dev_lock(hdev);
910         hdev->le_conn_latency = val;
911         hci_dev_unlock(hdev);
912
913         return 0;
914 }
915
916 static int conn_latency_get(void *data, u64 *val)
917 {
918         struct hci_dev *hdev = data;
919
920         hci_dev_lock(hdev);
921         *val = hdev->le_conn_latency;
922         hci_dev_unlock(hdev);
923
924         return 0;
925 }
926
927 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
928                         conn_latency_set, "%llu\n");
929
930 static int supervision_timeout_set(void *data, u64 val)
931 {
932         struct hci_dev *hdev = data;
933
934         if (val < 0x000a || val > 0x0c80)
935                 return -EINVAL;
936
937         hci_dev_lock(hdev);
938         hdev->le_supv_timeout = val;
939         hci_dev_unlock(hdev);
940
941         return 0;
942 }
943
944 static int supervision_timeout_get(void *data, u64 *val)
945 {
946         struct hci_dev *hdev = data;
947
948         hci_dev_lock(hdev);
949         *val = hdev->le_supv_timeout;
950         hci_dev_unlock(hdev);
951
952         return 0;
953 }
954
955 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
956                         supervision_timeout_set, "%llu\n");
957
958 static int adv_channel_map_set(void *data, u64 val)
959 {
960         struct hci_dev *hdev = data;
961
962         if (val < 0x01 || val > 0x07)
963                 return -EINVAL;
964
965         hci_dev_lock(hdev);
966         hdev->le_adv_channel_map = val;
967         hci_dev_unlock(hdev);
968
969         return 0;
970 }
971
972 static int adv_channel_map_get(void *data, u64 *val)
973 {
974         struct hci_dev *hdev = data;
975
976         hci_dev_lock(hdev);
977         *val = hdev->le_adv_channel_map;
978         hci_dev_unlock(hdev);
979
980         return 0;
981 }
982
983 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
984                         adv_channel_map_set, "%llu\n");
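/* The advertising channel map is a bitmask of the three LE advertising
 * channels: 0x01 = channel 37, 0x02 = channel 38, 0x04 = channel 39. The
 * value 0x07 enables all three, which is why anything outside 0x01-0x07 is
 * rejected above.
 */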
985
986 static int adv_min_interval_set(void *data, u64 val)
987 {
988         struct hci_dev *hdev = data;
989
990         if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
991                 return -EINVAL;
992
993         hci_dev_lock(hdev);
994         hdev->le_adv_min_interval = val;
995         hci_dev_unlock(hdev);
996
997         return 0;
998 }
999
1000 static int adv_min_interval_get(void *data, u64 *val)
1001 {
1002         struct hci_dev *hdev = data;
1003
1004         hci_dev_lock(hdev);
1005         *val = hdev->le_adv_min_interval;
1006         hci_dev_unlock(hdev);
1007
1008         return 0;
1009 }
1010
1011 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1012                         adv_min_interval_set, "%llu\n");
1013
1014 static int adv_max_interval_set(void *data, u64 val)
1015 {
1016         struct hci_dev *hdev = data;
1017
1018         if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1019                 return -EINVAL;
1020
1021         hci_dev_lock(hdev);
1022         hdev->le_adv_max_interval = val;
1023         hci_dev_unlock(hdev);
1024
1025         return 0;
1026 }
1027
1028 static int adv_max_interval_get(void *data, u64 *val)
1029 {
1030         struct hci_dev *hdev = data;
1031
1032         hci_dev_lock(hdev);
1033         *val = hdev->le_adv_max_interval;
1034         hci_dev_unlock(hdev);
1035
1036         return 0;
1037 }
1038
1039 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1040                         adv_max_interval_set, "%llu\n");
1041
1042 static int device_list_show(struct seq_file *f, void *ptr)
1043 {
1044         struct hci_dev *hdev = f->private;
1045         struct hci_conn_params *p;
1046         struct bdaddr_list *b;
1047
1048         hci_dev_lock(hdev);
1049         list_for_each_entry(b, &hdev->whitelist, list)
1050                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1051         list_for_each_entry(p, &hdev->le_conn_params, list) {
1052                 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1053                            p->auto_connect);
1054         }
1055         hci_dev_unlock(hdev);
1056
1057         return 0;
1058 }
1059
1060 static int device_list_open(struct inode *inode, struct file *file)
1061 {
1062         return single_open(file, device_list_show, inode->i_private);
1063 }
1064
1065 static const struct file_operations device_list_fops = {
1066         .open           = device_list_open,
1067         .read           = seq_read,
1068         .llseek         = seq_lseek,
1069         .release        = single_release,
1070 };
1071
1072 /* ---- HCI requests ---- */
1073
1074 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1075 {
1076         BT_DBG("%s result 0x%2.2x", hdev->name, result);
1077
1078         if (hdev->req_status == HCI_REQ_PEND) {
1079                 hdev->req_result = result;
1080                 hdev->req_status = HCI_REQ_DONE;
1081                 wake_up_interruptible(&hdev->req_wait_q);
1082         }
1083 }
1084
1085 static void hci_req_cancel(struct hci_dev *hdev, int err)
1086 {
1087         BT_DBG("%s err 0x%2.2x", hdev->name, err);
1088
1089         if (hdev->req_status == HCI_REQ_PEND) {
1090                 hdev->req_result = err;
1091                 hdev->req_status = HCI_REQ_CANCELED;
1092                 wake_up_interruptible(&hdev->req_wait_q);
1093         }
1094 }
1095
1096 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1097                                             u8 event)
1098 {
1099         struct hci_ev_cmd_complete *ev;
1100         struct hci_event_hdr *hdr;
1101         struct sk_buff *skb;
1102
1103         hci_dev_lock(hdev);
1104
1105         skb = hdev->recv_evt;
1106         hdev->recv_evt = NULL;
1107
1108         hci_dev_unlock(hdev);
1109
1110         if (!skb)
1111                 return ERR_PTR(-ENODATA);
1112
1113         if (skb->len < sizeof(*hdr)) {
1114                 BT_ERR("Too short HCI event");
1115                 goto failed;
1116         }
1117
1118         hdr = (void *) skb->data;
1119         skb_pull(skb, HCI_EVENT_HDR_SIZE);
1120
1121         if (event) {
1122                 if (hdr->evt != event)
1123                         goto failed;
1124                 return skb;
1125         }
1126
1127         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1128                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1129                 goto failed;
1130         }
1131
1132         if (skb->len < sizeof(*ev)) {
1133                 BT_ERR("Too short cmd_complete event");
1134                 goto failed;
1135         }
1136
1137         ev = (void *) skb->data;
1138         skb_pull(skb, sizeof(*ev));
1139
1140         if (opcode == __le16_to_cpu(ev->opcode))
1141                 return skb;
1142
1143         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1144                __le16_to_cpu(ev->opcode));
1145
1146 failed:
1147         kfree_skb(skb);
1148         return ERR_PTR(-ENODATA);
1149 }
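/* The skb handed back above is laid out as it arrived from the controller,
 * minus the headers that were pulled:
 *
 *   hci_event_hdr | hci_ev_cmd_complete | command return parameters
 *
 * so by the time the caller sees it, skb->data points at the return
 * parameters and skb->data[0] is typically the command status byte.
 */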
1150
1151 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1152                                   const void *param, u8 event, u32 timeout)
1153 {
1154         DECLARE_WAITQUEUE(wait, current);
1155         struct hci_request req;
1156         int err = 0;
1157
1158         BT_DBG("%s", hdev->name);
1159
1160         hci_req_init(&req, hdev);
1161
1162         hci_req_add_ev(&req, opcode, plen, param, event);
1163
1164         hdev->req_status = HCI_REQ_PEND;
1165
1166         add_wait_queue(&hdev->req_wait_q, &wait);
1167         set_current_state(TASK_INTERRUPTIBLE);
1168
1169         err = hci_req_run(&req, hci_req_sync_complete);
1170         if (err < 0) {
1171                 remove_wait_queue(&hdev->req_wait_q, &wait);
1172                 set_current_state(TASK_RUNNING);
1173                 return ERR_PTR(err);
1174         }
1175
1176         schedule_timeout(timeout);
1177
1178         remove_wait_queue(&hdev->req_wait_q, &wait);
1179
1180         if (signal_pending(current))
1181                 return ERR_PTR(-EINTR);
1182
1183         switch (hdev->req_status) {
1184         case HCI_REQ_DONE:
1185                 err = -bt_to_errno(hdev->req_result);
1186                 break;
1187
1188         case HCI_REQ_CANCELED:
1189                 err = -hdev->req_result;
1190                 break;
1191
1192         default:
1193                 err = -ETIMEDOUT;
1194                 break;
1195         }
1196
1197         hdev->req_status = hdev->req_result = 0;
1198
1199         BT_DBG("%s end: err %d", hdev->name, err);
1200
1201         if (err < 0)
1202                 return ERR_PTR(err);
1203
1204         return hci_get_cmd_complete(hdev, opcode, event);
1205 }
1206 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1207
1208 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1209                                const void *param, u32 timeout)
1210 {
1211         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1212 }
1213 EXPORT_SYMBOL(__hci_cmd_sync);
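/* Typical use, modelled on dut_mode_write() above: issue one command
 * synchronously while holding the request lock, then check the status byte
 * of the returned parameters. A minimal sketch:
 *
 *   struct sk_buff *skb;
 *   int err;
 *
 *   hci_req_lock(hdev);
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *   hci_req_unlock(hdev);
 *
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *
 *   err = -bt_to_errno(skb->data[0]);
 *   kfree_skb(skb);
 */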
1214
1215 /* Execute request and wait for completion. */
1216 static int __hci_req_sync(struct hci_dev *hdev,
1217                           void (*func)(struct hci_request *req,
1218                                       unsigned long opt),
1219                           unsigned long opt, __u32 timeout)
1220 {
1221         struct hci_request req;
1222         DECLARE_WAITQUEUE(wait, current);
1223         int err = 0;
1224
1225         BT_DBG("%s start", hdev->name);
1226
1227         hci_req_init(&req, hdev);
1228
1229         hdev->req_status = HCI_REQ_PEND;
1230
1231         func(&req, opt);
1232
1233         add_wait_queue(&hdev->req_wait_q, &wait);
1234         set_current_state(TASK_INTERRUPTIBLE);
1235
1236         err = hci_req_run(&req, hci_req_sync_complete);
1237         if (err < 0) {
1238                 hdev->req_status = 0;
1239
1240                 remove_wait_queue(&hdev->req_wait_q, &wait);
1241                 set_current_state(TASK_RUNNING);
1242
1243                 /* ENODATA means the HCI request command queue is empty.
1244                  * This can happen when a request with conditionals doesn't
1245                  * trigger any commands to be sent. This is normal behavior
1246                  * and should not trigger an error return.
1247                  */
1248                 if (err == -ENODATA)
1249                         return 0;
1250
1251                 return err;
1252         }
1253
1254         schedule_timeout(timeout);
1255
1256         remove_wait_queue(&hdev->req_wait_q, &wait);
1257
1258         if (signal_pending(current))
1259                 return -EINTR;
1260
1261         switch (hdev->req_status) {
1262         case HCI_REQ_DONE:
1263                 err = -bt_to_errno(hdev->req_result);
1264                 break;
1265
1266         case HCI_REQ_CANCELED:
1267                 err = -hdev->req_result;
1268                 break;
1269
1270         default:
1271                 err = -ETIMEDOUT;
1272                 break;
1273         }
1274
1275         hdev->req_status = hdev->req_result = 0;
1276
1277         BT_DBG("%s end: err %d", hdev->name, err);
1278
1279         return err;
1280 }
1281
1282 static int hci_req_sync(struct hci_dev *hdev,
1283                         void (*req)(struct hci_request *req,
1284                                     unsigned long opt),
1285                         unsigned long opt, __u32 timeout)
1286 {
1287         int ret;
1288
1289         if (!test_bit(HCI_UP, &hdev->flags))
1290                 return -ENETDOWN;
1291
1292         /* Serialize all requests */
1293         hci_req_lock(hdev);
1294         ret = __hci_req_sync(hdev, req, opt, timeout);
1295         hci_req_unlock(hdev);
1296
1297         return ret;
1298 }
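/* A request callback simply queues commands on the passed-in request, as
 * hci_reset_req() below does; hci_req_sync() then runs the request and
 * sleeps until hci_req_sync_complete() fires. A minimal caller sketch:
 *
 *   err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */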
1299
1300 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1301 {
1302         BT_DBG("%s %ld", req->hdev->name, opt);
1303
1304         /* Reset device */
1305         set_bit(HCI_RESET, &req->hdev->flags);
1306         hci_req_add(req, HCI_OP_RESET, 0, NULL);
1307 }
1308
1309 static void bredr_init(struct hci_request *req)
1310 {
1311         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1312
1313         /* Read Local Supported Features */
1314         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1315
1316         /* Read Local Version */
1317         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1318
1319         /* Read BD Address */
1320         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1321 }
1322
1323 static void amp_init(struct hci_request *req)
1324 {
1325         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1326
1327         /* Read Local Version */
1328         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1329
1330         /* Read Local Supported Commands */
1331         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1332
1333         /* Read Local Supported Features */
1334         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1335
1336         /* Read Local AMP Info */
1337         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1338
1339         /* Read Data Block Size */
1340         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1341
1342         /* Read Flow Control Mode */
1343         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1344
1345         /* Read Location Data */
1346         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1347 }
1348
1349 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1350 {
1351         struct hci_dev *hdev = req->hdev;
1352
1353         BT_DBG("%s %ld", hdev->name, opt);
1354
1355         /* Reset */
1356         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1357                 hci_reset_req(req, 0);
1358
1359         switch (hdev->dev_type) {
1360         case HCI_BREDR:
1361                 bredr_init(req);
1362                 break;
1363
1364         case HCI_AMP:
1365                 amp_init(req);
1366                 break;
1367
1368         default:
1369                 BT_ERR("Unknown device type %d", hdev->dev_type);
1370                 break;
1371         }
1372 }
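/* Controller bring-up is staged: hci_init1_req() resets the controller and
 * reads the basic identity (features, version, address), and the later
 * hci_init2_req(), hci_init3_req() and hci_init4_req() stages build on those
 * results to configure event masks, link policy and the optional BR/EDR and
 * LE features.
 */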
1373
1374 static void bredr_setup(struct hci_request *req)
1375 {
1376         struct hci_dev *hdev = req->hdev;
1377
1378         __le16 param;
1379         __u8 flt_type;
1380
1381         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1382         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1383
1384         /* Read Class of Device */
1385         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1386
1387         /* Read Local Name */
1388         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1389
1390         /* Read Voice Setting */
1391         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1392
1393         /* Read Number of Supported IAC */
1394         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1395
1396         /* Read Current IAC LAP */
1397         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1398
1399         /* Clear Event Filters */
1400         flt_type = HCI_FLT_CLEAR_ALL;
1401         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1402
1403         /* Connection accept timeout ~20 secs */
1404         param = cpu_to_le16(0x7d00);
1405         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1406
1407         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1408          * but it does not support page scan related HCI commands.
1409          */
1410         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1411                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1412                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1413         }
1414 }
1415
1416 static void le_setup(struct hci_request *req)
1417 {
1418         struct hci_dev *hdev = req->hdev;
1419
1420         /* Read LE Buffer Size */
1421         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1422
1423         /* Read LE Local Supported Features */
1424         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1425
1426         /* Read LE Supported States */
1427         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1428
1429         /* Read LE White List Size */
1430         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1431
1432         /* Clear LE White List */
1433         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1434
1435         /* LE-only controllers have LE implicitly enabled */
1436         if (!lmp_bredr_capable(hdev))
1437                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1438 }
1439
1440 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1441 {
1442         if (lmp_ext_inq_capable(hdev))
1443                 return 0x02;
1444
1445         if (lmp_inq_rssi_capable(hdev))
1446                 return 0x01;
1447
1448         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1449             hdev->lmp_subver == 0x0757)
1450                 return 0x01;
1451
1452         if (hdev->manufacturer == 15) {
1453                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1454                         return 0x01;
1455                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1456                         return 0x01;
1457                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1458                         return 0x01;
1459         }
1460
1461         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1462             hdev->lmp_subver == 0x1805)
1463                 return 0x01;
1464
1465         return 0x00;
1466 }
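/* Inquiry mode values, per the Write Inquiry Mode command: 0x00 is the
 * standard inquiry result format, 0x01 adds RSSI, and 0x02 selects the RSSI
 * or extended inquiry result format. The manufacturer/revision checks above
 * whitelist old controllers that support RSSI results without advertising it
 * in their feature bits.
 */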
1467
1468 static void hci_setup_inquiry_mode(struct hci_request *req)
1469 {
1470         u8 mode;
1471
1472         mode = hci_get_inquiry_mode(req->hdev);
1473
1474         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1475 }
1476
1477 static void hci_setup_event_mask(struct hci_request *req)
1478 {
1479         struct hci_dev *hdev = req->hdev;
1480
1481         /* The second byte is 0xff instead of 0x9f (two reserved bits
1482          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1483          * command otherwise.
1484          */
1485         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1486
1487         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1488          * any event mask for pre-1.2 devices.
1489          */
1490         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1491                 return;
1492
1493         if (lmp_bredr_capable(hdev)) {
1494                 events[4] |= 0x01; /* Flow Specification Complete */
1495                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1497                 events[5] |= 0x08; /* Synchronous Connection Complete */
1498                 events[5] |= 0x10; /* Synchronous Connection Changed */
1499         } else {
1500                 /* Use a different default for LE-only devices */
1501                 memset(events, 0, sizeof(events));
1502                 events[0] |= 0x10; /* Disconnection Complete */
1503                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1504                 events[1] |= 0x20; /* Command Complete */
1505                 events[1] |= 0x40; /* Command Status */
1506                 events[1] |= 0x80; /* Hardware Error */
1507                 events[2] |= 0x04; /* Number of Completed Packets */
1508                 events[3] |= 0x02; /* Data Buffer Overflow */
1509
1510                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1511                         events[0] |= 0x80; /* Encryption Change */
1512                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1513                 }
1514         }
1515
1516         if (lmp_inq_rssi_capable(hdev))
1517                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1518
1519         if (lmp_sniffsubr_capable(hdev))
1520                 events[5] |= 0x20; /* Sniff Subrating */
1521
1522         if (lmp_pause_enc_capable(hdev))
1523                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1524
1525         if (lmp_ext_inq_capable(hdev))
1526                 events[5] |= 0x40; /* Extended Inquiry Result */
1527
1528         if (lmp_no_flush_capable(hdev))
1529                 events[7] |= 0x01; /* Enhanced Flush Complete */
1530
1531         if (lmp_lsto_capable(hdev))
1532                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1533
1534         if (lmp_ssp_capable(hdev)) {
1535                 events[6] |= 0x01;      /* IO Capability Request */
1536                 events[6] |= 0x02;      /* IO Capability Response */
1537                 events[6] |= 0x04;      /* User Confirmation Request */
1538                 events[6] |= 0x08;      /* User Passkey Request */
1539                 events[6] |= 0x10;      /* Remote OOB Data Request */
1540                 events[6] |= 0x20;      /* Simple Pairing Complete */
1541                 events[7] |= 0x04;      /* User Passkey Notification */
1542                 events[7] |= 0x08;      /* Keypress Notification */
1543                 events[7] |= 0x10;      /* Remote Host Supported
1544                                          * Features Notification
1545                                          */
1546         }
1547
1548         if (lmp_le_capable(hdev))
1549                 events[7] |= 0x20;      /* LE Meta-Event */
1550
1551         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1552 }
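/* Each events[] byte covers eight event-mask bits, so mask bit n lives at
 * events[n / 8] & (1 << (n % 8)). For example, the Sniff Subrating event is
 * bit 45, hence events[5] |= 0x20 above.
 */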
1553
1554 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1555 {
1556         struct hci_dev *hdev = req->hdev;
1557
1558         if (lmp_bredr_capable(hdev))
1559                 bredr_setup(req);
1560         else
1561                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1562
1563         if (lmp_le_capable(hdev))
1564                 le_setup(req);
1565
1566         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the Read
1567          * Local Supported Commands HCI command.
1568          */
1569         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1570                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1571
1572         if (lmp_ssp_capable(hdev)) {
1573                 /* When SSP is available, the host features page
1574                  * should also be available. However, some
1575                  * controllers list the max_page as 0 as long as SSP
1576                  * has not been enabled. To achieve proper debugging
1577                  * output, force max_page to a minimum of 1.
1578                  */
1579                 hdev->max_page = 0x01;
1580
1581                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1582                         u8 mode = 0x01;
1583                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1584                                     sizeof(mode), &mode);
1585                 } else {
1586                         struct hci_cp_write_eir cp;
1587
1588                         memset(hdev->eir, 0, sizeof(hdev->eir));
1589                         memset(&cp, 0, sizeof(cp));
1590
1591                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1592                 }
1593         }
1594
1595         if (lmp_inq_rssi_capable(hdev))
1596                 hci_setup_inquiry_mode(req);
1597
1598         if (lmp_inq_tx_pwr_capable(hdev))
1599                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1600
1601         if (lmp_ext_feat_capable(hdev)) {
1602                 struct hci_cp_read_local_ext_features cp;
1603
1604                 cp.page = 0x01;
1605                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1606                             sizeof(cp), &cp);
1607         }
1608
1609         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1610                 u8 enable = 1;
1611                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1612                             &enable);
1613         }
1614 }
1615
1616 static void hci_setup_link_policy(struct hci_request *req)
1617 {
1618         struct hci_dev *hdev = req->hdev;
1619         struct hci_cp_write_def_link_policy cp;
1620         u16 link_policy = 0;
1621
1622         if (lmp_rswitch_capable(hdev))
1623                 link_policy |= HCI_LP_RSWITCH;
1624         if (lmp_hold_capable(hdev))
1625                 link_policy |= HCI_LP_HOLD;
1626         if (lmp_sniff_capable(hdev))
1627                 link_policy |= HCI_LP_SNIFF;
1628         if (lmp_park_capable(hdev))
1629                 link_policy |= HCI_LP_PARK;
1630
1631         cp.policy = cpu_to_le16(link_policy);
1632         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1633 }
1634
1635 static void hci_set_le_support(struct hci_request *req)
1636 {
1637         struct hci_dev *hdev = req->hdev;
1638         struct hci_cp_write_le_host_supported cp;
1639
1640         /* LE-only devices do not support explicit enablement */
1641         if (!lmp_bredr_capable(hdev))
1642                 return;
1643
1644         memset(&cp, 0, sizeof(cp));
1645
1646         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1647                 cp.le = 0x01;
1648                 cp.simul = 0x00;
1649         }
1650
1651         if (cp.le != lmp_host_le_capable(hdev))
1652                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1653                             &cp);
1654 }
1655
1656 static void hci_set_event_mask_page_2(struct hci_request *req)
1657 {
1658         struct hci_dev *hdev = req->hdev;
1659         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1660
1661         /* If Connectionless Slave Broadcast master role is supported,
1662          * enable all necessary events for it.
1663          */
1664         if (lmp_csb_master_capable(hdev)) {
1665                 events[1] |= 0x40;      /* Triggered Clock Capture */
1666                 events[1] |= 0x80;      /* Synchronization Train Complete */
1667                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1668                 events[2] |= 0x20;      /* CSB Channel Map Change */
1669         }
1670
1671         /* If Connectionless Slave Broadcast slave role is supported,
1672          * enable all necessary events for it.
1673          */
1674         if (lmp_csb_slave_capable(hdev)) {
1675                 events[2] |= 0x01;      /* Synchronization Train Received */
1676                 events[2] |= 0x02;      /* CSB Receive */
1677                 events[2] |= 0x04;      /* CSB Timeout */
1678                 events[2] |= 0x08;      /* Truncated Page Complete */
1679         }
1680
1681         /* Enable Authenticated Payload Timeout Expired event if supported */
1682         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1683                 events[2] |= 0x80;
1684
1685         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1686 }
1687
1688 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1689 {
1690         struct hci_dev *hdev = req->hdev;
1691         u8 p;
1692
1693         hci_setup_event_mask(req);
1694
1695         /* Some Broadcom-based Bluetooth controllers do not support the
1696          * Delete Stored Link Key command. They clearly indicate its
1697          * absence in the bit mask of supported commands.
1698          *
1699          * Check the supported commands and send the command only if it is
1700          * marked as supported. If not supported, assume that the controller
1701          * does not have actual support for stored link keys which makes this
1702          * command redundant anyway.
1703          *
1704          * Some controllers indicate that they support handling deleting
1705          * stored link keys, but they don't. The quirk lets a driver
1706          * just disable this command.
1707          */
1708         if (hdev->commands[6] & 0x80 &&
1709             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1710                 struct hci_cp_delete_stored_link_key cp;
1711
1712                 bacpy(&cp.bdaddr, BDADDR_ANY);
1713                 cp.delete_all = 0x01;
1714                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715                             sizeof(cp), &cp);
1716         }
1717
1718         if (hdev->commands[5] & 0x10)
1719                 hci_setup_link_policy(req);
1720
1721         if (lmp_le_capable(hdev)) {
1722                 u8 events[8];
1723
1724                 memset(events, 0, sizeof(events));
1725                 events[0] = 0x0f;
1726
1727                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728                         events[0] |= 0x10;      /* LE Long Term Key Request */
1729
1730                 /* If controller supports the Connection Parameters Request
1731                  * Link Layer Procedure, enable the corresponding event.
1732                  */
1733                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734                         events[0] |= 0x20;      /* LE Remote Connection
1735                                                  * Parameter Request
1736                                                  */
1737
1738                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1739                             events);
1740
1741                 if (hdev->commands[25] & 0x40) {
1742                         /* Read LE Advertising Channel TX Power */
1743                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1744                 }
1745
1746                 hci_set_le_support(req);
1747         }
1748
1749         /* Read features beyond page 1 if available */
1750         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1751                 struct hci_cp_read_local_ext_features cp;
1752
1753                 cp.page = p;
1754                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1755                             sizeof(cp), &cp);
1756         }
1757 }
1758
1759 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760 {
1761         struct hci_dev *hdev = req->hdev;
1762
1763         /* Set event mask page 2 if the HCI command for it is supported */
1764         if (hdev->commands[22] & 0x04)
1765                 hci_set_event_mask_page_2(req);
1766
1767         /* Read local codec list if the HCI command is supported */
1768         if (hdev->commands[29] & 0x20)
1769                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
1771         /* Get MWS transport configuration if the HCI command is supported */
1772         if (hdev->commands[30] & 0x08)
1773                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
1775         /* Check for Synchronization Train support */
1776         if (lmp_sync_train_capable(hdev))
1777                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1778
1779         /* Enable Secure Connections if supported and configured */
1780         if (bredr_sc_enabled(hdev)) {
1781                 u8 support = 0x01;
1782                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783                             sizeof(support), &support);
1784         }
1785 }
1786
1787 static int __hci_init(struct hci_dev *hdev)
1788 {
1789         int err;
1790
1791         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1792         if (err < 0)
1793                 return err;
1794
1795         /* The Device Under Test (DUT) mode is special and available for
1796          * all controller types. So just create it early on.
1797          */
1798         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1799                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1800                                     &dut_mode_fops);
1801         }
1802
1803         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1804          * dual-mode BR/EDR/LE type controllers. AMP controllers only need the
1805          * first stage init.
1806          */
1807         if (hdev->dev_type != HCI_BREDR)
1808                 return 0;
1809
1810         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1811         if (err < 0)
1812                 return err;
1813
1814         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1815         if (err < 0)
1816                 return err;
1817
1818         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1819         if (err < 0)
1820                 return err;
1821
1822         /* Only create debugfs entries during the initial setup
1823          * phase and not every time the controller gets powered on.
1824          */
1825         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1826                 return 0;
1827
1828         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1829                             &features_fops);
1830         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1831                            &hdev->manufacturer);
1832         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1833         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1834         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1835                             &device_list_fops);
1836         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1837                             &blacklist_fops);
1838         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1839
1840         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1841                             &conn_info_min_age_fops);
1842         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1843                             &conn_info_max_age_fops);
1844
1845         if (lmp_bredr_capable(hdev)) {
1846                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1847                                     hdev, &inquiry_cache_fops);
1848                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1849                                     hdev, &link_keys_fops);
1850                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1851                                     hdev, &dev_class_fops);
1852                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1853                                     hdev, &voice_setting_fops);
1854         }
1855
1856         if (lmp_ssp_capable(hdev)) {
1857                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1858                                     hdev, &auto_accept_delay_fops);
1859                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1860                                     hdev, &force_sc_support_fops);
1861                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1862                                     hdev, &sc_only_mode_fops);
1863                 if (lmp_le_capable(hdev))
1864                         debugfs_create_file("force_lesc_support", 0644,
1865                                             hdev->debugfs, hdev,
1866                                             &force_lesc_support_fops);
1867         }
1868
1869         if (lmp_sniff_capable(hdev)) {
1870                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1871                                     hdev, &idle_timeout_fops);
1872                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1873                                     hdev, &sniff_min_interval_fops);
1874                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1875                                     hdev, &sniff_max_interval_fops);
1876         }
1877
1878         if (lmp_le_capable(hdev)) {
1879                 debugfs_create_file("identity", 0400, hdev->debugfs,
1880                                     hdev, &identity_fops);
1881                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1882                                     hdev, &rpa_timeout_fops);
1883                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1884                                     hdev, &random_address_fops);
1885                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1886                                     hdev, &static_address_fops);
1887
1888                 /* For controllers with a public address, provide a debug
1889                  * option to force the usage of the configured static
1890                  * address. By default the public address is used.
1891                  */
1892                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1893                         debugfs_create_file("force_static_address", 0644,
1894                                             hdev->debugfs, hdev,
1895                                             &force_static_address_fops);
1896
1897                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1898                                   &hdev->le_white_list_size);
1899                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1900                                     &white_list_fops);
1901                 debugfs_create_file("identity_resolving_keys", 0400,
1902                                     hdev->debugfs, hdev,
1903                                     &identity_resolving_keys_fops);
1904                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1905                                     hdev, &long_term_keys_fops);
1906                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1907                                     hdev, &conn_min_interval_fops);
1908                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1909                                     hdev, &conn_max_interval_fops);
1910                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1911                                     hdev, &conn_latency_fops);
1912                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1913                                     hdev, &supervision_timeout_fops);
1914                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1915                                     hdev, &adv_channel_map_fops);
1916                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1917                                     hdev, &adv_min_interval_fops);
1918                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1919                                     hdev, &adv_max_interval_fops);
1920                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1921                                    hdev->debugfs,
1922                                    &hdev->discov_interleaved_timeout);
1923
1924                 smp_register(hdev);
1925         }
1926
1927         return 0;
1928 }
1929
1930 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931 {
1932         struct hci_dev *hdev = req->hdev;
1933
1934         BT_DBG("%s %ld", hdev->name, opt);
1935
1936         /* Reset */
1937         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938                 hci_reset_req(req, 0);
1939
1940         /* Read Local Version */
1941         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943         /* Read BD Address */
1944         if (hdev->set_bdaddr)
1945                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946 }
1947
1948 static int __hci_unconf_init(struct hci_dev *hdev)
1949 {
1950         int err;
1951
1952         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953                 return 0;
1954
1955         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956         if (err < 0)
1957                 return err;
1958
1959         return 0;
1960 }
1961
1962 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1963 {
1964         __u8 scan = opt;
1965
1966         BT_DBG("%s %x", req->hdev->name, scan);
1967
1968         /* Inquiry and Page scans */
1969         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1970 }
1971
1972 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1973 {
1974         __u8 auth = opt;
1975
1976         BT_DBG("%s %x", req->hdev->name, auth);
1977
1978         /* Authentication */
1979         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1980 }
1981
1982 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1983 {
1984         __u8 encrypt = opt;
1985
1986         BT_DBG("%s %x", req->hdev->name, encrypt);
1987
1988         /* Encryption */
1989         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1990 }
1991
1992 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1993 {
1994         __le16 policy = cpu_to_le16(opt);
1995
1996         BT_DBG("%s %x", req->hdev->name, policy);
1997
1998         /* Default link policy */
1999         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2000 }
2001
2002 /* Get HCI device by index.
2003  * Device is held on return. */
2004 struct hci_dev *hci_dev_get(int index)
2005 {
2006         struct hci_dev *hdev = NULL, *d;
2007
2008         BT_DBG("%d", index);
2009
2010         if (index < 0)
2011                 return NULL;
2012
2013         read_lock(&hci_dev_list_lock);
2014         list_for_each_entry(d, &hci_dev_list, list) {
2015                 if (d->id == index) {
2016                         hdev = hci_dev_hold(d);
2017                         break;
2018                 }
2019         }
2020         read_unlock(&hci_dev_list_lock);
2021         return hdev;
2022 }
2023
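/* Usage sketch (hypothetical caller, for illustration only): every
 * successful hci_dev_get() must be balanced by hci_dev_put() once the
 * reference is no longer needed:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);	// hci0
 *
 *	if (hdev) {
 *		BT_DBG("%s held", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */
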
2024 /* ---- Inquiry support ---- */
2025
2026 bool hci_discovery_active(struct hci_dev *hdev)
2027 {
2028         struct discovery_state *discov = &hdev->discovery;
2029
2030         switch (discov->state) {
2031         case DISCOVERY_FINDING:
2032         case DISCOVERY_RESOLVING:
2033                 return true;
2034
2035         default:
2036                 return false;
2037         }
2038 }
2039
2040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041 {
2042         int old_state = hdev->discovery.state;
2043
2044         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
2046         if (old_state == state)
2047                 return;
2048
2049         hdev->discovery.state = state;
2050
2051         switch (state) {
2052         case DISCOVERY_STOPPED:
2053                 hci_update_background_scan(hdev);
2054
2055                 if (old_state != DISCOVERY_STARTING)
2056                         mgmt_discovering(hdev, 0);
2057                 break;
2058         case DISCOVERY_STARTING:
2059                 break;
2060         case DISCOVERY_FINDING:
2061                 mgmt_discovering(hdev, 1);
2062                 break;
2063         case DISCOVERY_RESOLVING:
2064                 break;
2065         case DISCOVERY_STOPPING:
2066                 break;
2067         }
2068 }
2069
2070 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2071 {
2072         struct discovery_state *cache = &hdev->discovery;
2073         struct inquiry_entry *p, *n;
2074
2075         list_for_each_entry_safe(p, n, &cache->all, all) {
2076                 list_del(&p->all);
2077                 kfree(p);
2078         }
2079
2080         INIT_LIST_HEAD(&cache->unknown);
2081         INIT_LIST_HEAD(&cache->resolve);
2082 }
2083
2084 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2085                                                bdaddr_t *bdaddr)
2086 {
2087         struct discovery_state *cache = &hdev->discovery;
2088         struct inquiry_entry *e;
2089
2090         BT_DBG("cache %p, %pMR", cache, bdaddr);
2091
2092         list_for_each_entry(e, &cache->all, all) {
2093                 if (!bacmp(&e->data.bdaddr, bdaddr))
2094                         return e;
2095         }
2096
2097         return NULL;
2098 }
2099
2100 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2101                                                        bdaddr_t *bdaddr)
2102 {
2103         struct discovery_state *cache = &hdev->discovery;
2104         struct inquiry_entry *e;
2105
2106         BT_DBG("cache %p, %pMR", cache, bdaddr);
2107
2108         list_for_each_entry(e, &cache->unknown, list) {
2109                 if (!bacmp(&e->data.bdaddr, bdaddr))
2110                         return e;
2111         }
2112
2113         return NULL;
2114 }
2115
2116 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2117                                                        bdaddr_t *bdaddr,
2118                                                        int state)
2119 {
2120         struct discovery_state *cache = &hdev->discovery;
2121         struct inquiry_entry *e;
2122
2123         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2124
2125         list_for_each_entry(e, &cache->resolve, list) {
2126                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2127                         return e;
2128                 if (!bacmp(&e->data.bdaddr, bdaddr))
2129                         return e;
2130         }
2131
2132         return NULL;
2133 }
2134
2135 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2136                                       struct inquiry_entry *ie)
2137 {
2138         struct discovery_state *cache = &hdev->discovery;
2139         struct list_head *pos = &cache->resolve;
2140         struct inquiry_entry *p;
2141
2142         list_del(&ie->list);
2143
2144         list_for_each_entry(p, &cache->resolve, list) {
2145                 if (p->name_state != NAME_PENDING &&
2146                     abs(p->data.rssi) >= abs(ie->data.rssi))
2147                         break;
2148                 pos = &p->list;
2149         }
2150
2151         list_add(&ie->list, pos);
2152 }
2153
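/* Example (illustrative only): the resolve list is kept sorted by
 * ascending |RSSI|, so the strongest responses get their names
 * resolved first. With pending entries at -40, -60 and -90 dBm, an
 * entry updated to -70 dBm is re-inserted between -60 and -90:
 *
 *	-40, -60, [-70], -90
 */
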
2154 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2155                              bool name_known)
2156 {
2157         struct discovery_state *cache = &hdev->discovery;
2158         struct inquiry_entry *ie;
2159         u32 flags = 0;
2160
2161         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2162
2163         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2164
2165         if (!data->ssp_mode)
2166                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2167
2168         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2169         if (ie) {
2170                 if (!ie->data.ssp_mode)
2171                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2172
2173                 if (ie->name_state == NAME_NEEDED &&
2174                     data->rssi != ie->data.rssi) {
2175                         ie->data.rssi = data->rssi;
2176                         hci_inquiry_cache_update_resolve(hdev, ie);
2177                 }
2178
2179                 goto update;
2180         }
2181
2182         /* Entry not in the cache. Add new one. */
2183         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2184         if (!ie) {
2185                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2186                 goto done;
2187         }
2188
2189         list_add(&ie->all, &cache->all);
2190
2191         if (name_known) {
2192                 ie->name_state = NAME_KNOWN;
2193         } else {
2194                 ie->name_state = NAME_NOT_KNOWN;
2195                 list_add(&ie->list, &cache->unknown);
2196         }
2197
2198 update:
2199         if (name_known && ie->name_state != NAME_KNOWN &&
2200             ie->name_state != NAME_PENDING) {
2201                 ie->name_state = NAME_KNOWN;
2202                 list_del(&ie->list);
2203         }
2204
2205         memcpy(&ie->data, data, sizeof(*data));
2206         ie->timestamp = jiffies;
2207         cache->timestamp = jiffies;
2208
2209         if (ie->name_state == NAME_NOT_KNOWN)
2210                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2211
2212 done:
2213         return flags;
2214 }
2215
2216 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2217 {
2218         struct discovery_state *cache = &hdev->discovery;
2219         struct inquiry_info *info = (struct inquiry_info *) buf;
2220         struct inquiry_entry *e;
2221         int copied = 0;
2222
2223         list_for_each_entry(e, &cache->all, all) {
2224                 struct inquiry_data *data = &e->data;
2225
2226                 if (copied >= num)
2227                         break;
2228
2229                 bacpy(&info->bdaddr, &data->bdaddr);
2230                 info->pscan_rep_mode    = data->pscan_rep_mode;
2231                 info->pscan_period_mode = data->pscan_period_mode;
2232                 info->pscan_mode        = data->pscan_mode;
2233                 memcpy(info->dev_class, data->dev_class, 3);
2234                 info->clock_offset      = data->clock_offset;
2235
2236                 info++;
2237                 copied++;
2238         }
2239
2240         BT_DBG("cache %p, copied %d", cache, copied);
2241         return copied;
2242 }
2243
2244 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2245 {
2246         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2247         struct hci_dev *hdev = req->hdev;
2248         struct hci_cp_inquiry cp;
2249
2250         BT_DBG("%s", hdev->name);
2251
2252         if (test_bit(HCI_INQUIRY, &hdev->flags))
2253                 return;
2254
2255         /* Start Inquiry */
2256         memcpy(&cp.lap, &ir->lap, 3);
2257         cp.length  = ir->length;
2258         cp.num_rsp = ir->num_rsp;
2259         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2260 }
2261
2262 int hci_inquiry(void __user *arg)
2263 {
2264         __u8 __user *ptr = arg;
2265         struct hci_inquiry_req ir;
2266         struct hci_dev *hdev;
2267         int err = 0, do_inquiry = 0, max_rsp;
2268         long timeo;
2269         __u8 *buf;
2270
2271         if (copy_from_user(&ir, ptr, sizeof(ir)))
2272                 return -EFAULT;
2273
2274         hdev = hci_dev_get(ir.dev_id);
2275         if (!hdev)
2276                 return -ENODEV;
2277
2278         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2279                 err = -EBUSY;
2280                 goto done;
2281         }
2282
2283         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2284                 err = -EOPNOTSUPP;
2285                 goto done;
2286         }
2287
2288         if (hdev->dev_type != HCI_BREDR) {
2289                 err = -EOPNOTSUPP;
2290                 goto done;
2291         }
2292
2293         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2294                 err = -EOPNOTSUPP;
2295                 goto done;
2296         }
2297
2298         hci_dev_lock(hdev);
2299         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2300             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2301                 hci_inquiry_cache_flush(hdev);
2302                 do_inquiry = 1;
2303         }
2304         hci_dev_unlock(hdev);
2305
2306         timeo = ir.length * msecs_to_jiffies(2000);
2307
2308         if (do_inquiry) {
2309                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2310                                    timeo);
2311                 if (err < 0)
2312                         goto done;
2313
2314                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2315                  * cleared). If it is interrupted by a signal, return -EINTR.
2316                  */
2317                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2318                                 TASK_INTERRUPTIBLE))
2319                         return -EINTR;
2320         }
2321
2322         /* For an unlimited number of responses, use a buffer with
2323          * 255 entries.
2324          */
2325         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2326
2327         /* cache_dump can't sleep, so allocate a temporary buffer and
2328          * then copy it to user space.
2329          */
2330         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2331         if (!buf) {
2332                 err = -ENOMEM;
2333                 goto done;
2334         }
2335
2336         hci_dev_lock(hdev);
2337         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2338         hci_dev_unlock(hdev);
2339
2340         BT_DBG("num_rsp %d", ir.num_rsp);
2341
2342         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2343                 ptr += sizeof(ir);
2344                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2345                                  ir.num_rsp))
2346                         err = -EFAULT;
2347         } else
2348                 err = -EFAULT;
2349
2350         kfree(buf);
2351
2352 done:
2353         hci_dev_put(hdev);
2354         return err;
2355 }
2356
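/* Userspace sketch (illustrative only, assuming the usual BlueZ
 * definitions): the HCIINQUIRY ioctl served by hci_inquiry() takes a
 * struct hci_inquiry_req immediately followed by room for the
 * inquiry_info entries copied back from the cache:
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;			// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// 8 * 1.28s
 *	ir->num_rsp = 255;			// 0 also means 255
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) == 0)
 *		printf("%u responses\n", ir->num_rsp);
 */
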
2357 static int hci_dev_do_open(struct hci_dev *hdev)
2358 {
2359         int ret = 0;
2360
2361         BT_DBG("%s %p", hdev->name, hdev);
2362
2363         hci_req_lock(hdev);
2364
2365         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2366                 ret = -ENODEV;
2367                 goto done;
2368         }
2369
2370         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2371             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2372                 /* Check for rfkill but allow the HCI setup stage to
2373                  * proceed (which in itself doesn't cause any RF activity).
2374                  */
2375                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2376                         ret = -ERFKILL;
2377                         goto done;
2378                 }
2379
2380                 /* Check for valid public address or a configured static
2381                  * random address, but let the HCI setup proceed to
2382                  * be able to determine if there is a public address
2383                  * or not.
2384                  *
2385                  * In case of user channel usage, it is not important
2386                  * if a public address or static random address is
2387                  * available.
2388                  *
2389                  * This check is only valid for BR/EDR controllers
2390                  * since AMP controllers do not have an address.
2391                  */
2392                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2393                     hdev->dev_type == HCI_BREDR &&
2394                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2395                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2396                         ret = -EADDRNOTAVAIL;
2397                         goto done;
2398                 }
2399         }
2400
2401         if (test_bit(HCI_UP, &hdev->flags)) {
2402                 ret = -EALREADY;
2403                 goto done;
2404         }
2405
2406         if (hdev->open(hdev)) {
2407                 ret = -EIO;
2408                 goto done;
2409         }
2410
2411         atomic_set(&hdev->cmd_cnt, 1);
2412         set_bit(HCI_INIT, &hdev->flags);
2413
2414         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2415                 if (hdev->setup)
2416                         ret = hdev->setup(hdev);
2417
2418                 /* The transport driver can set these quirks before
2419                  * creating the HCI device or in its setup callback.
2420                  *
2421                  * In case any of them is set, the controller has to
2422                  * start up as unconfigured.
2423                  */
2424                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2425                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2426                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2427
2428                 /* For an unconfigured controller it is required to
2429                  * read at least the version information provided by
2430                  * the Read Local Version Information command.
2431                  *
2432                  * If the set_bdaddr driver callback is provided, then
2433                  * also the original Bluetooth public device address
2434                  * will be read using the Read BD Address command.
2435                  */
2436                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2437                         ret = __hci_unconf_init(hdev);
2438         }
2439
2440         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2441                 /* If public address change is configured, ensure that
2442                  * the address gets programmed. If the driver does not
2443                  * support changing the public address, fail the power
2444                  * on procedure.
2445                  */
2446                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2447                     hdev->set_bdaddr)
2448                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2449                 else
2450                         ret = -EADDRNOTAVAIL;
2451         }
2452
2453         if (!ret) {
2454                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2455                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2456                         ret = __hci_init(hdev);
2457         }
2458
2459         clear_bit(HCI_INIT, &hdev->flags);
2460
2461         if (!ret) {
2462                 hci_dev_hold(hdev);
2463                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2464                 set_bit(HCI_UP, &hdev->flags);
2465                 hci_notify(hdev, HCI_DEV_UP);
2466                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2467                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2468                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2469                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2470                     hdev->dev_type == HCI_BREDR) {
2471                         hci_dev_lock(hdev);
2472                         mgmt_powered(hdev, 1);
2473                         hci_dev_unlock(hdev);
2474                 }
2475         } else {
2476                 /* Init failed, cleanup */
2477                 flush_work(&hdev->tx_work);
2478                 flush_work(&hdev->cmd_work);
2479                 flush_work(&hdev->rx_work);
2480
2481                 skb_queue_purge(&hdev->cmd_q);
2482                 skb_queue_purge(&hdev->rx_q);
2483
2484                 if (hdev->flush)
2485                         hdev->flush(hdev);
2486
2487                 if (hdev->sent_cmd) {
2488                         kfree_skb(hdev->sent_cmd);
2489                         hdev->sent_cmd = NULL;
2490                 }
2491
2492                 hdev->close(hdev);
2493                 hdev->flags &= BIT(HCI_RAW);
2494         }
2495
2496 done:
2497         hci_req_unlock(hdev);
2498         return ret;
2499 }
2500
2501 /* ---- HCI ioctl helpers ---- */
2502
2503 int hci_dev_open(__u16 dev)
2504 {
2505         struct hci_dev *hdev;
2506         int err;
2507
2508         hdev = hci_dev_get(dev);
2509         if (!hdev)
2510                 return -ENODEV;
2511
2512         /* Devices that are marked as unconfigured can only be powered
2513          * up as user channel. Trying to bring them up as normal devices
2514          * will result in a failure. Only user channel operation is
2515          * possible.
2516          *
2517          * When this function is called for a user channel, the flag
2518          * HCI_USER_CHANNEL will be set first before attempting to
2519          * open the device.
2520          */
2521         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2522             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2523                 err = -EOPNOTSUPP;
2524                 goto done;
2525         }
2526
2527         /* We need to ensure that no other power on/off work is pending
2528          * before proceeding to call hci_dev_do_open. This is
2529          * particularly important if the setup procedure has not yet
2530          * completed.
2531          */
2532         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2533                 cancel_delayed_work(&hdev->power_off);
2534
2535         /* After this call it is guaranteed that the setup procedure
2536          * has finished. This means that error conditions like RFKILL
2537          * or no valid public or static random address apply.
2538          */
2539         flush_workqueue(hdev->req_workqueue);
2540
2541         /* For controllers not using the management interface and that
2542          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2543          * so that pairing works for them. Once the management interface
2544          * is in use this bit will be cleared again and userspace has
2545          * to explicitly enable it.
2546          */
2547         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2548             !test_bit(HCI_MGMT, &hdev->dev_flags))
2549                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2550
2551         err = hci_dev_do_open(hdev);
2552
2553 done:
2554         hci_dev_put(hdev);
2555         return err;
2556 }
2557
2558 /* This function requires the caller holds hdev->lock */
2559 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2560 {
2561         struct hci_conn_params *p;
2562
2563         list_for_each_entry(p, &hdev->le_conn_params, list) {
2564                 if (p->conn) {
2565                         hci_conn_drop(p->conn);
2566                         hci_conn_put(p->conn);
2567                         p->conn = NULL;
2568                 }
2569                 list_del_init(&p->action);
2570         }
2571
2572         BT_DBG("All LE pending actions cleared");
2573 }
2574
2575 static int hci_dev_do_close(struct hci_dev *hdev)
2576 {
2577         BT_DBG("%s %p", hdev->name, hdev);
2578
2579         cancel_delayed_work(&hdev->power_off);
2580
2581         hci_req_cancel(hdev, ENODEV);
2582         hci_req_lock(hdev);
2583
2584         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2585                 cancel_delayed_work_sync(&hdev->cmd_timer);
2586                 hci_req_unlock(hdev);
2587                 return 0;
2588         }
2589
2590         /* Flush RX and TX works */
2591         flush_work(&hdev->tx_work);
2592         flush_work(&hdev->rx_work);
2593
2594         if (hdev->discov_timeout > 0) {
2595                 cancel_delayed_work(&hdev->discov_off);
2596                 hdev->discov_timeout = 0;
2597                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2598                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2599         }
2600
2601         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2602                 cancel_delayed_work(&hdev->service_cache);
2603
2604         cancel_delayed_work_sync(&hdev->le_scan_disable);
2605
2606         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2607                 cancel_delayed_work_sync(&hdev->rpa_expired);
2608
2609         /* Avoid potential lockdep warnings from the *_flush() calls by
2610          * ensuring the workqueue is empty up front.
2611          */
2612         drain_workqueue(hdev->workqueue);
2613
2614         hci_dev_lock(hdev);
2615         hci_inquiry_cache_flush(hdev);
2616         hci_pend_le_actions_clear(hdev);
2617         hci_conn_hash_flush(hdev);
2618         hci_dev_unlock(hdev);
2619
2620         hci_notify(hdev, HCI_DEV_DOWN);
2621
2622         if (hdev->flush)
2623                 hdev->flush(hdev);
2624
2625         /* Reset device */
2626         skb_queue_purge(&hdev->cmd_q);
2627         atomic_set(&hdev->cmd_cnt, 1);
2628         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2629             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2630             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2631                 set_bit(HCI_INIT, &hdev->flags);
2632                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2633                 clear_bit(HCI_INIT, &hdev->flags);
2634         }
2635
2636         /* Flush cmd work */
2637         flush_work(&hdev->cmd_work);
2638
2639         /* Drop queues */
2640         skb_queue_purge(&hdev->rx_q);
2641         skb_queue_purge(&hdev->cmd_q);
2642         skb_queue_purge(&hdev->raw_q);
2643
2644         /* Drop last sent command */
2645         if (hdev->sent_cmd) {
2646                 cancel_delayed_work_sync(&hdev->cmd_timer);
2647                 kfree_skb(hdev->sent_cmd);
2648                 hdev->sent_cmd = NULL;
2649         }
2650
2651         kfree_skb(hdev->recv_evt);
2652         hdev->recv_evt = NULL;
2653
2654         /* After this point our queues are empty
2655          * and no tasks are scheduled. */
2656         hdev->close(hdev);
2657
2658         /* Clear flags */
2659         hdev->flags &= BIT(HCI_RAW);
2660         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2661
2662         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2663                 if (hdev->dev_type == HCI_BREDR) {
2664                         hci_dev_lock(hdev);
2665                         mgmt_powered(hdev, 0);
2666                         hci_dev_unlock(hdev);
2667                 }
2668         }
2669
2670         /* Controller radio is available but is currently powered down */
2671         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2672
2673         memset(hdev->eir, 0, sizeof(hdev->eir));
2674         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2675         bacpy(&hdev->random_addr, BDADDR_ANY);
2676
2677         hci_req_unlock(hdev);
2678
2679         hci_dev_put(hdev);
2680         return 0;
2681 }
2682
2683 int hci_dev_close(__u16 dev)
2684 {
2685         struct hci_dev *hdev;
2686         int err;
2687
2688         hdev = hci_dev_get(dev);
2689         if (!hdev)
2690                 return -ENODEV;
2691
2692         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2693                 err = -EBUSY;
2694                 goto done;
2695         }
2696
2697         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2698                 cancel_delayed_work(&hdev->power_off);
2699
2700         err = hci_dev_do_close(hdev);
2701
2702 done:
2703         hci_dev_put(hdev);
2704         return err;
2705 }
2706
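/* Userspace sketch (illustrative only): hci_dev_open() and
 * hci_dev_close() back the HCIDEVUP and HCIDEVDOWN ioctls issued on a
 * raw HCI control socket:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("hci0 up");
 *	// ... use the device ...
 *	if (ioctl(dd, HCIDEVDOWN, 0) < 0)
 *		perror("hci0 down");
 */
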
2707 int hci_dev_reset(__u16 dev)
2708 {
2709         struct hci_dev *hdev;
2710         int ret = 0;
2711
2712         hdev = hci_dev_get(dev);
2713         if (!hdev)
2714                 return -ENODEV;
2715
2716         hci_req_lock(hdev);
2717
2718         if (!test_bit(HCI_UP, &hdev->flags)) {
2719                 ret = -ENETDOWN;
2720                 goto done;
2721         }
2722
2723         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724                 ret = -EBUSY;
2725                 goto done;
2726         }
2727
2728         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2729                 ret = -EOPNOTSUPP;
2730                 goto done;
2731         }
2732
2733         /* Drop queues */
2734         skb_queue_purge(&hdev->rx_q);
2735         skb_queue_purge(&hdev->cmd_q);
2736
2737         /* Avoid potential lockdep warnings from the *_flush() calls by
2738          * ensuring the workqueue is empty up front.
2739          */
2740         drain_workqueue(hdev->workqueue);
2741
2742         hci_dev_lock(hdev);
2743         hci_inquiry_cache_flush(hdev);
2744         hci_conn_hash_flush(hdev);
2745         hci_dev_unlock(hdev);
2746
2747         if (hdev->flush)
2748                 hdev->flush(hdev);
2749
2750         atomic_set(&hdev->cmd_cnt, 1);
2751         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2752
2753         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2754
2755 done:
2756         hci_req_unlock(hdev);
2757         hci_dev_put(hdev);
2758         return ret;
2759 }
2760
2761 int hci_dev_reset_stat(__u16 dev)
2762 {
2763         struct hci_dev *hdev;
2764         int ret = 0;
2765
2766         hdev = hci_dev_get(dev);
2767         if (!hdev)
2768                 return -ENODEV;
2769
2770         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2771                 ret = -EBUSY;
2772                 goto done;
2773         }
2774
2775         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2776                 ret = -EOPNOTSUPP;
2777                 goto done;
2778         }
2779
2780         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2781
2782 done:
2783         hci_dev_put(hdev);
2784         return ret;
2785 }
2786
2787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2788 {
2789         bool conn_changed, discov_changed;
2790
2791         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2792
2793         if ((scan & SCAN_PAGE))
2794                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2795                                                  &hdev->dev_flags);
2796         else
2797                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2798                                                   &hdev->dev_flags);
2799
2800         if ((scan & SCAN_INQUIRY)) {
2801                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2802                                                    &hdev->dev_flags);
2803         } else {
2804                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2805                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2806                                                     &hdev->dev_flags);
2807         }
2808
2809         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2810                 return;
2811
2812         if (conn_changed || discov_changed) {
2813                 /* In case this was disabled through mgmt */
2814                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2815
2816                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2817                         mgmt_update_adv_data(hdev);
2818
2819                 mgmt_new_settings(hdev);
2820         }
2821 }
2822
2823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2824 {
2825         struct hci_dev *hdev;
2826         struct hci_dev_req dr;
2827         int err = 0;
2828
2829         if (copy_from_user(&dr, arg, sizeof(dr)))
2830                 return -EFAULT;
2831
2832         hdev = hci_dev_get(dr.dev_id);
2833         if (!hdev)
2834                 return -ENODEV;
2835
2836         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2837                 err = -EBUSY;
2838                 goto done;
2839         }
2840
2841         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2842                 err = -EOPNOTSUPP;
2843                 goto done;
2844         }
2845
2846         if (hdev->dev_type != HCI_BREDR) {
2847                 err = -EOPNOTSUPP;
2848                 goto done;
2849         }
2850
2851         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2852                 err = -EOPNOTSUPP;
2853                 goto done;
2854         }
2855
2856         switch (cmd) {
2857         case HCISETAUTH:
2858                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2859                                    HCI_INIT_TIMEOUT);
2860                 break;
2861
2862         case HCISETENCRYPT:
2863                 if (!lmp_encrypt_capable(hdev)) {
2864                         err = -EOPNOTSUPP;
2865                         break;
2866                 }
2867
2868                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2869                         /* Auth must be enabled first */
2870                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2871                                            HCI_INIT_TIMEOUT);
2872                         if (err)
2873                                 break;
2874                 }
2875
2876                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2877                                    HCI_INIT_TIMEOUT);
2878                 break;
2879
2880         case HCISETSCAN:
2881                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2882                                    HCI_INIT_TIMEOUT);
2883
2884                 /* Ensure that the connectable and discoverable states
2885                  * get correctly modified as this was a non-mgmt change.
2886                  */
2887                 if (!err)
2888                         hci_update_scan_state(hdev, dr.dev_opt);
2889                 break;
2890
2891         case HCISETLINKPOL:
2892                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2893                                    HCI_INIT_TIMEOUT);
2894                 break;
2895
2896         case HCISETLINKMODE:
2897                 hdev->link_mode = ((__u16) dr.dev_opt) &
2898                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2899                 break;
2900
2901         case HCISETPTYPE:
2902                 hdev->pkt_type = (__u16) dr.dev_opt;
2903                 break;
2904
2905         case HCISETACLMTU:
2906                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2907                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2908                 break;
2909
2910         case HCISETSCOMTU:
2911                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2912                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2913                 break;
2914
2915         default:
2916                 err = -EINVAL;
2917                 break;
2918         }
2919
2920 done:
2921         hci_dev_put(hdev);
2922         return err;
2923 }
2924
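/* Userspace sketch (illustrative only): making hci0 both connectable
 * and discoverable through the legacy HCISETSCAN ioctl handled above:
 *
 *	struct hci_dev_req dr;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dr.dev_id  = 0;				// hci0
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */
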
2925 int hci_get_dev_list(void __user *arg)
2926 {
2927         struct hci_dev *hdev;
2928         struct hci_dev_list_req *dl;
2929         struct hci_dev_req *dr;
2930         int n = 0, size, err;
2931         __u16 dev_num;
2932
2933         if (get_user(dev_num, (__u16 __user *) arg))
2934                 return -EFAULT;
2935
2936         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2937                 return -EINVAL;
2938
2939         size = sizeof(*dl) + dev_num * sizeof(*dr);
2940
2941         dl = kzalloc(size, GFP_KERNEL);
2942         if (!dl)
2943                 return -ENOMEM;
2944
2945         dr = dl->dev_req;
2946
2947         read_lock(&hci_dev_list_lock);
2948         list_for_each_entry(hdev, &hci_dev_list, list) {
2949                 unsigned long flags = hdev->flags;
2950
2951                 /* When auto-off is configured, the transport is
2952                  * running, but in that case still indicate that the
2953                  * device is actually down.
2954                  */
2955                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2956                         flags &= ~BIT(HCI_UP);
2957
2958                 (dr + n)->dev_id  = hdev->id;
2959                 (dr + n)->dev_opt = flags;
2960
2961                 if (++n >= dev_num)
2962                         break;
2963         }
2964         read_unlock(&hci_dev_list_lock);
2965
2966         dl->dev_num = n;
2967         size = sizeof(*dl) + n * sizeof(*dr);
2968
2969         err = copy_to_user(arg, dl, size);
2970         kfree(dl);
2971
2972         return err ? -EFAULT : 0;
2973 }
2974
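/* Userspace sketch (illustrative only): HCIGETDEVLIST expects dev_num
 * to hold the size of the caller's buffer and rewrites it with the
 * number of entries actually returned:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i, dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dr[i].dev_id,
 *			       dr[i].dev_opt);
 */
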
2975 int hci_get_dev_info(void __user *arg)
2976 {
2977         struct hci_dev *hdev;
2978         struct hci_dev_info di;
2979         unsigned long flags;
2980         int err = 0;
2981
2982         if (copy_from_user(&di, arg, sizeof(di)))
2983                 return -EFAULT;
2984
2985         hdev = hci_dev_get(di.dev_id);
2986         if (!hdev)
2987                 return -ENODEV;
2988
2989         /* When auto-off is configured, the transport is
2990          * running, but in that case still indicate that the
2991          * device is actually down.
2992          */
2993         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2994                 flags = hdev->flags & ~BIT(HCI_UP);
2995         else
2996                 flags = hdev->flags;
2997
2998         strcpy(di.name, hdev->name);
2999         di.bdaddr   = hdev->bdaddr;
3000         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3001         di.flags    = flags;
3002         di.pkt_type = hdev->pkt_type;
3003         if (lmp_bredr_capable(hdev)) {
3004                 di.acl_mtu  = hdev->acl_mtu;
3005                 di.acl_pkts = hdev->acl_pkts;
3006                 di.sco_mtu  = hdev->sco_mtu;
3007                 di.sco_pkts = hdev->sco_pkts;
3008         } else {
3009                 di.acl_mtu  = hdev->le_mtu;
3010                 di.acl_pkts = hdev->le_pkts;
3011                 di.sco_mtu  = 0;
3012                 di.sco_pkts = 0;
3013         }
3014         di.link_policy = hdev->link_policy;
3015         di.link_mode   = hdev->link_mode;
3016
3017         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3018         memcpy(&di.features, &hdev->features, sizeof(di.features));
3019
3020         if (copy_to_user(arg, &di, sizeof(di)))
3021                 err = -EFAULT;
3022
3023         hci_dev_put(hdev);
3024
3025         return err;
3026 }
3027
3028 /* ---- Interface to HCI drivers ---- */
3029
3030 static int hci_rfkill_set_block(void *data, bool blocked)
3031 {
3032         struct hci_dev *hdev = data;
3033
3034         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3035
3036         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3037                 return -EBUSY;
3038
3039         if (blocked) {
3040                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3041                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3042                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3043                         hci_dev_do_close(hdev);
3044         } else {
3045                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3046         }
3047
3048         return 0;
3049 }
3050
3051 static const struct rfkill_ops hci_rfkill_ops = {
3052         .set_block = hci_rfkill_set_block,
3053 };
3054
3055 static void hci_power_on(struct work_struct *work)
3056 {
3057         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3058         int err;
3059
3060         BT_DBG("%s", hdev->name);
3061
3062         err = hci_dev_do_open(hdev);
3063         if (err < 0) {
3064                 mgmt_set_powered_failed(hdev, err);
3065                 return;
3066         }
3067
3068         /* During the HCI setup phase, a few error conditions are
3069          * ignored and they need to be checked now. If they are still
3070          * valid, it is important to turn the device back off.
3071          */
3072         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3073             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3074             (hdev->dev_type == HCI_BREDR &&
3075              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3076              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3077                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3078                 hci_dev_do_close(hdev);
3079         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3080                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3081                                    HCI_AUTO_OFF_TIMEOUT);
3082         }
3083
3084         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3085                 /* For unconfigured devices, set the HCI_RAW flag
3086                  * so that userspace can easily identify them.
3087                  */
3088                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3089                         set_bit(HCI_RAW, &hdev->flags);
3090
3091                 /* For fully configured devices, this will send
3092                  * the Index Added event. For unconfigured devices,
3093                  * it will send the Unconfigured Index Added event.
3094                  *
3095                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3096          * and no event will be sent.
3097                  */
3098                 mgmt_index_added(hdev);
3099         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3100                 /* Once the controller is configured, it is
3101                  * important to clear the HCI_RAW flag.
3102                  */
3103                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3104                         clear_bit(HCI_RAW, &hdev->flags);
3105
3106                 /* Powering on the controller with HCI_CONFIG set only
3107                  * happens with the transition from unconfigured to
3108                  * configured. This will send the Index Added event.
3109                  */
3110                 mgmt_index_added(hdev);
3111         }
3112 }
3113
3114 static void hci_power_off(struct work_struct *work)
3115 {
3116         struct hci_dev *hdev = container_of(work, struct hci_dev,
3117                                             power_off.work);
3118
3119         BT_DBG("%s", hdev->name);
3120
3121         hci_dev_do_close(hdev);
3122 }
3123
3124 static void hci_discov_off(struct work_struct *work)
3125 {
3126         struct hci_dev *hdev;
3127
3128         hdev = container_of(work, struct hci_dev, discov_off.work);
3129
3130         BT_DBG("%s", hdev->name);
3131
3132         mgmt_discoverable_timeout(hdev);
3133 }
3134
3135 void hci_uuids_clear(struct hci_dev *hdev)
3136 {
3137         struct bt_uuid *uuid, *tmp;
3138
3139         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3140                 list_del(&uuid->list);
3141                 kfree(uuid);
3142         }
3143 }
3144
3145 void hci_link_keys_clear(struct hci_dev *hdev)
3146 {
3147         struct link_key *key;
3148
3149         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3150                 list_del_rcu(&key->list);
3151                 kfree_rcu(key, rcu);
3152         }
3153 }
3154
3155 void hci_smp_ltks_clear(struct hci_dev *hdev)
3156 {
3157         struct smp_ltk *k;
3158
3159         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3160                 list_del_rcu(&k->list);
3161                 kfree_rcu(k, rcu);
3162         }
3163 }
3164
3165 void hci_smp_irks_clear(struct hci_dev *hdev)
3166 {
3167         struct smp_irk *k;
3168
3169         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3170                 list_del_rcu(&k->list);
3171                 kfree_rcu(k, rcu);
3172         }
3173 }
3174
3175 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3176 {
3177         struct link_key *k;
3178
3179         rcu_read_lock();
3180         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3181                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3182                         rcu_read_unlock();
3183                         return k;
3184                 }
3185         }
3186         rcu_read_unlock();
3187
3188         return NULL;
3189 }
3190
3191 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3192                                u8 key_type, u8 old_key_type)
3193 {
3194         /* Legacy key */
3195         if (key_type < 0x03)
3196                 return true;
3197
3198         /* Debug keys are insecure so don't store them persistently */
3199         if (key_type == HCI_LK_DEBUG_COMBINATION)
3200                 return false;
3201
3202         /* Changed combination key and there's no previous one */
3203         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3204                 return false;
3205
3206         /* Security mode 3 case */
3207         if (!conn)
3208                 return true;
3209
3210         /* BR/EDR key derived using SC from an LE link */
3211         if (conn->type == LE_LINK)
3212                 return true;
3213
3214         /* Neither the local nor the remote side requested no-bonding */
3215         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3216                 return true;
3217
3218         /* Local side had dedicated bonding as requirement */
3219         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3220                 return true;
3221
3222         /* Remote side had dedicated bonding as requirement */
3223         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3224                 return true;
3225
3226         /* If none of the above criteria match, then don't store the key
3227          * persistently */
3228         return false;
3229 }
3230
3231 static u8 ltk_role(u8 type)
3232 {
3233         if (type == SMP_LTK)
3234                 return HCI_ROLE_MASTER;
3235
3236         return HCI_ROLE_SLAVE;
3237 }
3238
3239 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240                              u8 addr_type, u8 role)
3241 {
3242         struct smp_ltk *k;
3243
3244         rcu_read_lock();
3245         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3246                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3247                         continue;
3248
3249                 if (smp_ltk_is_sc(k)) {
3250                         if (k->type == SMP_LTK_P256_DEBUG &&
3251                             !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
3252                                 continue;
3253                         rcu_read_unlock();
3254                         return k;
3255                 }
3256
3257                 if (ltk_role(k->type) == role) {
3258                         rcu_read_unlock();
3259                         return k;
3260                 }
3261         }
3262         rcu_read_unlock();
3263
3264         return NULL;
3265 }
3266
3267 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3268 {
3269         struct smp_irk *irk;
3270
3271         rcu_read_lock();
3272         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3273                 if (!bacmp(&irk->rpa, rpa)) {
3274                         rcu_read_unlock();
3275                         return irk;
3276                 }
3277         }
3278
3279         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3280                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3281                         bacpy(&irk->rpa, rpa);
3282                         rcu_read_unlock();
3283                         return irk;
3284                 }
3285         }
3286         rcu_read_unlock();
3287
3288         return NULL;
3289 }
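
/* Illustrative sketch (hypothetical caller, not compiled): resolving an
 * advertiser's resolvable private address back to its identity. The
 * first loop in hci_find_irk_by_rpa() above is a cheap cache hit on
 * irk->rpa; only on a miss does it fall back to the AES-based
 * smp_irk_matches() check and refresh the cached RPA.
 */
#if 0
static void example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_rpa(hdev, rpa);
        if (irk)
                BT_DBG("RPA %pMR resolved to identity %pMR", rpa,
                       &irk->bdaddr);
}
#endif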
3290
3291 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3292                                      u8 addr_type)
3293 {
3294         struct smp_irk *irk;
3295
3296         /* Identity Address must be public or static random */
3297         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3298                 return NULL;
3299
3300         rcu_read_lock();
3301         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3302                 if (addr_type == irk->addr_type &&
3303                     bacmp(bdaddr, &irk->bdaddr) == 0) {
3304                         rcu_read_unlock();
3305                         return irk;
3306                 }
3307         }
3308         rcu_read_unlock();
3309
3310         return NULL;
3311 }
3312
3313 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3314                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3315                                   u8 pin_len, bool *persistent)
3316 {
3317         struct link_key *key, *old_key;
3318         u8 old_key_type;
3319
3320         old_key = hci_find_link_key(hdev, bdaddr);
3321         if (old_key) {
3322                 old_key_type = old_key->type;
3323                 key = old_key;
3324         } else {
3325                 old_key_type = conn ? conn->key_type : 0xff;
3326                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3327                 if (!key)
3328                         return NULL;
3329                 list_add_rcu(&key->list, &hdev->link_keys);
3330         }
3331
3332         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3333
3334         /* Some buggy controller combinations generate a changed
3335          * combination key for legacy pairing even when there's no
3336          * previous key */
3337         if (type == HCI_LK_CHANGED_COMBINATION &&
3338             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3339                 type = HCI_LK_COMBINATION;
3340                 if (conn)
3341                         conn->key_type = type;
3342         }
3343
3344         bacpy(&key->bdaddr, bdaddr);
3345         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3346         key->pin_len = pin_len;
3347
3348         if (type == HCI_LK_CHANGED_COMBINATION)
3349                 key->type = old_key_type;
3350         else
3351                 key->type = type;
3352
3353         if (persistent)
3354                 *persistent = hci_persistent_key(hdev, conn, type,
3355                                                  old_key_type);
3356
3357         return key;
3358 }
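
/* Illustrative sketch (hypothetical caller, not compiled): how a Link
 * Key Notification handler consumes the persistent flag computed by
 * hci_persistent_key(). Keys flagged non-persistent are kept for the
 * lifetime of the connection but are not handed to user space for
 * permanent storage.
 */
#if 0
static void example_link_key_notify(struct hci_dev *hdev,
                                    struct hci_conn *conn,
                                    bdaddr_t *bdaddr,
                                    u8 val[HCI_LINK_KEY_SIZE], u8 type)
{
        struct link_key *key;
        bool persistent;

        key = hci_add_link_key(hdev, conn, bdaddr, val, type, 0,
                               &persistent);
        if (!key)
                return;

        /* Let user space decide whether to store the key on disk */
        mgmt_new_link_key(hdev, key, persistent);
}
#endif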
3359
3360 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3361                             u8 addr_type, u8 type, u8 authenticated,
3362                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3363 {
3364         struct smp_ltk *key, *old_key;
3365         u8 role = ltk_role(type);
3366
3367         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3368         if (old_key)
3369                 key = old_key;
3370         else {
3371                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3372                 if (!key)
3373                         return NULL;
3374                 list_add_rcu(&key->list, &hdev->long_term_keys);
3375         }
3376
3377         bacpy(&key->bdaddr, bdaddr);
3378         key->bdaddr_type = addr_type;
3379         memcpy(key->val, tk, sizeof(key->val));
3380         key->authenticated = authenticated;
3381         key->ediv = ediv;
3382         key->rand = rand;
3383         key->enc_size = enc_size;
3384         key->type = type;
3385
3386         return key;
3387 }
3388
3389 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3390                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3391 {
3392         struct smp_irk *irk;
3393
3394         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3395         if (!irk) {
3396                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3397                 if (!irk)
3398                         return NULL;
3399
3400                 bacpy(&irk->bdaddr, bdaddr);
3401                 irk->addr_type = addr_type;
3402
3403                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3404         }
3405
3406         memcpy(irk->val, val, 16);
3407         bacpy(&irk->rpa, rpa);
3408
3409         return irk;
3410 }
3411
3412 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3413 {
3414         struct link_key *key;
3415
3416         key = hci_find_link_key(hdev, bdaddr);
3417         if (!key)
3418                 return -ENOENT;
3419
3420         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3421
3422         list_del_rcu(&key->list);
3423         kfree_rcu(key, rcu);
3424
3425         return 0;
3426 }
3427
3428 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3429 {
3430         struct smp_ltk *k;
3431         int removed = 0;
3432
3433         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3434                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3435                         continue;
3436
3437                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3438
3439                 list_del_rcu(&k->list);
3440                 kfree_rcu(k, rcu);
3441                 removed++;
3442         }
3443
3444         return removed ? 0 : -ENOENT;
3445 }
3446
3447 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3448 {
3449         struct smp_irk *k;
3450
3451         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3452                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3453                         continue;
3454
3455                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3456
3457                 list_del_rcu(&k->list);
3458                 kfree_rcu(k, rcu);
3459         }
3460 }
3461
3462 /* HCI command timer function */
3463 static void hci_cmd_timeout(struct work_struct *work)
3464 {
3465         struct hci_dev *hdev = container_of(work, struct hci_dev,
3466                                             cmd_timer.work);
3467
3468         if (hdev->sent_cmd) {
3469                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3470                 u16 opcode = __le16_to_cpu(sent->opcode);
3471
3472                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3473         } else {
3474                 BT_ERR("%s command tx timeout", hdev->name);
3475         }
3476
3477         atomic_set(&hdev->cmd_cnt, 1);
3478         queue_work(hdev->workqueue, &hdev->cmd_work);
3479 }
3480
3481 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3482                                           bdaddr_t *bdaddr)
3483 {
3484         struct oob_data *data;
3485
3486         list_for_each_entry(data, &hdev->remote_oob_data, list)
3487                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3488                         return data;
3489
3490         return NULL;
3491 }
3492
3493 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3494 {
3495         struct oob_data *data;
3496
3497         data = hci_find_remote_oob_data(hdev, bdaddr);
3498         if (!data)
3499                 return -ENOENT;
3500
3501         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3502
3503         list_del(&data->list);
3504         kfree(data);
3505
3506         return 0;
3507 }
3508
3509 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3510 {
3511         struct oob_data *data, *n;
3512
3513         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3514                 list_del(&data->list);
3515                 kfree(data);
3516         }
3517 }
3518
3519 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3520                             u8 *hash192, u8 *rand192,
3521                             u8 *hash256, u8 *rand256)
3522 {
3523         struct oob_data *data;
3524
3525         data = hci_find_remote_oob_data(hdev, bdaddr);
3526         if (!data) {
3527                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3528                 if (!data)
3529                         return -ENOMEM;
3530
3531                 bacpy(&data->bdaddr, bdaddr);
3532                 list_add(&data->list, &hdev->remote_oob_data);
3533         }
3534
3535         if (hash192 && rand192) {
3536                 memcpy(data->hash192, hash192, sizeof(data->hash192));
3537                 memcpy(data->rand192, rand192, sizeof(data->rand192));
3538         } else {
3539                 memset(data->hash192, 0, sizeof(data->hash192));
3540                 memset(data->rand192, 0, sizeof(data->rand192));
3541         }
3542
3543         if (hash256 && rand256) {
3544                 memcpy(data->hash256, hash256, sizeof(data->hash256));
3545                 memcpy(data->rand256, rand256, sizeof(data->rand256));
3546         } else {
3547                 memset(data->hash256, 0, sizeof(data->hash256));
3548                 memset(data->rand256, 0, sizeof(data->rand256));
3549         }
3550
3551         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3552
3553         return 0;
3554 }
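
/* Illustrative sketch (hypothetical caller, not compiled): with the
 * unified signature a single call covers both P-192 and P-256
 * out-of-band data. Passing NULL for one pair of values clears any
 * stale copy from a previous exchange instead of leaving it behind.
 */
#if 0
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *peer,
                             u8 hash192[16], u8 rand192[16])
{
        /* Legacy pairing data only; hash256/rand256 get zeroed */
        return hci_add_remote_oob_data(hdev, peer, hash192, rand192,
                                       NULL, NULL);
}
#endif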
3555
3556 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3557                                            bdaddr_t *bdaddr, u8 type)
3558 {
3559         struct bdaddr_list *b;
3560
3561         list_for_each_entry(b, bdaddr_list, list) {
3562                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3563                         return b;
3564         }
3565
3566         return NULL;
3567 }
3568
3569 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3570 {
3571         struct list_head *p, *n;
3572
3573         list_for_each_safe(p, n, bdaddr_list) {
3574                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3575
3576                 list_del(p);
3577                 kfree(b);
3578         }
3579 }
3580
3581 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3582 {
3583         struct bdaddr_list *entry;
3584
3585         if (!bacmp(bdaddr, BDADDR_ANY))
3586                 return -EBADF;
3587
3588         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3589                 return -EEXIST;
3590
3591         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3592         if (!entry)
3593                 return -ENOMEM;
3594
3595         bacpy(&entry->bdaddr, bdaddr);
3596         entry->bdaddr_type = type;
3597
3598         list_add(&entry->list, list);
3599
3600         return 0;
3601 }
3602
3603 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3604 {
3605         struct bdaddr_list *entry;
3606
3607         if (!bacmp(bdaddr, BDADDR_ANY)) {
3608                 hci_bdaddr_list_clear(list);
3609                 return 0;
3610         }
3611
3612         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3613         if (!entry)
3614                 return -ENOENT;
3615
3616         list_del(&entry->list);
3617         kfree(entry);
3618
3619         return 0;
3620 }
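
/* Illustrative sketch (hypothetical wrapper, not compiled): basic use
 * of the bdaddr list helpers. Adding BDADDR_ANY fails with -EBADF and
 * a duplicate entry with -EEXIST, while deleting BDADDR_ANY clears the
 * whole list.
 */
#if 0
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *peer)
{
        int err;

        err = hci_bdaddr_list_add(&hdev->whitelist, peer, BDADDR_BREDR);
        if (err == -EEXIST)
                return 0;       /* already present, nothing to do */

        return err;
}
#endif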
3621
3622 /* This function requires the caller holds hdev->lock */
3623 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3624                                                bdaddr_t *addr, u8 addr_type)
3625 {
3626         struct hci_conn_params *params;
3627
3628         /* The conn params list only contains identity addresses */
3629         if (!hci_is_identity_address(addr, addr_type))
3630                 return NULL;
3631
3632         list_for_each_entry(params, &hdev->le_conn_params, list) {
3633                 if (bacmp(&params->addr, addr) == 0 &&
3634                     params->addr_type == addr_type) {
3635                         return params;
3636                 }
3637         }
3638
3639         return NULL;
3640 }
3641
3642 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3643 {
3644         struct hci_conn *conn;
3645
3646         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3647         if (!conn)
3648                 return false;
3649
3650         if (conn->dst_type != type)
3651                 return false;
3652
3653         if (conn->state != BT_CONNECTED)
3654                 return false;
3655
3656         return true;
3657 }
3658
3659 /* This function requires the caller holds hdev->lock */
3660 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3661                                                   bdaddr_t *addr, u8 addr_type)
3662 {
3663         struct hci_conn_params *param;
3664
3665         /* The list only contains identity addresses */
3666         if (!hci_is_identity_address(addr, addr_type))
3667                 return NULL;
3668
3669         list_for_each_entry(param, list, action) {
3670                 if (bacmp(&param->addr, addr) == 0 &&
3671                     param->addr_type == addr_type)
3672                         return param;
3673         }
3674
3675         return NULL;
3676 }
3677
3678 /* This function requires the caller holds hdev->lock */
3679 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3680                                             bdaddr_t *addr, u8 addr_type)
3681 {
3682         struct hci_conn_params *params;
3683
3684         if (!hci_is_identity_address(addr, addr_type))
3685                 return NULL;
3686
3687         params = hci_conn_params_lookup(hdev, addr, addr_type);
3688         if (params)
3689                 return params;
3690
3691         params = kzalloc(sizeof(*params), GFP_KERNEL);
3692         if (!params) {
3693                 BT_ERR("Out of memory");
3694                 return NULL;
3695         }
3696
3697         bacpy(&params->addr, addr);
3698         params->addr_type = addr_type;
3699
3700         list_add(&params->list, &hdev->le_conn_params);
3701         INIT_LIST_HEAD(&params->action);
3702
3703         params->conn_min_interval = hdev->le_conn_min_interval;
3704         params->conn_max_interval = hdev->le_conn_max_interval;
3705         params->conn_latency = hdev->le_conn_latency;
3706         params->supervision_timeout = hdev->le_supv_timeout;
3707         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3708
3709         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3710
3711         return params;
3712 }
3713
3714 /* This function requires the caller holds hdev->lock */
3715 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3716                         u8 auto_connect)
3717 {
3718         struct hci_conn_params *params;
3719
3720         params = hci_conn_params_add(hdev, addr, addr_type);
3721         if (!params)
3722                 return -EIO;
3723
3724         if (params->auto_connect == auto_connect)
3725                 return 0;
3726
3727         list_del_init(&params->action);
3728
3729         switch (auto_connect) {
3730         case HCI_AUTO_CONN_DISABLED:
3731         case HCI_AUTO_CONN_LINK_LOSS:
3732                 hci_update_background_scan(hdev);
3733                 break;
3734         case HCI_AUTO_CONN_REPORT:
3735                 list_add(&params->action, &hdev->pend_le_reports);
3736                 hci_update_background_scan(hdev);
3737                 break;
3738         case HCI_AUTO_CONN_DIRECT:
3739         case HCI_AUTO_CONN_ALWAYS:
3740                 if (!is_connected(hdev, addr, addr_type)) {
3741                         list_add(&params->action, &hdev->pend_le_conns);
3742                         hci_update_background_scan(hdev);
3743                 }
3744                 break;
3745         }
3746
3747         params->auto_connect = auto_connect;
3748
3749         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3750                auto_connect);
3751
3752         return 0;
3753 }
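
/* Illustrative sketch (hypothetical wrapper, not compiled): marking a
 * peer for automatic reconnection. hci_conn_params_set() requires
 * hdev->lock; it moves the entry onto the right pending list and kicks
 * the background scan.
 */
#if 0
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *peer)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_conn_params_set(hdev, peer, ADDR_LE_DEV_PUBLIC,
                                  HCI_AUTO_CONN_ALWAYS);
        hci_dev_unlock(hdev);

        return err;
}
#endif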
3754
3755 static void hci_conn_params_free(struct hci_conn_params *params)
3756 {
3757         if (params->conn) {
3758                 hci_conn_drop(params->conn);
3759                 hci_conn_put(params->conn);
3760         }
3761
3762         list_del(&params->action);
3763         list_del(&params->list);
3764         kfree(params);
3765 }
3766
3767 /* This function requires the caller holds hdev->lock */
3768 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3769 {
3770         struct hci_conn_params *params;
3771
3772         params = hci_conn_params_lookup(hdev, addr, addr_type);
3773         if (!params)
3774                 return;
3775
3776         hci_conn_params_free(params);
3777
3778         hci_update_background_scan(hdev);
3779
3780         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3781 }
3782
3783 /* This function requires the caller holds hdev->lock */
3784 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3785 {
3786         struct hci_conn_params *params, *tmp;
3787
3788         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3789                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3790                         continue;
3791                 list_del(&params->list);
3792                 kfree(params);
3793         }
3794
3795         BT_DBG("All disabled LE connection parameters were removed");
3796 }
3797
3798 /* This function requires the caller holds hdev->lock */
3799 void hci_conn_params_clear_all(struct hci_dev *hdev)
3800 {
3801         struct hci_conn_params *params, *tmp;
3802
3803         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3804                 hci_conn_params_free(params);
3805
3806         hci_update_background_scan(hdev);
3807
3808         BT_DBG("All LE connection parameters were removed");
3809 }
3810
3811 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3812 {
3813         if (status) {
3814                 BT_ERR("Failed to start inquiry: status %d", status);
3815
3816                 hci_dev_lock(hdev);
3817                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818                 hci_dev_unlock(hdev);
3819                 return;
3820         }
3821 }
3822
3823 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3824 {
3825         /* General inquiry access code (GIAC) */
3826         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3827         struct hci_request req;
3828         struct hci_cp_inquiry cp;
3829         int err;
3830
3831         if (status) {
3832                 BT_ERR("Failed to disable LE scanning: status %d", status);
3833                 return;
3834         }
3835
3836         switch (hdev->discovery.type) {
3837         case DISCOV_TYPE_LE:
3838                 hci_dev_lock(hdev);
3839                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3840                 hci_dev_unlock(hdev);
3841                 break;
3842
3843         case DISCOV_TYPE_INTERLEAVED:
3844                 hci_req_init(&req, hdev);
3845
3846                 memset(&cp, 0, sizeof(cp));
3847                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3848                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3849                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3850
3851                 hci_dev_lock(hdev);
3852
3853                 hci_inquiry_cache_flush(hdev);
3854
3855                 err = hci_req_run(&req, inquiry_complete);
3856                 if (err) {
3857                         BT_ERR("Inquiry request failed: err %d", err);
3858                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3859                 }
3860
3861                 hci_dev_unlock(hdev);
3862                 break;
3863         }
3864 }
3865
3866 static void le_scan_disable_work(struct work_struct *work)
3867 {
3868         struct hci_dev *hdev = container_of(work, struct hci_dev,
3869                                             le_scan_disable.work);
3870         struct hci_request req;
3871         int err;
3872
3873         BT_DBG("%s", hdev->name);
3874
3875         hci_req_init(&req, hdev);
3876
3877         hci_req_add_le_scan_disable(&req);
3878
3879         err = hci_req_run(&req, le_scan_disable_work_complete);
3880         if (err)
3881                 BT_ERR("Disable LE scanning request failed: err %d", err);
3882 }
3883
3884 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3885 {
3886         struct hci_dev *hdev = req->hdev;
3887
3888         /* If we're advertising or initiating an LE connection we can't
3889          * go ahead and change the random address at this time. This is
3890          * because the eventual initiator address used for the
3891          * subsequently created connection will be undefined (some
3892          * controllers use the new address and others the one we had
3893          * when the operation started).
3894          *
3895          * In this kind of scenario skip the update and let the random
3896          * address be updated at the next cycle.
3897          */
3898         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3899             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3900                 BT_DBG("Deferring random address update");
3901                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3902                 return;
3903         }
3904
3905         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3906 }
3907
3908 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3909                               u8 *own_addr_type)
3910 {
3911         struct hci_dev *hdev = req->hdev;
3912         int err;
3913
3914         /* If privacy is enabled, use a resolvable private address. If
3915          * the current RPA has expired or something other than the
3916          * current RPA is in use, then generate a new one.
3917          */
3918         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3919                 int to;
3920
3921                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3922
3923                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3924                     !bacmp(&hdev->random_addr, &hdev->rpa))
3925                         return 0;
3926
3927                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3928                 if (err < 0) {
3929                         BT_ERR("%s failed to generate new RPA", hdev->name);
3930                         return err;
3931                 }
3932
3933                 set_random_addr(req, &hdev->rpa);
3934
3935                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3936                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3937
3938                 return 0;
3939         }
3940
3941         /* In case privacy is required but no resolvable private address
3942          * is available, use an unresolvable private address. This is
3943          * useful for active scanning and non-connectable advertising.
3944          */
3945         if (require_privacy) {
3946                 bdaddr_t urpa;
3947
3948                 get_random_bytes(&urpa, 6);
3949                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3950
3951                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3952                 set_random_addr(req, &urpa);
3953                 return 0;
3954         }
3955
3956         /* If forcing static address is in use or there is no public
3957          * address, use the static address as the random address (but
3958          * skip the HCI command if the current random address is
3959          * already the static one).
3960          */
3961         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3962             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3963                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3964                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3965                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3966                                     &hdev->static_addr);
3967                 return 0;
3968         }
3969
3970         /* Neither privacy nor static address is being used so use a
3971          * public address.
3972          */
3973         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3974
3975         return 0;
3976 }
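
/* Illustrative sketch (hypothetical function, not compiled): typical
 * use when building LE scan parameters, modelled on the stack's
 * passive scan setup. The helper queues an LE Set Random Address
 * command when needed and reports which own-address type the follow-up
 * command should carry.
 */
#if 0
static void example_add_scan_param(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param cp;
        u8 own_addr_type;

        if (hci_update_random_address(req, false, &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.type = LE_SCAN_PASSIVE;
        cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
        cp.window = cpu_to_le16(req->hdev->le_scan_window);
        cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
#endif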
3977
3978 /* Copy the Identity Address of the controller.
3979  *
3980  * If the controller has a public BD_ADDR, then by default use that one.
3981  * If this is an LE-only controller without a public address, default to
3982  * the static random address.
3983  *
3984  * For debugging purposes it is possible to force controllers with a
3985  * public address to use the static random address instead.
3986  */
3987 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3988                                u8 *bdaddr_type)
3989 {
3990         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3991             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3992                 bacpy(bdaddr, &hdev->static_addr);
3993                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3994         } else {
3995                 bacpy(bdaddr, &hdev->bdaddr);
3996                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3997         }
3998 }
3999
4000 /* Alloc HCI device */
4001 struct hci_dev *hci_alloc_dev(void)
4002 {
4003         struct hci_dev *hdev;
4004
4005         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4006         if (!hdev)
4007                 return NULL;
4008
4009         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4010         hdev->esco_type = (ESCO_HV1);
4011         hdev->link_mode = (HCI_LM_ACCEPT);
4012         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
4013         hdev->io_capability = 0x03;     /* No Input No Output */
4014         hdev->manufacturer = 0xffff;    /* Default to internal use */
4015         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4016         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4017
4018         hdev->sniff_max_interval = 800;
4019         hdev->sniff_min_interval = 80;
4020
4021         hdev->le_adv_channel_map = 0x07;
4022         hdev->le_adv_min_interval = 0x0800;
4023         hdev->le_adv_max_interval = 0x0800;
4024         hdev->le_scan_interval = 0x0060;
4025         hdev->le_scan_window = 0x0030;
4026         hdev->le_conn_min_interval = 0x0028;
4027         hdev->le_conn_max_interval = 0x0038;
4028         hdev->le_conn_latency = 0x0000;
4029         hdev->le_supv_timeout = 0x002a;
4030
4031         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4032         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4033         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4034         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4035
4036         mutex_init(&hdev->lock);
4037         mutex_init(&hdev->req_lock);
4038
4039         INIT_LIST_HEAD(&hdev->mgmt_pending);
4040         INIT_LIST_HEAD(&hdev->blacklist);
4041         INIT_LIST_HEAD(&hdev->whitelist);
4042         INIT_LIST_HEAD(&hdev->uuids);
4043         INIT_LIST_HEAD(&hdev->link_keys);
4044         INIT_LIST_HEAD(&hdev->long_term_keys);
4045         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4046         INIT_LIST_HEAD(&hdev->remote_oob_data);
4047         INIT_LIST_HEAD(&hdev->le_white_list);
4048         INIT_LIST_HEAD(&hdev->le_conn_params);
4049         INIT_LIST_HEAD(&hdev->pend_le_conns);
4050         INIT_LIST_HEAD(&hdev->pend_le_reports);
4051         INIT_LIST_HEAD(&hdev->conn_hash.list);
4052
4053         INIT_WORK(&hdev->rx_work, hci_rx_work);
4054         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4055         INIT_WORK(&hdev->tx_work, hci_tx_work);
4056         INIT_WORK(&hdev->power_on, hci_power_on);
4057
4058         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4059         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4060         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4061
4062         skb_queue_head_init(&hdev->rx_q);
4063         skb_queue_head_init(&hdev->cmd_q);
4064         skb_queue_head_init(&hdev->raw_q);
4065
4066         init_waitqueue_head(&hdev->req_wait_q);
4067
4068         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4069
4070         hci_init_sysfs(hdev);
4071         discovery_init(hdev);
4072
4073         return hdev;
4074 }
4075 EXPORT_SYMBOL(hci_alloc_dev);
4076
4077 /* Free HCI device */
4078 void hci_free_dev(struct hci_dev *hdev)
4079 {
4080         /* will free via device release */
4081         put_device(&hdev->dev);
4082 }
4083 EXPORT_SYMBOL(hci_free_dev);
4084
4085 /* Register HCI device */
4086 int hci_register_dev(struct hci_dev *hdev)
4087 {
4088         int id, error;
4089
4090         if (!hdev->open || !hdev->close || !hdev->send)
4091                 return -EINVAL;
4092
4093         /* Do not allow HCI_AMP devices to register at index 0,
4094          * so the index can be used as the AMP controller ID.
4095          */
4096         switch (hdev->dev_type) {
4097         case HCI_BREDR:
4098                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4099                 break;
4100         case HCI_AMP:
4101                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4102                 break;
4103         default:
4104                 return -EINVAL;
4105         }
4106
4107         if (id < 0)
4108                 return id;
4109
4110         sprintf(hdev->name, "hci%d", id);
4111         hdev->id = id;
4112
4113         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4114
4115         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4116                                           WQ_MEM_RECLAIM, 1, hdev->name);
4117         if (!hdev->workqueue) {
4118                 error = -ENOMEM;
4119                 goto err;
4120         }
4121
4122         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4123                                               WQ_MEM_RECLAIM, 1, hdev->name);
4124         if (!hdev->req_workqueue) {
4125                 destroy_workqueue(hdev->workqueue);
4126                 error = -ENOMEM;
4127                 goto err;
4128         }
4129
4130         if (!IS_ERR_OR_NULL(bt_debugfs))
4131                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4132
4133         dev_set_name(&hdev->dev, "%s", hdev->name);
4134
4135         error = device_add(&hdev->dev);
4136         if (error < 0)
4137                 goto err_wqueue;
4138
4139         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4140                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4141                                     hdev);
4142         if (hdev->rfkill) {
4143                 if (rfkill_register(hdev->rfkill) < 0) {
4144                         rfkill_destroy(hdev->rfkill);
4145                         hdev->rfkill = NULL;
4146                 }
4147         }
4148
4149         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4150                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4151
4152         set_bit(HCI_SETUP, &hdev->dev_flags);
4153         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4154
4155         if (hdev->dev_type == HCI_BREDR) {
4156                 /* Assume BR/EDR support until proven otherwise (such as
4157                  * through reading supported features during init).
4158                  */
4159                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4160         }
4161
4162         write_lock(&hci_dev_list_lock);
4163         list_add(&hdev->list, &hci_dev_list);
4164         write_unlock(&hci_dev_list_lock);
4165
4166         /* Devices that are marked for raw-only usage are unconfigured
4167          * and should not be included in normal operation.
4168          */
4169         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4170                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4171
4172         hci_notify(hdev, HCI_DEV_REG);
4173         hci_dev_hold(hdev);
4174
4175         queue_work(hdev->req_workqueue, &hdev->power_on);
4176
4177         return id;
4178
4179 err_wqueue:
4180         destroy_workqueue(hdev->workqueue);
4181         destroy_workqueue(hdev->req_workqueue);
4182 err:
4183         ida_simple_remove(&hci_index_ida, hdev->id);
4184
4185         return error;
4186 }
4187 EXPORT_SYMBOL(hci_register_dev);
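
/* Illustrative sketch (not compiled): the minimal driver-side sequence
 * for registering a new controller. The example_open, example_close
 * and example_send callbacks are hypothetical; without all three,
 * hci_register_dev() fails with -EINVAL.
 */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_USB;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
#endif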
4188
4189 /* Unregister HCI device */
4190 void hci_unregister_dev(struct hci_dev *hdev)
4191 {
4192         int i, id;
4193
4194         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4195
4196         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4197
4198         id = hdev->id;
4199
4200         write_lock(&hci_dev_list_lock);
4201         list_del(&hdev->list);
4202         write_unlock(&hci_dev_list_lock);
4203
4204         hci_dev_do_close(hdev);
4205
4206         for (i = 0; i < NUM_REASSEMBLY; i++)
4207                 kfree_skb(hdev->reassembly[i]);
4208
4209         cancel_work_sync(&hdev->power_on);
4210
4211         if (!test_bit(HCI_INIT, &hdev->flags) &&
4212             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4213             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4214                 hci_dev_lock(hdev);
4215                 mgmt_index_removed(hdev);
4216                 hci_dev_unlock(hdev);
4217         }
4218
4219         /* mgmt_index_removed should take care of emptying the
4220          * pending list */
4221         BUG_ON(!list_empty(&hdev->mgmt_pending));
4222
4223         hci_notify(hdev, HCI_DEV_UNREG);
4224
4225         if (hdev->rfkill) {
4226                 rfkill_unregister(hdev->rfkill);
4227                 rfkill_destroy(hdev->rfkill);
4228         }
4229
4230         smp_unregister(hdev);
4231
4232         device_del(&hdev->dev);
4233
4234         debugfs_remove_recursive(hdev->debugfs);
4235
4236         destroy_workqueue(hdev->workqueue);
4237         destroy_workqueue(hdev->req_workqueue);
4238
4239         hci_dev_lock(hdev);
4240         hci_bdaddr_list_clear(&hdev->blacklist);
4241         hci_bdaddr_list_clear(&hdev->whitelist);
4242         hci_uuids_clear(hdev);
4243         hci_link_keys_clear(hdev);
4244         hci_smp_ltks_clear(hdev);
4245         hci_smp_irks_clear(hdev);
4246         hci_remote_oob_data_clear(hdev);
4247         hci_bdaddr_list_clear(&hdev->le_white_list);
4248         hci_conn_params_clear_all(hdev);
4249         hci_dev_unlock(hdev);
4250
4251         hci_dev_put(hdev);
4252
4253         ida_simple_remove(&hci_index_ida, id);
4254 }
4255 EXPORT_SYMBOL(hci_unregister_dev);
4256
4257 /* Suspend HCI device */
4258 int hci_suspend_dev(struct hci_dev *hdev)
4259 {
4260         hci_notify(hdev, HCI_DEV_SUSPEND);
4261         return 0;
4262 }
4263 EXPORT_SYMBOL(hci_suspend_dev);
4264
4265 /* Resume HCI device */
4266 int hci_resume_dev(struct hci_dev *hdev)
4267 {
4268         hci_notify(hdev, HCI_DEV_RESUME);
4269         return 0;
4270 }
4271 EXPORT_SYMBOL(hci_resume_dev);
4272
4273 /* Reset HCI device */
4274 int hci_reset_dev(struct hci_dev *hdev)
4275 {
4276         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4277         struct sk_buff *skb;
4278
4279         skb = bt_skb_alloc(3, GFP_ATOMIC);
4280         if (!skb)
4281                 return -ENOMEM;
4282
4283         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4284         memcpy(skb_put(skb, 3), hw_err, 3);
4285
4286         /* Send Hardware Error to upper stack */
4287         return hci_recv_frame(hdev, skb);
4288 }
4289 EXPORT_SYMBOL(hci_reset_dev);
4290
4291 /* Receive frame from HCI drivers */
4292 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4293 {
4294         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4295                       !test_bit(HCI_INIT, &hdev->flags))) {
4296                 kfree_skb(skb);
4297                 return -ENXIO;
4298         }
4299
4300         /* Incoming skb */
4301         bt_cb(skb)->incoming = 1;
4302
4303         /* Time stamp */
4304         __net_timestamp(skb);
4305
4306         skb_queue_tail(&hdev->rx_q, skb);
4307         queue_work(hdev->workqueue, &hdev->rx_work);
4308
4309         return 0;
4310 }
4311 EXPORT_SYMBOL(hci_recv_frame);
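
/* Illustrative sketch (hypothetical helper, not compiled): how a
 * driver's receive path would hand a complete HCI event to the core.
 * The core takes ownership of the skb and frees it itself if the
 * device is neither up nor initializing.
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev,
                                 const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), buf, len);

        return hci_recv_frame(hdev, skb);
}
#endif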
4312
4313 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4314                           int count, __u8 index)
4315 {
4316         int len = 0;
4317         int hlen = 0;
4318         int remain = count;
4319         struct sk_buff *skb;
4320         struct bt_skb_cb *scb;
4321
4322         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4323             index >= NUM_REASSEMBLY)
4324                 return -EILSEQ;
4325
4326         skb = hdev->reassembly[index];
4327
4328         if (!skb) {
4329                 switch (type) {
4330                 case HCI_ACLDATA_PKT:
4331                         len = HCI_MAX_FRAME_SIZE;
4332                         hlen = HCI_ACL_HDR_SIZE;
4333                         break;
4334                 case HCI_EVENT_PKT:
4335                         len = HCI_MAX_EVENT_SIZE;
4336                         hlen = HCI_EVENT_HDR_SIZE;
4337                         break;
4338                 case HCI_SCODATA_PKT:
4339                         len = HCI_MAX_SCO_SIZE;
4340                         hlen = HCI_SCO_HDR_SIZE;
4341                         break;
4342                 }
4343
4344                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4345                 if (!skb)
4346                         return -ENOMEM;
4347
4348                 scb = (void *) skb->cb;
4349                 scb->expect = hlen;
4350                 scb->pkt_type = type;
4351
4352                 hdev->reassembly[index] = skb;
4353         }
4354
4355         while (count) {
4356                 scb = (void *) skb->cb;
4357                 len = min_t(uint, scb->expect, count);
4358
4359                 memcpy(skb_put(skb, len), data, len);
4360
4361                 count -= len;
4362                 data += len;
4363                 scb->expect -= len;
4364                 remain = count;
4365
4366                 switch (type) {
4367                 case HCI_EVENT_PKT:
4368                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4369                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4370                                 scb->expect = h->plen;
4371
4372                                 if (skb_tailroom(skb) < scb->expect) {
4373                                         kfree_skb(skb);
4374                                         hdev->reassembly[index] = NULL;
4375                                         return -ENOMEM;
4376                                 }
4377                         }
4378                         break;
4379
4380                 case HCI_ACLDATA_PKT:
4381                         if (skb->len == HCI_ACL_HDR_SIZE) {
4382                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4383                                 scb->expect = __le16_to_cpu(h->dlen);
4384
4385                                 if (skb_tailroom(skb) < scb->expect) {
4386                                         kfree_skb(skb);
4387                                         hdev->reassembly[index] = NULL;
4388                                         return -ENOMEM;
4389                                 }
4390                         }
4391                         break;
4392
4393                 case HCI_SCODATA_PKT:
4394                         if (skb->len == HCI_SCO_HDR_SIZE) {
4395                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4396                                 scb->expect = h->dlen;
4397
4398                                 if (skb_tailroom(skb) < scb->expect) {
4399                                         kfree_skb(skb);
4400                                         hdev->reassembly[index] = NULL;
4401                                         return -ENOMEM;
4402                                 }
4403                         }
4404                         break;
4405                 }
4406
4407                 if (scb->expect == 0) {
4408                         /* Complete frame */
4409
4410                         bt_cb(skb)->pkt_type = type;
4411                         hci_recv_frame(hdev, skb);
4412
4413                         hdev->reassembly[index] = NULL;
4414                         return remain;
4415                 }
4416         }
4417
4418         return remain;
4419 }
4420
4421 #define STREAM_REASSEMBLY 0
4422
4423 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4424 {
4425         int type;
4426         int rem = 0;
4427
4428         while (count) {
4429                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4430
4431                 if (!skb) {
4432                         struct { char type; } *pkt;
4433
4434                         /* Start of the frame */
4435                         pkt = data;
4436                         type = pkt->type;
4437
4438                         data++;
4439                         count--;
4440                 } else
4441                         type = bt_cb(skb)->pkt_type;
4442
4443                 rem = hci_reassembly(hdev, type, data, count,
4444                                      STREAM_REASSEMBLY);
4445                 if (rem < 0)
4446                         return rem;
4447
4448                 data += (count - rem);
4449                 count = rem;
4450         }
4451
4452         return rem;
4453 }
4454 EXPORT_SYMBOL(hci_recv_stream_fragment);
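
/* Illustrative sketch (hypothetical callback, not compiled): a
 * UART-style driver can feed raw bytes straight from its receive path.
 * The core consumes the leading packet-type octet of each frame and
 * reassembles complete packets via hci_reassembly().
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, u8 *buf, int count)
{
        if (hci_recv_stream_fragment(hdev, buf, count) < 0)
                BT_ERR("%s stream reassembly failed", hdev->name);
}
#endif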
4455
4456 /* ---- Interface to upper protocols ---- */
4457
4458 int hci_register_cb(struct hci_cb *cb)
4459 {
4460         BT_DBG("%p name %s", cb, cb->name);
4461
4462         write_lock(&hci_cb_list_lock);
4463         list_add(&cb->list, &hci_cb_list);
4464         write_unlock(&hci_cb_list_lock);
4465
4466         return 0;
4467 }
4468 EXPORT_SYMBOL(hci_register_cb);
4469
4470 int hci_unregister_cb(struct hci_cb *cb)
4471 {
4472         BT_DBG("%p name %s", cb, cb->name);
4473
4474         write_lock(&hci_cb_list_lock);
4475         list_del(&cb->list);
4476         write_unlock(&hci_cb_list_lock);
4477
4478         return 0;
4479 }
4480 EXPORT_SYMBOL(hci_unregister_cb);
4481
4482 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4483 {
4484         int err;
4485
4486         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4487
4488         /* Time stamp */
4489         __net_timestamp(skb);
4490
4491         /* Send copy to monitor */
4492         hci_send_to_monitor(hdev, skb);
4493
4494         if (atomic_read(&hdev->promisc)) {
4495                 /* Send copy to the sockets */
4496                 hci_send_to_sock(hdev, skb);
4497         }
4498
4499         /* Get rid of skb owner, prior to sending to the driver. */
4500         skb_orphan(skb);
4501
4502         err = hdev->send(hdev, skb);
4503         if (err < 0) {
4504                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4505                 kfree_skb(skb);
4506         }
4507 }
4508
4509 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4510 {
4511         skb_queue_head_init(&req->cmd_q);
4512         req->hdev = hdev;
4513         req->err = 0;
4514 }
4515
4516 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4517 {
4518         struct hci_dev *hdev = req->hdev;
4519         struct sk_buff *skb;
4520         unsigned long flags;
4521
4522         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4523
4524         /* If an error occurred during request building, remove all HCI
4525          * commands queued on the HCI request queue.
4526          */
4527         if (req->err) {
4528                 skb_queue_purge(&req->cmd_q);
4529                 return req->err;
4530         }
4531
4532         /* Do not allow empty requests */
4533         if (skb_queue_empty(&req->cmd_q))
4534                 return -ENODATA;
4535
4536         skb = skb_peek_tail(&req->cmd_q);
4537         bt_cb(skb)->req.complete = complete;
4538
4539         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4540         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4541         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4542
4543         queue_work(hdev->workqueue, &hdev->cmd_work);
4544
4545         return 0;
4546 }
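
/* Illustrative sketch (not compiled): building and running a request.
 * The completion callback fires once the last command in the request
 * has completed; example_complete is a hypothetical function of type
 * hci_req_complete_t.
 */
#if 0
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* Returns -ENODATA if no command was queued */
        return hci_req_run(&req, example_complete);
}
#endif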
4547
4548 bool hci_req_pending(struct hci_dev *hdev)
4549 {
4550         return (hdev->req_status == HCI_REQ_PEND);
4551 }
4552
4553 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4554                                        u32 plen, const void *param)
4555 {
4556         int len = HCI_COMMAND_HDR_SIZE + plen;
4557         struct hci_command_hdr *hdr;
4558         struct sk_buff *skb;
4559
4560         skb = bt_skb_alloc(len, GFP_ATOMIC);
4561         if (!skb)
4562                 return NULL;
4563
4564         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4565         hdr->opcode = cpu_to_le16(opcode);
4566         hdr->plen   = plen;
4567
4568         if (plen)
4569                 memcpy(skb_put(skb, plen), param, plen);
4570
4571         BT_DBG("skb len %d", skb->len);
4572
4573         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4574         bt_cb(skb)->opcode = opcode;
4575
4576         return skb;
4577 }
4578
4579 /* Send HCI command */
4580 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4581                  const void *param)
4582 {
4583         struct sk_buff *skb;
4584
4585         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4586
4587         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4588         if (!skb) {
4589                 BT_ERR("%s no memory for command", hdev->name);
4590                 return -ENOMEM;
4591         }
4592
4593         /* Stand-alone HCI commands must be flagged as
4594          * single-command requests.
4595          */
4596         bt_cb(skb)->req.start = true;
4597
4598         skb_queue_tail(&hdev->cmd_q, skb);
4599         queue_work(hdev->workqueue, &hdev->cmd_work);
4600
4601         return 0;
4602 }
4603
4604 /* Queue a command to an asynchronous HCI request */
4605 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4606                     const void *param, u8 event)
4607 {
4608         struct hci_dev *hdev = req->hdev;
4609         struct sk_buff *skb;
4610
4611         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4612
4613         /* If an error occurred during request building, there is no point in
4614          * queueing the HCI command. We can simply return.
4615          */
4616         if (req->err)
4617                 return;
4618
4619         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4620         if (!skb) {
4621                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4622                        hdev->name, opcode);
4623                 req->err = -ENOMEM;
4624                 return;
4625         }
4626
4627         if (skb_queue_empty(&req->cmd_q))
4628                 bt_cb(skb)->req.start = true;
4629
4630         bt_cb(skb)->req.event = event;
4631
4632         skb_queue_tail(&req->cmd_q, skb);
4633 }
4634
4635 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4636                  const void *param)
4637 {
4638         hci_req_add_ev(req, opcode, plen, param, 0);
4639 }
4640
4641 /* Get data from the previously sent command */
4642 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4643 {
4644         struct hci_command_hdr *hdr;
4645
4646         if (!hdev->sent_cmd)
4647                 return NULL;
4648
4649         hdr = (void *) hdev->sent_cmd->data;
4650
4651         if (hdr->opcode != cpu_to_le16(opcode))
4652                 return NULL;
4653
4654         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4655
4656         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4657 }
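
/* Illustrative sketch (hypothetical handler, not compiled): a Command
 * Complete handler can recover the parameters of the command that
 * triggered the event, since the event itself often omits them. This
 * mirrors the pattern used by the handlers in hci_event.c.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev)
{
        u8 *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        BT_DBG("%s scan enable param 0x%2.2x", hdev->name, *sent);
}
#endif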
4658
4659 /* Send ACL data */
4660 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4661 {
4662         struct hci_acl_hdr *hdr;
4663         int len = skb->len;
4664
4665         skb_push(skb, HCI_ACL_HDR_SIZE);
4666         skb_reset_transport_header(skb);
4667         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4668         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4669         hdr->dlen   = cpu_to_le16(len);
4670 }
4671
4672 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4673                           struct sk_buff *skb, __u16 flags)
4674 {
4675         struct hci_conn *conn = chan->conn;
4676         struct hci_dev *hdev = conn->hdev;
4677         struct sk_buff *list;
4678
4679         skb->len = skb_headlen(skb);
4680         skb->data_len = 0;
4681
4682         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4683
4684         switch (hdev->dev_type) {
4685         case HCI_BREDR:
4686                 hci_add_acl_hdr(skb, conn->handle, flags);
4687                 break;
4688         case HCI_AMP:
4689                 hci_add_acl_hdr(skb, chan->handle, flags);
4690                 break;
4691         default:
4692                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4693                 return;
4694         }
4695
4696         list = skb_shinfo(skb)->frag_list;
4697         if (!list) {
4698                 /* Non fragmented */
4699                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4700
4701                 skb_queue_tail(queue, skb);
4702         } else {
4703                 /* Fragmented */
4704                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4705
4706                 skb_shinfo(skb)->frag_list = NULL;
4707
4708                 /* Queue all fragments atomically. We need to use spin_lock_bh
4709                  * here because of 6LoWPAN links, as there this function is
4710                  * called from softirq and using normal spin lock could cause
4711                  * deadlocks.
4712                  */
4713                 spin_lock_bh(&queue->lock);
4714
4715                 __skb_queue_tail(queue, skb);
4716
4717                 flags &= ~ACL_START;
4718                 flags |= ACL_CONT;
4719                 do {
4720                         skb = list; list = list->next;
4721
4722                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4723                         hci_add_acl_hdr(skb, conn->handle, flags);
4724
4725                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4726
4727                         __skb_queue_tail(queue, skb);
4728                 } while (list);
4729
4730                 spin_unlock_bh(&queue->lock);
4731         }
4732 }
4733
4734 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4735 {
4736         struct hci_dev *hdev = chan->conn->hdev;
4737
4738         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4739
4740         hci_queue_acl(chan, &chan->data_q, skb, flags);
4741
4742         queue_work(hdev->workqueue, &hdev->tx_work);
4743 }
4744
4745 /* Send SCO data */
4746 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4747 {
4748         struct hci_dev *hdev = conn->hdev;
4749         struct hci_sco_hdr hdr;
4750
4751         BT_DBG("%s len %d", hdev->name, skb->len);
4752
4753         hdr.handle = cpu_to_le16(conn->handle);
4754         hdr.dlen   = skb->len;
4755
4756         skb_push(skb, HCI_SCO_HDR_SIZE);
4757         skb_reset_transport_header(skb);
4758         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4759
4760         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4761
4762         skb_queue_tail(&conn->data_q, skb);
4763         queue_work(hdev->workqueue, &hdev->tx_work);
4764 }
4765
4766 /* ---- HCI TX task (outgoing data) ---- */
4767
4768 /* HCI Connection scheduler */
4769 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4770                                      int *quote)
4771 {
4772         struct hci_conn_hash *h = &hdev->conn_hash;
4773         struct hci_conn *conn = NULL, *c;
4774         unsigned int num = 0, min = ~0;
4775
4776         /* We don't have to lock device here. Connections are always
4777          * added and removed with TX task disabled. */
4778
4779         rcu_read_lock();
4780
4781         list_for_each_entry_rcu(c, &h->list, list) {
4782                 if (c->type != type || skb_queue_empty(&c->data_q))
4783                         continue;
4784
4785                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4786                         continue;
4787
4788                 num++;
4789
4790                 if (c->sent < min) {
4791                         min  = c->sent;
4792                         conn = c;
4793                 }
4794
4795                 if (hci_conn_num(hdev, type) == num)
4796                         break;
4797         }
4798
4799         rcu_read_unlock();
4800
4801         if (conn) {
4802                 int cnt, q;
4803
4804                 switch (conn->type) {
4805                 case ACL_LINK:
4806                         cnt = hdev->acl_cnt;
4807                         break;
4808                 case SCO_LINK:
4809                 case ESCO_LINK:
4810                         cnt = hdev->sco_cnt;
4811                         break;
4812                 case LE_LINK:
4813                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4814                         break;
4815                 default:
4816                         cnt = 0;
4817                         BT_ERR("Unknown link type");
4818                 }
4819
4820                 q = cnt / num;
4821                 *quote = q ? q : 1;
4822         } else
4823                 *quote = 0;
4824
4825         BT_DBG("conn %p quote %d", conn, *quote);
4826         return conn;
4827 }
4828
4829 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4830 {
4831         struct hci_conn_hash *h = &hdev->conn_hash;
4832         struct hci_conn *c;
4833
4834         BT_ERR("%s link tx timeout", hdev->name);
4835
4836         rcu_read_lock();
4837
4838         /* Kill stalled connections */
4839         list_for_each_entry_rcu(c, &h->list, list) {
4840                 if (c->type == type && c->sent) {
4841                         BT_ERR("%s killing stalled connection %pMR",
4842                                hdev->name, &c->dst);
4843                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4844                 }
4845         }
4846
4847         rcu_read_unlock();
4848 }
4849
4850 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4851                                       int *quote)
4852 {
4853         struct hci_conn_hash *h = &hdev->conn_hash;
4854         struct hci_chan *chan = NULL;
4855         unsigned int num = 0, min = ~0, cur_prio = 0;
4856         struct hci_conn *conn;
4857         int cnt, q, conn_num = 0;
4858
4859         BT_DBG("%s", hdev->name);
4860
4861         rcu_read_lock();
4862
4863         list_for_each_entry_rcu(conn, &h->list, list) {
4864                 struct hci_chan *tmp;
4865
4866                 if (conn->type != type)
4867                         continue;
4868
4869                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4870                         continue;
4871
4872                 conn_num++;
4873
4874                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4875                         struct sk_buff *skb;
4876
4877                         if (skb_queue_empty(&tmp->data_q))
4878                                 continue;
4879
4880                         skb = skb_peek(&tmp->data_q);
4881                         if (skb->priority < cur_prio)
4882                                 continue;
4883
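                        /* A higher-priority head packet restarts the
                         * election, so only channels whose head skb is
                         * at the highest queued priority compete for
                         * this round's quote.
                         */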
4884                         if (skb->priority > cur_prio) {
4885                                 num = 0;
4886                                 min = ~0;
4887                                 cur_prio = skb->priority;
4888                         }
4889
4890                         num++;
4891
4892                         if (conn->sent < min) {
4893                                 min  = conn->sent;
4894                                 chan = tmp;
4895                         }
4896                 }
4897
4898                 if (hci_conn_num(hdev, type) == conn_num)
4899                         break;
4900         }
4901
4902         rcu_read_unlock();
4903
4904         if (!chan)
4905                 return NULL;
4906
4907         switch (chan->conn->type) {
4908         case ACL_LINK:
4909                 cnt = hdev->acl_cnt;
4910                 break;
4911         case AMP_LINK:
4912                 cnt = hdev->block_cnt;
4913                 break;
4914         case SCO_LINK:
4915         case ESCO_LINK:
4916                 cnt = hdev->sco_cnt;
4917                 break;
4918         case LE_LINK:
4919                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4920                 break;
4921         default:
4922                 cnt = 0;
4923                 BT_ERR("Unknown link type");
4924         }
4925
4926         q = cnt / num;
4927         *quote = q ? q : 1;
4928         BT_DBG("chan %p quote %d", chan, *quote);
4929         return chan;
4930 }
4931
4932 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4933 {
4934         struct hci_conn_hash *h = &hdev->conn_hash;
4935         struct hci_conn *conn;
4936         int num = 0;
4937
4938         BT_DBG("%s", hdev->name);
4939
4940         rcu_read_lock();
4941
4942         list_for_each_entry_rcu(conn, &h->list, list) {
4943                 struct hci_chan *chan;
4944
4945                 if (conn->type != type)
4946                         continue;
4947
4948                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4949                         continue;
4950
4951                 num++;
4952
4953                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4954                         struct sk_buff *skb;
4955
4956                         if (chan->sent) {
4957                                 chan->sent = 0;
4958                                 continue;
4959                         }
4960
4961                         if (skb_queue_empty(&chan->data_q))
4962                                 continue;
4963
4964                         skb = skb_peek(&chan->data_q);
4965                         if (skb->priority >= HCI_PRIO_MAX - 1)
4966                                 continue;
4967
4968                         skb->priority = HCI_PRIO_MAX - 1;
4969
4970                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4971                                skb->priority);
4972                 }
4973
4974                 if (hci_conn_num(hdev, type) == num)
4975                         break;
4976         }
4977
4978         rcu_read_unlock();
4980 }
4981
4982 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4983 {
4984         /* Calculate count of blocks used by this packet */
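        /* Example: with hdev->block_len == 339, a 672 byte frame has
         * 672 - HCI_ACL_HDR_SIZE == 668 bytes of payload and occupies
         * DIV_ROUND_UP(668, 339) == 2 controller blocks.
         */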
4985         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4986 }
4987
4988 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4989 {
4990         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4991                 /* ACL tx timeout must be longer than maximum
4992                  * link supervision timeout (40.9 seconds) */
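                /* (HCI_ACL_TX_TIMEOUT is 45 seconds, comfortably above
                 * that 40.9 second ceiling.)
                 */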
4993                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4994                                        HCI_ACL_TX_TIMEOUT))
4995                         hci_link_tx_to(hdev, ACL_LINK);
4996         }
4997 }
4998
4999 static void hci_sched_acl_pkt(struct hci_dev *hdev)
5000 {
5001         unsigned int cnt = hdev->acl_cnt;
5002         struct hci_chan *chan;
5003         struct sk_buff *skb;
5004         int quote;
5005
5006         __check_timeout(hdev, cnt);
5007
5008         while (hdev->acl_cnt &&
5009                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
5010                 u32 priority = (skb_peek(&chan->data_q))->priority;
5011                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5012                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5013                                skb->len, skb->priority);
5014
5015                         /* Stop if priority has changed */
5016                         if (skb->priority < priority)
5017                                 break;
5018
5019                         skb = skb_dequeue(&chan->data_q);
5020
5021                         hci_conn_enter_active_mode(chan->conn,
5022                                                    bt_cb(skb)->force_active);
5023
5024                         hci_send_frame(hdev, skb);
5025                         hdev->acl_last_tx = jiffies;
5026
5027                         hdev->acl_cnt--;
5028                         chan->sent++;
5029                         chan->conn->sent++;
5030                 }
5031         }
5032
5033         if (cnt != hdev->acl_cnt)
5034                 hci_prio_recalculate(hdev, ACL_LINK);
5035 }
5036
5037 static void hci_sched_acl_blk(struct hci_dev *hdev)
5038 {
5039         unsigned int cnt = hdev->block_cnt;
5040         struct hci_chan *chan;
5041         struct sk_buff *skb;
5042         int quote;
5043         u8 type;
5044
5045         __check_timeout(hdev, cnt);
5046
5047         BT_DBG("%s", hdev->name);
5048
5049         if (hdev->dev_type == HCI_AMP)
5050                 type = AMP_LINK;
5051         else
5052                 type = ACL_LINK;
5053
5054         while (hdev->block_cnt > 0 &&
5055                (chan = hci_chan_sent(hdev, type, &quote))) {
5056                 u32 priority = (skb_peek(&chan->data_q))->priority;
5057                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5058                         int blocks;
5059
5060                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5061                                skb->len, skb->priority);
5062
5063                         /* Stop if priority has changed */
5064                         if (skb->priority < priority)
5065                                 break;
5066
5067                         skb = skb_dequeue(&chan->data_q);
5068
5069                         blocks = __get_blocks(hdev, skb);
5070                         if (blocks > hdev->block_cnt)
5071                                 return;
5072
5073                         hci_conn_enter_active_mode(chan->conn,
5074                                                    bt_cb(skb)->force_active);
5075
5076                         hci_send_frame(hdev, skb);
5077                         hdev->acl_last_tx = jiffies;
5078
5079                         hdev->block_cnt -= blocks;
5080                         quote -= blocks;
5081
5082                         chan->sent += blocks;
5083                         chan->conn->sent += blocks;
5084                 }
5085         }
5086
5087         if (cnt != hdev->block_cnt)
5088                 hci_prio_recalculate(hdev, type);
5089 }
5090
5091 static void hci_sched_acl(struct hci_dev *hdev)
5092 {
5093         BT_DBG("%s", hdev->name);
5094
5095         /* No ACL link over BR/EDR controller */
5096         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5097                 return;
5098
5099         /* No AMP link over AMP controller */
5100         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5101                 return;
5102
5103         switch (hdev->flow_ctl_mode) {
5104         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5105                 hci_sched_acl_pkt(hdev);
5106                 break;
5107
5108         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5109                 hci_sched_acl_blk(hdev);
5110                 break;
5111         }
5112 }
5113
5114 /* Schedule SCO */
5115 static void hci_sched_sco(struct hci_dev *hdev)
5116 {
5117         struct hci_conn *conn;
5118         struct sk_buff *skb;
5119         int quote;
5120
5121         BT_DBG("%s", hdev->name);
5122
5123         if (!hci_conn_num(hdev, SCO_LINK))
5124                 return;
5125
5126         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5127                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5128                         BT_DBG("skb %p len %d", skb, skb->len);
5129                         hci_send_frame(hdev, skb);
5130
5131                         conn->sent++;
5132                         if (conn->sent == ~0)
5133                                 conn->sent = 0;
5134                 }
5135         }
5136 }
5137
5138 static void hci_sched_esco(struct hci_dev *hdev)
5139 {
5140         struct hci_conn *conn;
5141         struct sk_buff *skb;
5142         int quote;
5143
5144         BT_DBG("%s", hdev->name);
5145
5146         if (!hci_conn_num(hdev, ESCO_LINK))
5147                 return;
5148
5149         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5150                                                      &quote))) {
5151                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5152                         BT_DBG("skb %p len %d", skb, skb->len);
5153                         hci_send_frame(hdev, skb);
5154
5155                         conn->sent++;
5156                         if (conn->sent == ~0)
5157                                 conn->sent = 0;
5158                 }
5159         }
5160 }
5161
5162 static void hci_sched_le(struct hci_dev *hdev)
5163 {
5164         struct hci_chan *chan;
5165         struct sk_buff *skb;
5166         int quote, cnt, tmp;
5167
5168         BT_DBG("%s", hdev->name);
5169
5170         if (!hci_conn_num(hdev, LE_LINK))
5171                 return;
5172
5173         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5174                 /* LE tx timeout must be longer than maximum
5175                  * link supervision timeout (40.9 seconds) */
5176                 if (!hdev->le_cnt && hdev->le_pkts &&
5177                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5178                         hci_link_tx_to(hdev, LE_LINK);
5179         }
5180
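        /* Controllers without a dedicated LE buffer pool report
         * le_pkts == 0 and share the ACL buffers, so account against
         * acl_cnt in that case (see the write-back below).
         */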
5181         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5182         tmp = cnt;
5183         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5184                 u32 priority = (skb_peek(&chan->data_q))->priority;
5185                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5186                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5187                                skb->len, skb->priority);
5188
5189                         /* Stop if priority has changed */
5190                         if (skb->priority < priority)
5191                                 break;
5192
5193                         skb = skb_dequeue(&chan->data_q);
5194
5195                         hci_send_frame(hdev, skb);
5196                         hdev->le_last_tx = jiffies;
5197
5198                         cnt--;
5199                         chan->sent++;
5200                         chan->conn->sent++;
5201                 }
5202         }
5203
5204         if (hdev->le_pkts)
5205                 hdev->le_cnt = cnt;
5206         else
5207                 hdev->acl_cnt = cnt;
5208
5209         if (cnt != tmp)
5210                 hci_prio_recalculate(hdev, LE_LINK);
5211 }
5212
5213 static void hci_tx_work(struct work_struct *work)
5214 {
5215         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5216         struct sk_buff *skb;
5217
5218         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5219                hdev->sco_cnt, hdev->le_cnt);
5220
5221         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5222                 /* Schedule queues and send stuff to HCI driver */
5223                 hci_sched_acl(hdev);
5224                 hci_sched_sco(hdev);
5225                 hci_sched_esco(hdev);
5226                 hci_sched_le(hdev);
5227         }
5228
5229         /* Send next queued raw (unknown type) packet */
5230         while ((skb = skb_dequeue(&hdev->raw_q)))
5231                 hci_send_frame(hdev, skb);
5232 }
5233
5234 /* ----- HCI RX task (incoming data processing) ----- */
5235
5236 /* ACL data packet */
5237 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5238 {
5239         struct hci_acl_hdr *hdr = (void *) skb->data;
5240         struct hci_conn *conn;
5241         __u16 handle, flags;
5242
5243         skb_pull(skb, HCI_ACL_HDR_SIZE);
5244
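        /* The 16-bit handle field packs a 12-bit connection handle
         * plus the packet boundary and broadcast flags in the upper
         * bits; hci_handle() and hci_flags() split them apart.
         */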
5245         handle = __le16_to_cpu(hdr->handle);
5246         flags  = hci_flags(handle);
5247         handle = hci_handle(handle);
5248
5249         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5250                handle, flags);
5251
5252         hdev->stat.acl_rx++;
5253
5254         hci_dev_lock(hdev);
5255         conn = hci_conn_hash_lookup_handle(hdev, handle);
5256         hci_dev_unlock(hdev);
5257
5258         if (conn) {
5259                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5260
5261                 /* Send to upper protocol */
5262                 l2cap_recv_acldata(conn, skb, flags);
5263                 return;
5264         } else {
5265                 BT_ERR("%s ACL packet for unknown connection handle %d",
5266                        hdev->name, handle);
5267         }
5268
5269         kfree_skb(skb);
5270 }
5271
5272 /* SCO data packet */
5273 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5274 {
5275         struct hci_sco_hdr *hdr = (void *) skb->data;
5276         struct hci_conn *conn;
5277         __u16 handle;
5278
5279         skb_pull(skb, HCI_SCO_HDR_SIZE);
5280
5281         handle = __le16_to_cpu(hdr->handle);
5282
5283         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5284
5285         hdev->stat.sco_rx++;
5286
5287         hci_dev_lock(hdev);
5288         conn = hci_conn_hash_lookup_handle(hdev, handle);
5289         hci_dev_unlock(hdev);
5290
5291         if (conn) {
5292                 /* Send to upper protocol */
5293                 sco_recv_scodata(conn, skb);
5294                 return;
5295         } else {
5296                 BT_ERR("%s SCO packet for unknown connection handle %d",
5297                        hdev->name, handle);
5298         }
5299
5300         kfree_skb(skb);
5301 }
5302
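/* A request is a sequence of queued commands; the skb that begins the
 * next request carries req.start. The current request is therefore
 * complete when the command queue is empty or its head starts a new
 * request.
 */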
5303 static bool hci_req_is_complete(struct hci_dev *hdev)
5304 {
5305         struct sk_buff *skb;
5306
5307         skb = skb_peek(&hdev->cmd_q);
5308         if (!skb)
5309                 return true;
5310
5311         return bt_cb(skb)->req.start;
5312 }
5313
5314 static void hci_resend_last(struct hci_dev *hdev)
5315 {
5316         struct hci_command_hdr *sent;
5317         struct sk_buff *skb;
5318         u16 opcode;
5319
5320         if (!hdev->sent_cmd)
5321                 return;
5322
5323         sent = (void *) hdev->sent_cmd->data;
5324         opcode = __le16_to_cpu(sent->opcode);
5325         if (opcode == HCI_OP_RESET)
5326                 return;
5327
5328         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5329         if (!skb)
5330                 return;
5331
5332         skb_queue_head(&hdev->cmd_q, skb);
5333         queue_work(hdev->workqueue, &hdev->cmd_work);
5334 }
5335
5336 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5337 {
5338         hci_req_complete_t req_complete = NULL;
5339         struct sk_buff *skb;
5340         unsigned long flags;
5341
5342         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5343
5344         /* If the completed command doesn't match the last one that was
5345          * sent we need to do special handling of it.
5346          */
5347         if (!hci_sent_cmd_data(hdev, opcode)) {
5348                 /* Some CSR-based controllers generate a spontaneous
5349                  * reset complete event during init, and any pending
5350                  * command will never be completed. In such a case we
5351                  * need to resend whatever was the last sent
5352                  * command.
5353                  */
5354                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5355                         hci_resend_last(hdev);
5356
5357                 return;
5358         }
5359
5360         /* If the command succeeded and there's still more commands in
5361          * this request the request is not yet complete.
5362          */
5363         if (!status && !hci_req_is_complete(hdev))
5364                 return;
5365
5366         /* If this was the last command in a request the complete
5367          * callback would be found in hdev->sent_cmd instead of the
5368          * command queue (hdev->cmd_q).
5369          */
5370         if (hdev->sent_cmd) {
5371                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5372
5373                 if (req_complete) {
5374                         /* We must set the complete callback to NULL to
5375                          * avoid calling the callback more than once if
5376                          * this function gets called again.
5377                          */
5378                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5379
5380                         goto call_complete;
5381                 }
5382         }
5383
5384         /* Remove all pending commands belonging to this request */
5385         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5386         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5387                 if (bt_cb(skb)->req.start) {
5388                         __skb_queue_head(&hdev->cmd_q, skb);
5389                         break;
5390                 }
5391
5392                 req_complete = bt_cb(skb)->req.complete;
5393                 kfree_skb(skb);
5394         }
5395         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5396
5397 call_complete:
5398         if (req_complete)
5399                 req_complete(hdev, status);
5400 }
5401
5402 static void hci_rx_work(struct work_struct *work)
5403 {
5404         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5405         struct sk_buff *skb;
5406
5407         BT_DBG("%s", hdev->name);
5408
5409         while ((skb = skb_dequeue(&hdev->rx_q))) {
5410                 /* Send copy to monitor */
5411                 hci_send_to_monitor(hdev, skb);
5412
5413                 if (atomic_read(&hdev->promisc)) {
5414                         /* Send copy to the sockets */
5415                         hci_send_to_sock(hdev, skb);
5416                 }
5417
5418                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5419                         kfree_skb(skb);
5420                         continue;
5421                 }
5422
5423                 if (test_bit(HCI_INIT, &hdev->flags)) {
5424                         /* Don't process data packets in these states. */
5425                         switch (bt_cb(skb)->pkt_type) {
5426                         case HCI_ACLDATA_PKT:
5427                         case HCI_SCODATA_PKT:
5428                                 kfree_skb(skb);
5429                                 continue;
5430                         }
5431                 }
5432
5433                 /* Process frame */
5434                 switch (bt_cb(skb)->pkt_type) {
5435                 case HCI_EVENT_PKT:
5436                         BT_DBG("%s Event packet", hdev->name);
5437                         hci_event_packet(hdev, skb);
5438                         break;
5439
5440                 case HCI_ACLDATA_PKT:
5441                         BT_DBG("%s ACL data packet", hdev->name);
5442                         hci_acldata_packet(hdev, skb);
5443                         break;
5444
5445                 case HCI_SCODATA_PKT:
5446                         BT_DBG("%s SCO data packet", hdev->name);
5447                         hci_scodata_packet(hdev, skb);
5448                         break;
5449
5450                 default:
5451                         kfree_skb(skb);
5452                         break;
5453                 }
5454         }
5455 }
5456
5457 static void hci_cmd_work(struct work_struct *work)
5458 {
5459         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5460         struct sk_buff *skb;
5461
5462         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5463                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5464
5465         /* Send queued commands */
5466         if (atomic_read(&hdev->cmd_cnt)) {
5467                 skb = skb_dequeue(&hdev->cmd_q);
5468                 if (!skb)
5469                         return;
5470
5471                 kfree_skb(hdev->sent_cmd);
5472
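                /* Keep a clone in hdev->sent_cmd so the command can be
                 * matched against its completion event (and resent if
                 * necessary, see hci_resend_last()).
                 */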
5473                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5474                 if (hdev->sent_cmd) {
5475                         atomic_dec(&hdev->cmd_cnt);
5476                         hci_send_frame(hdev, skb);
5477                         if (test_bit(HCI_RESET, &hdev->flags))
5478                                 cancel_delayed_work(&hdev->cmd_timer);
5479                         else
5480                                 schedule_delayed_work(&hdev->cmd_timer,
5481                                                       HCI_CMD_TIMEOUT);
5482                 } else {
5483                         skb_queue_head(&hdev->cmd_q, skb);
5484                         queue_work(hdev->workqueue, &hdev->cmd_work);
5485                 }
5486         }
5487 }
5488
5489 void hci_req_add_le_scan_disable(struct hci_request *req)
5490 {
5491         struct hci_cp_le_set_scan_enable cp;
5492
5493         memset(&cp, 0, sizeof(cp));
5494         cp.enable = LE_SCAN_DISABLE;
5495         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5496 }
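/* A minimal usage sketch (hypothetical caller): build a request that
 * just turns LE scanning off and fire it without a completion callback:
 *
 *     struct hci_request req;
 *
 *     hci_req_init(&req, hdev);
 *     hci_req_add_le_scan_disable(&req);
 *     hci_req_run(&req, NULL);
 */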
5497
5498 static void add_to_white_list(struct hci_request *req,
5499                               struct hci_conn_params *params)
5500 {
5501         struct hci_cp_le_add_to_white_list cp;
5502
5503         cp.bdaddr_type = params->addr_type;
5504         bacpy(&cp.bdaddr, &params->addr);
5505
5506         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5507 }
5508
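/* Returns the LE scan filter policy to use: 0x00 (accept all
 * advertising) when the white list cannot represent the pending
 * devices, either because it is too small or because a device uses
 * RPAs, and 0x01 (white list only) otherwise.
 */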
5509 static u8 update_white_list(struct hci_request *req)
5510 {
5511         struct hci_dev *hdev = req->hdev;
5512         struct hci_conn_params *params;
5513         struct bdaddr_list *b;
5514         u8 white_list_entries = 0;
5515
5516         /* Go through the current white list programmed into the
5517          * controller one by one and check if that address is still
5518          * in the list of pending connections or the list of devices
5519          * to report. If it is present in neither list, queue the
5520          * command to remove it from the controller.
5521          */
5522         list_for_each_entry(b, &hdev->le_white_list, list) {
5523                 struct hci_cp_le_del_from_white_list cp;
5524
5525                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5526                                               &b->bdaddr, b->bdaddr_type) ||
5527                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5528                                               &b->bdaddr, b->bdaddr_type)) {
5529                         white_list_entries++;
5530                         continue;
5531                 }
5532
5533                 cp.bdaddr_type = b->bdaddr_type;
5534                 bacpy(&cp.bdaddr, &b->bdaddr);
5535
5536                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5537                             sizeof(cp), &cp);
5538         }
5539
5540         /* Since all stale white list entries have now been
5541          * removed, walk through the list of pending connections
5542          * and ensure that any new device gets programmed into
5543          * the controller.
5544          *
5545          * If the list of devices is larger than the number of
5546          * available white list entries in the controller, then
5547          * just abort and return a filter policy value that does
5548          * not use the white list.
5549          */
5550         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5551                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5552                                            &params->addr, params->addr_type))
5553                         continue;
5554
5555                 if (white_list_entries >= hdev->le_white_list_size) {
5556                         /* Select filter policy to accept all advertising */
5557                         return 0x00;
5558                 }
5559
5560                 if (hci_find_irk_by_addr(hdev, &params->addr,
5561                                          params->addr_type)) {
5562                         /* White list can not be used with RPAs */
5563                         return 0x00;
5564                 }
5565
5566                 white_list_entries++;
5567                 add_to_white_list(req, params);
5568         }
5569
5570         /* After adding all new pending connections, walk through
5571          * the list of pending reports and also add these to the
5572          * white list if there is still space.
5573          */
5574         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5575                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5576                                            &params->addr, params->addr_type))
5577                         continue;
5578
5579                 if (white_list_entries >= hdev->le_white_list_size) {
5580                         /* Select filter policy to accept all advertising */
5581                         return 0x00;
5582                 }
5583
5584                 if (hci_find_irk_by_addr(hdev, &params->addr,
5585                                          params->addr_type)) {
5586                         /* White list can not be used with RPAs */
5587                         return 0x00;
5588                 }
5589
5590                 white_list_entries++;
5591                 add_to_white_list(req, params);
5592         }
5593
5594         /* Select filter policy to use white list */
5595         return 0x01;
5596 }
5597
5598 void hci_req_add_le_passive_scan(struct hci_request *req)
5599 {
5600         struct hci_cp_le_set_scan_param param_cp;
5601         struct hci_cp_le_set_scan_enable enable_cp;
5602         struct hci_dev *hdev = req->hdev;
5603         u8 own_addr_type;
5604         u8 filter_policy;
5605
5606         /* Set require_privacy to false since no SCAN_REQ are sent
5607          * during passive scanning. Not using an unresolvable address
5608          * here is important so that peer devices using direct
5609          * advertising with our address will be correctly reported
5610          * by the controller.
5611          */
5612         if (hci_update_random_address(req, false, &own_addr_type))
5613                 return;
5614
5615         /* Adding or removing entries from the white list must
5616          * happen before enabling scanning. The controller does
5617          * not allow white list modification while scanning.
5618          */
5619         filter_policy = update_white_list(req);
5620
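        /* le_scan_interval and le_scan_window are in units of 0.625 ms;
         * for example the values 0x0060/0x0030 correspond to a 60 ms
         * interval with a 30 ms window.
         */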
5621         memset(&param_cp, 0, sizeof(param_cp));
5622         param_cp.type = LE_SCAN_PASSIVE;
5623         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5624         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5625         param_cp.own_address_type = own_addr_type;
5626         param_cp.filter_policy = filter_policy;
5627         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5628                     &param_cp);
5629
5630         memset(&enable_cp, 0, sizeof(enable_cp));
5631         enable_cp.enable = LE_SCAN_ENABLE;
5632         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5633         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5634                     &enable_cp);
5635 }
5636
5637 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5638 {
5639         if (status)
5640                 BT_DBG("HCI request failed to update background scanning: "
5641                        "status 0x%2.2x", status);
5642 }
5643
5644 /* This function controls the background scanning based on hdev->pend_le_conns
5645  * list. If there are pending LE connections we start the background scanning,
5646  * otherwise we stop it.
5647  *
5648  * This function requires that the caller holds hdev->lock.
5649  */
5650 void hci_update_background_scan(struct hci_dev *hdev)
5651 {
5652         struct hci_request req;
5653         struct hci_conn *conn;
5654         int err;
5655
5656         if (!test_bit(HCI_UP, &hdev->flags) ||
5657             test_bit(HCI_INIT, &hdev->flags) ||
5658             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5659             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5660             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5661             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5662                 return;
5663
5664         /* No point in doing scanning if LE support hasn't been enabled */
5665         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5666                 return;
5667
5668         /* If discovery is active don't interfere with it */
5669         if (hdev->discovery.state != DISCOVERY_STOPPED)
5670                 return;
5671
5672         hci_req_init(&req, hdev);
5673
5674         if (list_empty(&hdev->pend_le_conns) &&
5675             list_empty(&hdev->pend_le_reports)) {
5676                 /* If there are no pending LE connections or devices
5677                  * to be scanned for, we should stop the background
5678                  * scanning.
5679                  */
5680
5681                 /* If controller is not scanning we are done. */
5682                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5683                         return;
5684
5685                 hci_req_add_le_scan_disable(&req);
5686
5687                 BT_DBG("%s stopping background scanning", hdev->name);
5688         } else {
5689                 /* If there is at least one pending LE connection, we should
5690                  * keep the background scan running.
5691                  */
5692
5693                 /* If controller is connecting, we should not start scanning
5694                  * since some controllers are not able to scan and connect at
5695                  * the same time.
5696                  */
5697                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5698                 if (conn)
5699                         return;
5700
5701                 /* If controller is currently scanning, we stop it to ensure we
5702                  * don't miss any advertising (due to duplicates filter).
5703                  */
5704                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5705                         hci_req_add_le_scan_disable(&req);
5706
5707                 hci_req_add_le_passive_scan(&req);
5708
5709                 BT_DBG("%s starting background scanning", hdev->name);
5710         }
5711
5712         err = hci_req_run(&req, update_background_scan_complete);
5713         if (err)
5714                 BT_ERR("Failed to run HCI request: err %d", err);
5715 }
5716
5717 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5718 {
5719         struct bdaddr_list *b;
5720
5721         list_for_each_entry(b, &hdev->whitelist, list) {
5722                 struct hci_conn *conn;
5723
5724                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5725                 if (!conn)
5726                         return true;
5727
5728                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5729                         return true;
5730         }
5731
5732         return false;
5733 }
5734
5735 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5736 {
5737         u8 scan;
5738
5739         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5740                 return;
5741
5742         if (!hdev_is_powered(hdev))
5743                 return;
5744
5745         if (mgmt_powering_down(hdev))
5746                 return;
5747
5748         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5749             disconnected_whitelist_entries(hdev))
5750                 scan = SCAN_PAGE;
5751         else
5752                 scan = SCAN_DISABLED;
5753
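        /* Nothing to do if page scan is already in the requested state */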
5754         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5755                 return;
5756
5757         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5758                 scan |= SCAN_INQUIRY;
5759
5760         if (req)
5761                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5762         else
5763                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5764 }