/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

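/* Most boolean debugfs attributes below share one pattern: reads return
 * "Y\n" or "N\n", and writes accept any strtobool() input ("Y"/"N",
 * "1"/"0", ...) and flip the corresponding flag.
 */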
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

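/* Enabling Device Under Test mode sends HCI_OP_ENABLE_DUT_MODE; since
 * there is no explicit disable command, disabling is done by resetting
 * the controller. Both commands are issued synchronously under
 * req_lock.
 */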
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

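/* The numeric attributes below use DEFINE_SIMPLE_ATTRIBUTE(), which
 * generates the file_operations from a get()/set() pair plus a printf
 * format string; passing NULL for set() makes the attribute read-only.
 */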
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

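/* Note that, unlike force_sc_support above, there is no HCI_UP check
 * here, so this flag can be toggled while the controller is up.
 */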
static ssize_t force_lesc_support_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_lesc_support_fops = {
        .open           = simple_open,
        .read           = force_lesc_support_read,
        .write          = force_lesc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

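/* Synchronous requests: the caller queues commands with hci_req_add(),
 * kicks them off with hci_req_run() and sleeps on req_wait_q. The
 * completion callback below records the status of the final command
 * and wakes the waiter.
 */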
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

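/* Retrieve the event skb that concluded the last synchronous request.
 * hdev->recv_evt is stashed by the event processing path; consume it
 * here and verify that it carries the expected event or, when event
 * is 0, a Command Complete for the expected opcode.
 */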
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

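/* Send a single HCI command and wait for the result. A zero event
 * argument waits for the Command Complete of opcode; a non-zero value
 * waits for that specific event instead. Returns the event skb on
 * success.
 */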
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

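/* Controller setup is staged: init1 resets the controller and reads
 * basic identity data, and the later init stages below then send
 * commands that depend on the features and supported-commands masks
 * discovered here.
 */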
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
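        /* (0x7d00 slots * 0.625 ms per baseband slot = 20 s) */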

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

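/* Pick the best Write Inquiry Mode value: 0x02 = inquiry result with
 * RSSI or extended inquiry result, 0x01 = inquiry result with RSSI,
 * 0x00 = standard. The hardcoded manufacturer/revision checks cover
 * older controllers that handle RSSI inquiry results without
 * advertising it in their LMP features.
 */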
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However, some controllers report a
                 * max_page of 0 as long as SSP has not been enabled. To
                 * get proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

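/* Build the default link policy from the LMP features so that only
 * modes the controller actually supports (role switch, hold, sniff,
 * park) are permitted.
 */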
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

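/* hdev->commands[] mirrors the Read Local Supported Commands bitmask,
 * so the byte/bit tests below gate optional commands on what the
 * controller claims to support.
 */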
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
1699          * Check the supported commands and send this one only if it is
1700          * marked as supported. If not supported, assume that the
1701          * controller does not have actual support for stored link keys,
1702          * which makes this command redundant anyway.
1703          *
1704          * Some controllers indicate that they support deleting stored
1705          * link keys, but they don't. The quirk lets a driver
1706          * just disable this command.
1707          */
1708         if (hdev->commands[6] & 0x80 &&
1709             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1710                 struct hci_cp_delete_stored_link_key cp;
1711
1712                 bacpy(&cp.bdaddr, BDADDR_ANY);
1713                 cp.delete_all = 0x01;
1714                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715                             sizeof(cp), &cp);
1716         }
1717
1718         if (hdev->commands[5] & 0x10)
1719                 hci_setup_link_policy(req);
1720
1721         if (lmp_le_capable(hdev)) {
1722                 u8 events[8];
1723
1724                 memset(events, 0, sizeof(events));
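                /* Bits 0-3: LE Connection Complete, LE Advertising
                 * Report, LE Connection Update Complete and LE Read
                 * Remote Used Features Complete events.
                 */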
1725                 events[0] = 0x0f;
1726
1727                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728                         events[0] |= 0x10;      /* LE Long Term Key Request */
1729
1730                 /* If controller supports the Connection Parameters Request
1731                  * Link Layer Procedure, enable the corresponding event.
1732                  */
1733                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734                         events[0] |= 0x20;      /* LE Remote Connection
1735                                                  * Parameter Request
1736                                                  */
1737
1738                 /* If the controller supports Extended Scanner Filter
1739                  * Policies, enable the corresponding event.
1740                  */
1741                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1742                         events[1] |= 0x04;      /* LE Direct Advertising
1743                                                  * Report
1744                                                  */
1745
1746                 /* If the controller supports the LE Read Local P-256
1747                  * Public Key command, enable the corresponding event.
1748                  */
1749                 if (hdev->commands[34] & 0x02)
1750                         events[0] |= 0x80;      /* LE Read Local P-256
1751                                                  * Public Key Complete
1752                                                  */
1753
1754                 /* If the controller supports the LE Generate DHKey
1755                  * command, enable the corresponding event.
1756                  */
1757                 if (hdev->commands[34] & 0x04)
1758                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
1759
1760                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1761                             events);
1762
1763                 if (hdev->commands[25] & 0x40) {
1764                         /* Read LE Advertising Channel TX Power */
1765                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1766                 }
1767
1768                 hci_set_le_support(req);
1769         }
1770
1771         /* Read features beyond page 1 if available */
1772         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1773                 struct hci_cp_read_local_ext_features cp;
1774
1775                 cp.page = p;
1776                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1777                             sizeof(cp), &cp);
1778         }
1779 }
1780
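/* Fourth stage of controller initialization: enable the remaining
 * optional features whose support bits were learned during the earlier
 * stages.
 */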
1781 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1782 {
1783         struct hci_dev *hdev = req->hdev;
1784
1785         /* Set event mask page 2 if the HCI command for it is supported */
1786         if (hdev->commands[22] & 0x04)
1787                 hci_set_event_mask_page_2(req);
1788
1789         /* Read local codec list if the HCI command is supported */
1790         if (hdev->commands[29] & 0x20)
1791                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1792
1793         /* Get MWS transport configuration if the HCI command is supported */
1794         if (hdev->commands[30] & 0x08)
1795                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1796
1797         /* Check for Synchronization Train support */
1798         if (lmp_sync_train_capable(hdev))
1799                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1800
1801         /* Enable Secure Connections if supported and configured */
1802         if (bredr_sc_enabled(hdev)) {
1803                 u8 support = 0x01;
1804                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1805                             sizeof(support), &support);
1806         }
1807 }
1808
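/* Full controller initialization: run the staged init requests
 * synchronously (AMP controllers only need the first stage) and,
 * during the initial setup phase, create the debugfs entries matching
 * the controller's capabilities.
 */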
1809 static int __hci_init(struct hci_dev *hdev)
1810 {
1811         int err;
1812
1813         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1814         if (err < 0)
1815                 return err;
1816
1817         /* The Device Under Test (DUT) mode is special and available for
1818          * all controller types. So just create it early on.
1819          */
1820         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1821                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1822                                     &dut_mode_fops);
1823         }
1824
1825         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1826          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1827          * first stage init.
1828          */
1829         if (hdev->dev_type != HCI_BREDR)
1830                 return 0;
1831
1832         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1833         if (err < 0)
1834                 return err;
1835
1836         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1837         if (err < 0)
1838                 return err;
1839
1840         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1841         if (err < 0)
1842                 return err;
1843
1844         /* Only create debugfs entries during the initial setup
1845          * phase and not every time the controller gets powered on.
1846          */
1847         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1848                 return 0;
1849
1850         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1851                             &features_fops);
1852         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853                            &hdev->manufacturer);
1854         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1856         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1857                             &device_list_fops);
1858         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1859                             &blacklist_fops);
1860         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1861
1862         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863                             &conn_info_min_age_fops);
1864         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865                             &conn_info_max_age_fops);
1866
1867         if (lmp_bredr_capable(hdev)) {
1868                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869                                     hdev, &inquiry_cache_fops);
1870                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871                                     hdev, &link_keys_fops);
1872                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873                                     hdev, &dev_class_fops);
1874                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875                                     hdev, &voice_setting_fops);
1876         }
1877
1878         if (lmp_ssp_capable(hdev)) {
1879                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1880                                     hdev, &auto_accept_delay_fops);
1881                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882                                     hdev, &force_sc_support_fops);
1883                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884                                     hdev, &sc_only_mode_fops);
1885                 if (lmp_le_capable(hdev))
1886                         debugfs_create_file("force_lesc_support", 0644,
1887                                             hdev->debugfs, hdev,
1888                                             &force_lesc_support_fops);
1889         }
1890
1891         if (lmp_sniff_capable(hdev)) {
1892                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893                                     hdev, &idle_timeout_fops);
1894                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895                                     hdev, &sniff_min_interval_fops);
1896                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897                                     hdev, &sniff_max_interval_fops);
1898         }
1899
1900         if (lmp_le_capable(hdev)) {
1901                 debugfs_create_file("identity", 0400, hdev->debugfs,
1902                                     hdev, &identity_fops);
1903                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904                                     hdev, &rpa_timeout_fops);
1905                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906                                     hdev, &random_address_fops);
1907                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908                                     hdev, &static_address_fops);
1909
1910                 /* For controllers with a public address, provide a debug
1911                  * option to force the usage of the configured static
1912                  * address. By default the public address is used.
1913                  */
1914                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915                         debugfs_create_file("force_static_address", 0644,
1916                                             hdev->debugfs, hdev,
1917                                             &force_static_address_fops);
1918
1919                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920                                   &hdev->le_white_list_size);
1921                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1922                                     &white_list_fops);
1923                 debugfs_create_file("identity_resolving_keys", 0400,
1924                                     hdev->debugfs, hdev,
1925                                     &identity_resolving_keys_fops);
1926                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927                                     hdev, &long_term_keys_fops);
1928                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929                                     hdev, &conn_min_interval_fops);
1930                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931                                     hdev, &conn_max_interval_fops);
1932                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933                                     hdev, &conn_latency_fops);
1934                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935                                     hdev, &supervision_timeout_fops);
1936                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937                                     hdev, &adv_channel_map_fops);
1938                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939                                     hdev, &adv_min_interval_fops);
1940                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941                                     hdev, &adv_max_interval_fops);
1942                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1943                                    hdev->debugfs,
1944                                    &hdev->discov_interleaved_timeout);
1945
1946                 smp_register(hdev);
1947         }
1948
1949         return 0;
1950 }
1951
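/* Minimal initialization for unconfigured controllers: reset the
 * controller (unless the HCI_QUIRK_RESET_ON_CLOSE quirk is set), read
 * the local version information and, if the driver provides a
 * set_bdaddr callback, the public Bluetooth device address.
 */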
1952 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1953 {
1954         struct hci_dev *hdev = req->hdev;
1955
1956         BT_DBG("%s %ld", hdev->name, opt);
1957
1958         /* Reset */
1959         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1960                 hci_reset_req(req, 0);
1961
1962         /* Read Local Version */
1963         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1964
1965         /* Read BD Address */
1966         if (hdev->set_bdaddr)
1967                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1968 }
1969
1970 static int __hci_unconf_init(struct hci_dev *hdev)
1971 {
1972         int err;
1973
1974         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1975                 return 0;
1976
1977         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1978         if (err < 0)
1979                 return err;
1980
1981         return 0;
1982 }
1983
1984 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1985 {
1986         __u8 scan = opt;
1987
1988         BT_DBG("%s %x", req->hdev->name, scan);
1989
1990         /* Inquiry and Page scans */
1991         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1992 }
1993
1994 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1995 {
1996         __u8 auth = opt;
1997
1998         BT_DBG("%s %x", req->hdev->name, auth);
1999
2000         /* Authentication */
2001         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
2002 }
2003
2004 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2005 {
2006         __u8 encrypt = opt;
2007
2008         BT_DBG("%s %x", req->hdev->name, encrypt);
2009
2010         /* Encryption */
2011         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
2012 }
2013
2014 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2015 {
2016         __le16 policy = cpu_to_le16(opt);
2017
2018         BT_DBG("%s %x", req->hdev->name, policy);
2019
2020         /* Default link policy */
2021         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2022 }
2023
2024 /* Get HCI device by index.
2025  * Device is held on return. */
2026 struct hci_dev *hci_dev_get(int index)
2027 {
2028         struct hci_dev *hdev = NULL, *d;
2029
2030         BT_DBG("%d", index);
2031
2032         if (index < 0)
2033                 return NULL;
2034
2035         read_lock(&hci_dev_list_lock);
2036         list_for_each_entry(d, &hci_dev_list, list) {
2037                 if (d->id == index) {
2038                         hdev = hci_dev_hold(d);
2039                         break;
2040                 }
2041         }
2042         read_unlock(&hci_dev_list_lock);
2043         return hdev;
2044 }
2045
2046 /* ---- Inquiry support ---- */
2047
2048 bool hci_discovery_active(struct hci_dev *hdev)
2049 {
2050         struct discovery_state *discov = &hdev->discovery;
2051
2052         switch (discov->state) {
2053         case DISCOVERY_FINDING:
2054         case DISCOVERY_RESOLVING:
2055                 return true;
2056
2057         default:
2058                 return false;
2059         }
2060 }
2061
2062 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2063 {
2064         int old_state = hdev->discovery.state;
2065
2066         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2067
2068         if (old_state == state)
2069                 return;
2070
2071         hdev->discovery.state = state;
2072
2073         switch (state) {
2074         case DISCOVERY_STOPPED:
2075                 hci_update_background_scan(hdev);
2076
2077                 if (old_state != DISCOVERY_STARTING)
2078                         mgmt_discovering(hdev, 0);
2079                 break;
2080         case DISCOVERY_STARTING:
2081                 break;
2082         case DISCOVERY_FINDING:
2083                 mgmt_discovering(hdev, 1);
2084                 break;
2085         case DISCOVERY_RESOLVING:
2086                 break;
2087         case DISCOVERY_STOPPING:
2088                 break;
2089         }
2090 }
2091
2092 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2093 {
2094         struct discovery_state *cache = &hdev->discovery;
2095         struct inquiry_entry *p, *n;
2096
2097         list_for_each_entry_safe(p, n, &cache->all, all) {
2098                 list_del(&p->all);
2099                 kfree(p);
2100         }
2101
2102         INIT_LIST_HEAD(&cache->unknown);
2103         INIT_LIST_HEAD(&cache->resolve);
2104 }
2105
2106 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2107                                                bdaddr_t *bdaddr)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_entry *e;
2111
2112         BT_DBG("cache %p, %pMR", cache, bdaddr);
2113
2114         list_for_each_entry(e, &cache->all, all) {
2115                 if (!bacmp(&e->data.bdaddr, bdaddr))
2116                         return e;
2117         }
2118
2119         return NULL;
2120 }
2121
2122 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2123                                                        bdaddr_t *bdaddr)
2124 {
2125         struct discovery_state *cache = &hdev->discovery;
2126         struct inquiry_entry *e;
2127
2128         BT_DBG("cache %p, %pMR", cache, bdaddr);
2129
2130         list_for_each_entry(e, &cache->unknown, list) {
2131                 if (!bacmp(&e->data.bdaddr, bdaddr))
2132                         return e;
2133         }
2134
2135         return NULL;
2136 }
2137
2138 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2139                                                        bdaddr_t *bdaddr,
2140                                                        int state)
2141 {
2142         struct discovery_state *cache = &hdev->discovery;
2143         struct inquiry_entry *e;
2144
2145         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2146
2147         list_for_each_entry(e, &cache->resolve, list) {
2148                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2149                         return e;
2150                 if (!bacmp(&e->data.bdaddr, bdaddr))
2151                         return e;
2152         }
2153
2154         return NULL;
2155 }
2156
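/* Re-insert the entry into the resolve list, keeping the list ordered
 * by signal strength (smallest absolute RSSI first) so that names get
 * resolved for the strongest devices first. Entries with a name request
 * already pending keep their position.
 */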
2157 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2158                                       struct inquiry_entry *ie)
2159 {
2160         struct discovery_state *cache = &hdev->discovery;
2161         struct list_head *pos = &cache->resolve;
2162         struct inquiry_entry *p;
2163
2164         list_del(&ie->list);
2165
2166         list_for_each_entry(p, &cache->resolve, list) {
2167                 if (p->name_state != NAME_PENDING &&
2168                     abs(p->data.rssi) >= abs(ie->data.rssi))
2169                         break;
2170                 pos = &p->list;
2171         }
2172
2173         list_add(&ie->list, pos);
2174 }
2175
2176 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2177                              bool name_known)
2178 {
2179         struct discovery_state *cache = &hdev->discovery;
2180         struct inquiry_entry *ie;
2181         u32 flags = 0;
2182
2183         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2184
2185         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2186
2187         if (!data->ssp_mode)
2188                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2189
2190         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2191         if (ie) {
2192                 if (!ie->data.ssp_mode)
2193                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2194
2195                 if (ie->name_state == NAME_NEEDED &&
2196                     data->rssi != ie->data.rssi) {
2197                         ie->data.rssi = data->rssi;
2198                         hci_inquiry_cache_update_resolve(hdev, ie);
2199                 }
2200
2201                 goto update;
2202         }
2203
2204         /* Entry not in the cache. Add new one. */
2205         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2206         if (!ie) {
2207                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2208                 goto done;
2209         }
2210
2211         list_add(&ie->all, &cache->all);
2212
2213         if (name_known) {
2214                 ie->name_state = NAME_KNOWN;
2215         } else {
2216                 ie->name_state = NAME_NOT_KNOWN;
2217                 list_add(&ie->list, &cache->unknown);
2218         }
2219
2220 update:
2221         if (name_known && ie->name_state != NAME_KNOWN &&
2222             ie->name_state != NAME_PENDING) {
2223                 ie->name_state = NAME_KNOWN;
2224                 list_del(&ie->list);
2225         }
2226
2227         memcpy(&ie->data, data, sizeof(*data));
2228         ie->timestamp = jiffies;
2229         cache->timestamp = jiffies;
2230
2231         if (ie->name_state == NAME_NOT_KNOWN)
2232                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2233
2234 done:
2235         return flags;
2236 }
2237
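/* Copy at most num cached entries into the caller-provided buffer of
 * struct inquiry_info records and return how many were copied.
 */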
2238 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2239 {
2240         struct discovery_state *cache = &hdev->discovery;
2241         struct inquiry_info *info = (struct inquiry_info *) buf;
2242         struct inquiry_entry *e;
2243         int copied = 0;
2244
2245         list_for_each_entry(e, &cache->all, all) {
2246                 struct inquiry_data *data = &e->data;
2247
2248                 if (copied >= num)
2249                         break;
2250
2251                 bacpy(&info->bdaddr, &data->bdaddr);
2252                 info->pscan_rep_mode    = data->pscan_rep_mode;
2253                 info->pscan_period_mode = data->pscan_period_mode;
2254                 info->pscan_mode        = data->pscan_mode;
2255                 memcpy(info->dev_class, data->dev_class, 3);
2256                 info->clock_offset      = data->clock_offset;
2257
2258                 info++;
2259                 copied++;
2260         }
2261
2262         BT_DBG("cache %p, copied %d", cache, copied);
2263         return copied;
2264 }
2265
2266 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2267 {
2268         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2269         struct hci_dev *hdev = req->hdev;
2270         struct hci_cp_inquiry cp;
2271
2272         BT_DBG("%s", hdev->name);
2273
2274         if (test_bit(HCI_INQUIRY, &hdev->flags))
2275                 return;
2276
2277         /* Start Inquiry */
2278         memcpy(&cp.lap, &ir->lap, 3);
2279         cp.length  = ir->length;
2280         cp.num_rsp = ir->num_rsp;
2281         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2282 }
2283
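/* Handle the HCIINQUIRY ioctl: start a new inquiry if the cache is
 * stale or a flush was requested, wait for it to finish and copy the
 * cached results back to userspace.
 */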
2284 int hci_inquiry(void __user *arg)
2285 {
2286         __u8 __user *ptr = arg;
2287         struct hci_inquiry_req ir;
2288         struct hci_dev *hdev;
2289         int err = 0, do_inquiry = 0, max_rsp;
2290         long timeo;
2291         __u8 *buf;
2292
2293         if (copy_from_user(&ir, ptr, sizeof(ir)))
2294                 return -EFAULT;
2295
2296         hdev = hci_dev_get(ir.dev_id);
2297         if (!hdev)
2298                 return -ENODEV;
2299
2300         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2301                 err = -EBUSY;
2302                 goto done;
2303         }
2304
2305         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2306                 err = -EOPNOTSUPP;
2307                 goto done;
2308         }
2309
2310         if (hdev->dev_type != HCI_BREDR) {
2311                 err = -EOPNOTSUPP;
2312                 goto done;
2313         }
2314
2315         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2316                 err = -EOPNOTSUPP;
2317                 goto done;
2318         }
2319
2320         hci_dev_lock(hdev);
2321         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2322             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2323                 hci_inquiry_cache_flush(hdev);
2324                 do_inquiry = 1;
2325         }
2326         hci_dev_unlock(hdev);
2327
2328         timeo = ir.length * msecs_to_jiffies(2000);
2329
2330         if (do_inquiry) {
2331                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2332                                    timeo);
2333                 if (err < 0)
2334                         goto done;
2335
2336                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2337                  * cleared). If it is interrupted by a signal, return -EINTR.
2338                  */
2339                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2340                                 TASK_INTERRUPTIBLE))
2341                         return -EINTR;
2342         }
2343
2344         /* For an unlimited number of responses, use a buffer with
2345          * 255 entries.
2346          */
2347         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2348
2349         /* cache_dump can't sleep, so allocate a temporary buffer and
2350          * then copy it to user space.
2351          */
2352         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2353         if (!buf) {
2354                 err = -ENOMEM;
2355                 goto done;
2356         }
2357
2358         hci_dev_lock(hdev);
2359         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2360         hci_dev_unlock(hdev);
2361
2362         BT_DBG("num_rsp %d", ir.num_rsp);
2363
2364         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2365                 ptr += sizeof(ir);
2366                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2367                                  ir.num_rsp))
2368                         err = -EFAULT;
2369         } else
2370                 err = -EFAULT;
2371
2372         kfree(buf);
2373
2374 done:
2375         hci_dev_put(hdev);
2376         return err;
2377 }
2378
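/* Power on a controller: call the driver's open callback, run the
 * setup and initialization sequences as needed and, on success, mark
 * the device as up and notify the stack.
 */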
2379 static int hci_dev_do_open(struct hci_dev *hdev)
2380 {
2381         int ret = 0;
2382
2383         BT_DBG("%s %p", hdev->name, hdev);
2384
2385         hci_req_lock(hdev);
2386
2387         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2388                 ret = -ENODEV;
2389                 goto done;
2390         }
2391
2392         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2393             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394                 /* Check for rfkill but allow the HCI setup stage to
2395                  * proceed (which in itself doesn't cause any RF activity).
2396                  */
2397                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2398                         ret = -ERFKILL;
2399                         goto done;
2400                 }
2401
2402                 /* Check for valid public address or a configured static
2403          * random address, but let the HCI setup proceed to
2404                  * be able to determine if there is a public address
2405                  * or not.
2406                  *
2407                  * In case of user channel usage, it is not important
2408                  * if a public address or static random address is
2409                  * available.
2410                  *
2411                  * This check is only valid for BR/EDR controllers
2412                  * since AMP controllers do not have an address.
2413                  */
2414                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2415                     hdev->dev_type == HCI_BREDR &&
2416                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2417                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2418                         ret = -EADDRNOTAVAIL;
2419                         goto done;
2420                 }
2421         }
2422
2423         if (test_bit(HCI_UP, &hdev->flags)) {
2424                 ret = -EALREADY;
2425                 goto done;
2426         }
2427
2428         if (hdev->open(hdev)) {
2429                 ret = -EIO;
2430                 goto done;
2431         }
2432
2433         atomic_set(&hdev->cmd_cnt, 1);
2434         set_bit(HCI_INIT, &hdev->flags);
2435
2436         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2437                 if (hdev->setup)
2438                         ret = hdev->setup(hdev);
2439
2440                 /* The transport driver can set these quirks before
2441                  * creating the HCI device or in its setup callback.
2442                  *
2443                  * In case any of them is set, the controller has to
2444                  * start up as unconfigured.
2445                  */
2446                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2447                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2448                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2449
2450                 /* For an unconfigured controller it is required to
2451                  * read at least the version information provided by
2452                  * the Read Local Version Information command.
2453                  *
2454                  * If the set_bdaddr driver callback is provided, then
2455                  * also the original Bluetooth public device address
2456                  * will be read using the Read BD Address command.
2457                  */
2458                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2459                         ret = __hci_unconf_init(hdev);
2460         }
2461
2462         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2463                 /* If public address change is configured, ensure that
2464                  * the address gets programmed. If the driver does not
2465                  * support changing the public address, fail the power
2466                  * on procedure.
2467                  */
2468                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2469                     hdev->set_bdaddr)
2470                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2471                 else
2472                         ret = -EADDRNOTAVAIL;
2473         }
2474
2475         if (!ret) {
2476                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2478                         ret = __hci_init(hdev);
2479         }
2480
2481         clear_bit(HCI_INIT, &hdev->flags);
2482
2483         if (!ret) {
2484                 hci_dev_hold(hdev);
2485                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2486                 set_bit(HCI_UP, &hdev->flags);
2487                 hci_notify(hdev, HCI_DEV_UP);
2488                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2489                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2490                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2491                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2492                     hdev->dev_type == HCI_BREDR) {
2493                         hci_dev_lock(hdev);
2494                         mgmt_powered(hdev, 1);
2495                         hci_dev_unlock(hdev);
2496                 }
2497         } else {
2498                 /* Init failed, cleanup */
2499                 flush_work(&hdev->tx_work);
2500                 flush_work(&hdev->cmd_work);
2501                 flush_work(&hdev->rx_work);
2502
2503                 skb_queue_purge(&hdev->cmd_q);
2504                 skb_queue_purge(&hdev->rx_q);
2505
2506                 if (hdev->flush)
2507                         hdev->flush(hdev);
2508
2509                 if (hdev->sent_cmd) {
2510                         kfree_skb(hdev->sent_cmd);
2511                         hdev->sent_cmd = NULL;
2512                 }
2513
2514                 hdev->close(hdev);
2515                 hdev->flags &= BIT(HCI_RAW);
2516         }
2517
2518 done:
2519         hci_req_unlock(hdev);
2520         return ret;
2521 }
2522
2523 /* ---- HCI ioctl helpers ---- */
2524
2525 int hci_dev_open(__u16 dev)
2526 {
2527         struct hci_dev *hdev;
2528         int err;
2529
2530         hdev = hci_dev_get(dev);
2531         if (!hdev)
2532                 return -ENODEV;
2533
2534         /* Devices that are marked as unconfigured can only be powered
2535          * up as user channel. Trying to bring them up as normal devices
2536          * will result in a failure. Only user channel operation is
2537          * possible.
2538          *
2539          * When this function is called for a user channel, the flag
2540          * HCI_USER_CHANNEL will be set first before attempting to
2541          * open the device.
2542          */
2543         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2544             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545                 err = -EOPNOTSUPP;
2546                 goto done;
2547         }
2548
2549         /* We need to ensure that no other power on/off work is pending
2550          * before proceeding to call hci_dev_do_open. This is
2551          * particularly important if the setup procedure has not yet
2552          * completed.
2553          */
2554         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2555                 cancel_delayed_work(&hdev->power_off);
2556
2557         /* After this call it is guaranteed that the setup procedure
2558          * has finished. This means that error conditions like RFKILL
2559          * or no valid public or static random address apply.
2560          */
2561         flush_workqueue(hdev->req_workqueue);
2562
2563         /* For controllers that do not use the management interface
2564          * and are brought up via the legacy ioctl, set the HCI_BONDABLE bit
2565          * so that pairing works for them. Once the management interface
2566          * is in use this bit will be cleared again and userspace has
2567          * to explicitly enable it.
2568          */
2569         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2570             !test_bit(HCI_MGMT, &hdev->dev_flags))
2571                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2572
2573         err = hci_dev_do_open(hdev);
2574
2575 done:
2576         hci_dev_put(hdev);
2577         return err;
2578 }
2579
2580 /* This function requires the caller holds hdev->lock */
2581 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2582 {
2583         struct hci_conn_params *p;
2584
2585         list_for_each_entry(p, &hdev->le_conn_params, list) {
2586                 if (p->conn) {
2587                         hci_conn_drop(p->conn);
2588                         hci_conn_put(p->conn);
2589                         p->conn = NULL;
2590                 }
2591                 list_del_init(&p->action);
2592         }
2593
2594         BT_DBG("All LE pending actions cleared");
2595 }
2596
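/* Power off a controller: cancel outstanding work, flush all queues
 * and caches, optionally issue an HCI Reset and finally call the
 * driver's close callback.
 */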
2597 static int hci_dev_do_close(struct hci_dev *hdev)
2598 {
2599         BT_DBG("%s %p", hdev->name, hdev);
2600
2601         cancel_delayed_work(&hdev->power_off);
2602
2603         hci_req_cancel(hdev, ENODEV);
2604         hci_req_lock(hdev);
2605
2606         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2607                 cancel_delayed_work_sync(&hdev->cmd_timer);
2608                 hci_req_unlock(hdev);
2609                 return 0;
2610         }
2611
2612         /* Flush the RX and TX work items */
2613         flush_work(&hdev->tx_work);
2614         flush_work(&hdev->rx_work);
2615
2616         if (hdev->discov_timeout > 0) {
2617                 cancel_delayed_work(&hdev->discov_off);
2618                 hdev->discov_timeout = 0;
2619                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2620                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2621         }
2622
2623         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2624                 cancel_delayed_work(&hdev->service_cache);
2625
2626         cancel_delayed_work_sync(&hdev->le_scan_disable);
2627
2628         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2629                 cancel_delayed_work_sync(&hdev->rpa_expired);
2630
2631         /* Avoid potential lockdep warnings from the *_flush() calls by
2632          * ensuring the workqueue is empty up front.
2633          */
2634         drain_workqueue(hdev->workqueue);
2635
2636         hci_dev_lock(hdev);
2637         hci_inquiry_cache_flush(hdev);
2638         hci_pend_le_actions_clear(hdev);
2639         hci_conn_hash_flush(hdev);
2640         hci_dev_unlock(hdev);
2641
2642         hci_notify(hdev, HCI_DEV_DOWN);
2643
2644         if (hdev->flush)
2645                 hdev->flush(hdev);
2646
2647         /* Reset device */
2648         skb_queue_purge(&hdev->cmd_q);
2649         atomic_set(&hdev->cmd_cnt, 1);
2650         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2651             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2652             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2653                 set_bit(HCI_INIT, &hdev->flags);
2654                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2655                 clear_bit(HCI_INIT, &hdev->flags);
2656         }
2657
2658         /* Flush cmd work */
2659         flush_work(&hdev->cmd_work);
2660
2661         /* Drop queues */
2662         skb_queue_purge(&hdev->rx_q);
2663         skb_queue_purge(&hdev->cmd_q);
2664         skb_queue_purge(&hdev->raw_q);
2665
2666         /* Drop last sent command */
2667         if (hdev->sent_cmd) {
2668                 cancel_delayed_work_sync(&hdev->cmd_timer);
2669                 kfree_skb(hdev->sent_cmd);
2670                 hdev->sent_cmd = NULL;
2671         }
2672
2673         kfree_skb(hdev->recv_evt);
2674         hdev->recv_evt = NULL;
2675
2676         /* After this point our queues are empty
2677          * and no tasks are scheduled. */
2678         hdev->close(hdev);
2679
2680         /* Clear flags */
2681         hdev->flags &= BIT(HCI_RAW);
2682         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2683
2684         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2685                 if (hdev->dev_type == HCI_BREDR) {
2686                         hci_dev_lock(hdev);
2687                         mgmt_powered(hdev, 0);
2688                         hci_dev_unlock(hdev);
2689                 }
2690         }
2691
2692         /* Controller radio is available but is currently powered down */
2693         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2694
2695         memset(hdev->eir, 0, sizeof(hdev->eir));
2696         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2697         bacpy(&hdev->random_addr, BDADDR_ANY);
2698
2699         hci_req_unlock(hdev);
2700
2701         hci_dev_put(hdev);
2702         return 0;
2703 }
2704
2705 int hci_dev_close(__u16 dev)
2706 {
2707         struct hci_dev *hdev;
2708         int err;
2709
2710         hdev = hci_dev_get(dev);
2711         if (!hdev)
2712                 return -ENODEV;
2713
2714         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2715                 err = -EBUSY;
2716                 goto done;
2717         }
2718
2719         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2720                 cancel_delayed_work(&hdev->power_off);
2721
2722         err = hci_dev_do_close(hdev);
2723
2724 done:
2725         hci_dev_put(hdev);
2726         return err;
2727 }
2728
2729 int hci_dev_reset(__u16 dev)
2730 {
2731         struct hci_dev *hdev;
2732         int ret = 0;
2733
2734         hdev = hci_dev_get(dev);
2735         if (!hdev)
2736                 return -ENODEV;
2737
2738         hci_req_lock(hdev);
2739
2740         if (!test_bit(HCI_UP, &hdev->flags)) {
2741                 ret = -ENETDOWN;
2742                 goto done;
2743         }
2744
2745         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2746                 ret = -EBUSY;
2747                 goto done;
2748         }
2749
2750         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2751                 ret = -EOPNOTSUPP;
2752                 goto done;
2753         }
2754
2755         /* Drop queues */
2756         skb_queue_purge(&hdev->rx_q);
2757         skb_queue_purge(&hdev->cmd_q);
2758
2759         /* Avoid potential lockdep warnings from the *_flush() calls by
2760          * ensuring the workqueue is empty up front.
2761          */
2762         drain_workqueue(hdev->workqueue);
2763
2764         hci_dev_lock(hdev);
2765         hci_inquiry_cache_flush(hdev);
2766         hci_conn_hash_flush(hdev);
2767         hci_dev_unlock(hdev);
2768
2769         if (hdev->flush)
2770                 hdev->flush(hdev);
2771
2772         atomic_set(&hdev->cmd_cnt, 1);
2773         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2774
2775         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2776
2777 done:
2778         hci_req_unlock(hdev);
2779         hci_dev_put(hdev);
2780         return ret;
2781 }
2782
2783 int hci_dev_reset_stat(__u16 dev)
2784 {
2785         struct hci_dev *hdev;
2786         int ret = 0;
2787
2788         hdev = hci_dev_get(dev);
2789         if (!hdev)
2790                 return -ENODEV;
2791
2792         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2793                 ret = -EBUSY;
2794                 goto done;
2795         }
2796
2797         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2798                 ret = -EOPNOTSUPP;
2799                 goto done;
2800         }
2801
2802         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2803
2804 done:
2805         hci_dev_put(hdev);
2806         return ret;
2807 }
2808
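/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode that was changed through the legacy ioctl interface and,
 * when the management interface is in use, announce the new settings.
 */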
2809 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2810 {
2811         bool conn_changed, discov_changed;
2812
2813         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2814
2815         if ((scan & SCAN_PAGE))
2816                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2817                                                  &hdev->dev_flags);
2818         else
2819                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2820                                                   &hdev->dev_flags);
2821
2822         if ((scan & SCAN_INQUIRY)) {
2823                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2824                                                    &hdev->dev_flags);
2825         } else {
2826                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2827                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2828                                                     &hdev->dev_flags);
2829         }
2830
2831         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2832                 return;
2833
2834         if (conn_changed || discov_changed) {
2835                 /* In case this was disabled through mgmt */
2836                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2837
2838                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2839                         mgmt_update_adv_data(hdev);
2840
2841                 mgmt_new_settings(hdev);
2842         }
2843 }
2844
2845 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2846 {
2847         struct hci_dev *hdev;
2848         struct hci_dev_req dr;
2849         int err = 0;
2850
2851         if (copy_from_user(&dr, arg, sizeof(dr)))
2852                 return -EFAULT;
2853
2854         hdev = hci_dev_get(dr.dev_id);
2855         if (!hdev)
2856                 return -ENODEV;
2857
2858         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2859                 err = -EBUSY;
2860                 goto done;
2861         }
2862
2863         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2864                 err = -EOPNOTSUPP;
2865                 goto done;
2866         }
2867
2868         if (hdev->dev_type != HCI_BREDR) {
2869                 err = -EOPNOTSUPP;
2870                 goto done;
2871         }
2872
2873         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2874                 err = -EOPNOTSUPP;
2875                 goto done;
2876         }
2877
2878         switch (cmd) {
2879         case HCISETAUTH:
2880                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2881                                    HCI_INIT_TIMEOUT);
2882                 break;
2883
2884         case HCISETENCRYPT:
2885                 if (!lmp_encrypt_capable(hdev)) {
2886                         err = -EOPNOTSUPP;
2887                         break;
2888                 }
2889
2890                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2891                         /* Auth must be enabled first */
2892                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2893                                            HCI_INIT_TIMEOUT);
2894                         if (err)
2895                                 break;
2896                 }
2897
2898                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2899                                    HCI_INIT_TIMEOUT);
2900                 break;
2901
2902         case HCISETSCAN:
2903                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2904                                    HCI_INIT_TIMEOUT);
2905
2906                 /* Ensure that the connectable and discoverable states
2907                  * get correctly modified as this was a non-mgmt change.
2908                  */
2909                 if (!err)
2910                         hci_update_scan_state(hdev, dr.dev_opt);
2911                 break;
2912
2913         case HCISETLINKPOL:
2914                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2915                                    HCI_INIT_TIMEOUT);
2916                 break;
2917
2918         case HCISETLINKMODE:
2919                 hdev->link_mode = ((__u16) dr.dev_opt) &
2920                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2921                 break;
2922
2923         case HCISETPTYPE:
2924                 hdev->pkt_type = (__u16) dr.dev_opt;
2925                 break;
2926
2927         case HCISETACLMTU:
2928                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2929                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2930                 break;
2931
2932         case HCISETSCOMTU:
2933                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2934                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2935                 break;
2936
2937         default:
2938                 err = -EINVAL;
2939                 break;
2940         }
2941
2942 done:
2943         hci_dev_put(hdev);
2944         return err;
2945 }
2946
2947 int hci_get_dev_list(void __user *arg)
2948 {
2949         struct hci_dev *hdev;
2950         struct hci_dev_list_req *dl;
2951         struct hci_dev_req *dr;
2952         int n = 0, size, err;
2953         __u16 dev_num;
2954
2955         if (get_user(dev_num, (__u16 __user *) arg))
2956                 return -EFAULT;
2957
2958         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2959                 return -EINVAL;
2960
2961         size = sizeof(*dl) + dev_num * sizeof(*dr);
2962
2963         dl = kzalloc(size, GFP_KERNEL);
2964         if (!dl)
2965                 return -ENOMEM;
2966
2967         dr = dl->dev_req;
2968
2969         read_lock(&hci_dev_list_lock);
2970         list_for_each_entry(hdev, &hci_dev_list, list) {
2971                 unsigned long flags = hdev->flags;
2972
2973                 /* When auto-off is configured, it means the transport
2974                  * is running, but in that case still indicate that the
2975                  * device is actually down.
2976                  */
2977                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2978                         flags &= ~BIT(HCI_UP);
2979
2980                 (dr + n)->dev_id  = hdev->id;
2981                 (dr + n)->dev_opt = flags;
2982
2983                 if (++n >= dev_num)
2984                         break;
2985         }
2986         read_unlock(&hci_dev_list_lock);
2987
2988         dl->dev_num = n;
2989         size = sizeof(*dl) + n * sizeof(*dr);
2990
2991         err = copy_to_user(arg, dl, size);
2992         kfree(dl);
2993
2994         return err ? -EFAULT : 0;
2995 }
2996
2997 int hci_get_dev_info(void __user *arg)
2998 {
2999         struct hci_dev *hdev;
3000         struct hci_dev_info di;
3001         unsigned long flags;
3002         int err = 0;
3003
3004         if (copy_from_user(&di, arg, sizeof(di)))
3005                 return -EFAULT;
3006
3007         hdev = hci_dev_get(di.dev_id);
3008         if (!hdev)
3009                 return -ENODEV;
3010
3011         /* When auto-off is configured, it means the transport
3012          * is running, but in that case still indicate that the
3013          * device is actually down.
3014          */
3015         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3016                 flags = hdev->flags & ~BIT(HCI_UP);
3017         else
3018                 flags = hdev->flags;
3019
3020         strcpy(di.name, hdev->name);
3021         di.bdaddr   = hdev->bdaddr;
3022         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3023         di.flags    = flags;
3024         di.pkt_type = hdev->pkt_type;
3025         if (lmp_bredr_capable(hdev)) {
3026                 di.acl_mtu  = hdev->acl_mtu;
3027                 di.acl_pkts = hdev->acl_pkts;
3028                 di.sco_mtu  = hdev->sco_mtu;
3029                 di.sco_pkts = hdev->sco_pkts;
3030         } else {
3031                 di.acl_mtu  = hdev->le_mtu;
3032                 di.acl_pkts = hdev->le_pkts;
3033                 di.sco_mtu  = 0;
3034                 di.sco_pkts = 0;
3035         }
3036         di.link_policy = hdev->link_policy;
3037         di.link_mode   = hdev->link_mode;
3038
3039         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3040         memcpy(&di.features, &hdev->features, sizeof(di.features));
3041
3042         if (copy_to_user(arg, &di, sizeof(di)))
3043                 err = -EFAULT;
3044
3045         hci_dev_put(hdev);
3046
3047         return err;
3048 }
3049
3050 /* ---- Interface to HCI drivers ---- */
3051
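/* rfkill callback: power the controller down when it gets blocked,
 * unless it is still going through setup or config. While the device
 * is in use as a user channel the request is rejected.
 */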
3052 static int hci_rfkill_set_block(void *data, bool blocked)
3053 {
3054         struct hci_dev *hdev = data;
3055
3056         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3057
3058         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3059                 return -EBUSY;
3060
3061         if (blocked) {
3062                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3063                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3064                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3065                         hci_dev_do_close(hdev);
3066         } else {
3067                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3068         }
3069
3070         return 0;
3071 }
3072
3073 static const struct rfkill_ops hci_rfkill_ops = {
3074         .set_block = hci_rfkill_set_block,
3075 };
3076
3077 static void hci_power_on(struct work_struct *work)
3078 {
3079         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3080         int err;
3081
3082         BT_DBG("%s", hdev->name);
3083
3084         err = hci_dev_do_open(hdev);
3085         if (err < 0) {
3086                 mgmt_set_powered_failed(hdev, err);
3087                 return;
3088         }
3089
3090         /* During the HCI setup phase, a few error conditions are
3091          * ignored and they need to be checked now. If they are still
3092          * valid, it is important to turn the device back off.
3093          */
3094         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3095             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3096             (hdev->dev_type == HCI_BREDR &&
3097              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3098              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3099                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3100                 hci_dev_do_close(hdev);
3101         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3102                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3103                                    HCI_AUTO_OFF_TIMEOUT);
3104         }
3105
3106         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3107                 /* For unconfigured devices, set the HCI_RAW flag
3108                  * so that userspace can easily identify them.
3109                  */
3110                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3111                         set_bit(HCI_RAW, &hdev->flags);
3112
3113                 /* For fully configured devices, this will send
3114                  * the Index Added event. For unconfigured devices,
3115                  * it will send the Unconfigured Index Added event.
3116                  *
3117                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3118                  * and no event will be sent.
3119                  */
3120                 mgmt_index_added(hdev);
3121         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3122                 /* Now that the controller is configured, it
3123                  * is important to clear the HCI_RAW flag.
3124                  */
3125                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3126                         clear_bit(HCI_RAW, &hdev->flags);
3127
3128                 /* Powering on the controller with HCI_CONFIG set only
3129                  * happens with the transition from unconfigured to
3130                  * configured. This will send the Index Added event.
3131                  */
3132                 mgmt_index_added(hdev);
3133         }
3134 }
3135
3136 static void hci_power_off(struct work_struct *work)
3137 {
3138         struct hci_dev *hdev = container_of(work, struct hci_dev,
3139                                             power_off.work);
3140
3141         BT_DBG("%s", hdev->name);
3142
3143         hci_dev_do_close(hdev);
3144 }
3145
3146 static void hci_discov_off(struct work_struct *work)
3147 {
3148         struct hci_dev *hdev;
3149
3150         hdev = container_of(work, struct hci_dev, discov_off.work);
3151
3152         BT_DBG("%s", hdev->name);
3153
3154         mgmt_discoverable_timeout(hdev);
3155 }
3156
3157 void hci_uuids_clear(struct hci_dev *hdev)
3158 {
3159         struct bt_uuid *uuid, *tmp;
3160
3161         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3162                 list_del(&uuid->list);
3163                 kfree(uuid);
3164         }
3165 }
3166
3167 void hci_link_keys_clear(struct hci_dev *hdev)
3168 {
3169         struct link_key *key;
3170
3171         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3172                 list_del_rcu(&key->list);
3173                 kfree_rcu(key, rcu);
3174         }
3175 }
3176
3177 void hci_smp_ltks_clear(struct hci_dev *hdev)
3178 {
3179         struct smp_ltk *k;
3180
3181         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3182                 list_del_rcu(&k->list);
3183                 kfree_rcu(k, rcu);
3184         }
3185 }
3186
3187 void hci_smp_irks_clear(struct hci_dev *hdev)
3188 {
3189         struct smp_irk *k;
3190
3191         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3192                 list_del_rcu(&k->list);
3193                 kfree_rcu(k, rcu);
3194         }
3195 }
3196
3197 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3198 {
3199         struct link_key *k;
3200
3201         rcu_read_lock();
3202         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3203                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3204                         rcu_read_unlock();
3205                         return k;
3206                 }
3207         }
3208         rcu_read_unlock();
3209
3210         return NULL;
3211 }
3212
3213 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3214                                u8 key_type, u8 old_key_type)
3215 {
3216         /* Legacy key */
3217         if (key_type < 0x03)
3218                 return true;
3219
3220         /* Debug keys are insecure so don't store them persistently */
3221         if (key_type == HCI_LK_DEBUG_COMBINATION)
3222                 return false;
3223
3224         /* Changed combination key and there's no previous one */
3225         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3226                 return false;
3227
3228         /* Security mode 3 case */
3229         if (!conn)
3230                 return true;
3231
3232         /* BR/EDR key derived using SC from an LE link */
3233         if (conn->type == LE_LINK)
3234                 return true;
3235
3236         /* Neither local nor remote side set no-bonding as a requirement */
3237         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3238                 return true;
3239
3240         /* Local side had dedicated bonding as requirement */
3241         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3242                 return true;
3243
3244         /* Remote side had dedicated bonding as requirement */
3245         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3246                 return true;
3247
3248         /* If none of the above criteria match, then don't store the key
3249          * persistently */
3250         return false;
3251 }
3252
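/* Worked example (illustrative, values follow the HCI authentication
 * requirements encoding): a pairing where the local side requested
 * General Bonding with MITM (auth_type 0x05) and the remote side
 * General Bonding (remote_auth 0x04) passes the "neither side set
 * no-bonding" check above, so even an unauthenticated combination key
 * is stored persistently. If both sides used 0x00/0x01 (no bonding)
 * and the key is neither a legacy key nor from dedicated bonding,
 * every check falls through and the key is kept only for the lifetime
 * of the connection.
 */
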
3253 static u8 ltk_role(u8 type)
3254 {
3255         if (type == SMP_LTK)
3256                 return HCI_ROLE_MASTER;
3257
3258         return HCI_ROLE_SLAVE;
3259 }
3260
3261 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262                              u8 addr_type, u8 role)
3263 {
3264         struct smp_ltk *k;
3265
3266         rcu_read_lock();
3267         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3268                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3269                         continue;
3270
3271                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3272                         rcu_read_unlock();
3273                         return k;
3274                 }
3275         }
3276         rcu_read_unlock();
3277
3278         return NULL;
3279 }
3280
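/* Look up an IRK by the resolvable private address it resolves. The
 * first pass is a cheap comparison against the last RPA seen for each
 * IRK; only if that fails is the crypto-based smp_irk_matches() check
 * run for every key, with a match cached back into irk->rpa for the
 * next lookup.
 */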
3281 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3282 {
3283         struct smp_irk *irk;
3284
3285         rcu_read_lock();
3286         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3287                 if (!bacmp(&irk->rpa, rpa)) {
3288                         rcu_read_unlock();
3289                         return irk;
3290                 }
3291         }
3292
3293         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3294                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3295                         bacpy(&irk->rpa, rpa);
3296                         rcu_read_unlock();
3297                         return irk;
3298                 }
3299         }
3300         rcu_read_unlock();
3301
3302         return NULL;
3303 }
3304
3305 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3306                                      u8 addr_type)
3307 {
3308         struct smp_irk *irk;
3309
3310         /* Identity Address must be public or static random */
3311         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3312                 return NULL;
3313
3314         rcu_read_lock();
3315         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3316                 if (addr_type == irk->addr_type &&
3317                     bacmp(bdaddr, &irk->bdaddr) == 0) {
3318                         rcu_read_unlock();
3319                         return irk;
3320                 }
3321         }
3322         rcu_read_unlock();
3323
3324         return NULL;
3325 }
3326
3327 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3328                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3329                                   u8 pin_len, bool *persistent)
3330 {
3331         struct link_key *key, *old_key;
3332         u8 old_key_type;
3333
3334         old_key = hci_find_link_key(hdev, bdaddr);
3335         if (old_key) {
3336                 old_key_type = old_key->type;
3337                 key = old_key;
3338         } else {
3339                 old_key_type = conn ? conn->key_type : 0xff;
3340                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3341                 if (!key)
3342                         return NULL;
3343                 list_add_rcu(&key->list, &hdev->link_keys);
3344         }
3345
3346         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3347
3348         /* Some buggy controller combinations generate a changed
3349          * combination key for legacy pairing even when there's no
3350          * previous key */
3351         if (type == HCI_LK_CHANGED_COMBINATION &&
3352             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3353                 type = HCI_LK_COMBINATION;
3354                 if (conn)
3355                         conn->key_type = type;
3356         }
3357
3358         bacpy(&key->bdaddr, bdaddr);
3359         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3360         key->pin_len = pin_len;
3361
3362         if (type == HCI_LK_CHANGED_COMBINATION)
3363                 key->type = old_key_type;
3364         else
3365                 key->type = type;
3366
3367         if (persistent)
3368                 *persistent = hci_persistent_key(hdev, conn, type,
3369                                                  old_key_type);
3370
3371         return key;
3372 }
3373
3374 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3375                             u8 addr_type, u8 type, u8 authenticated,
3376                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3377 {
3378         struct smp_ltk *key, *old_key;
3379         u8 role = ltk_role(type);
3380
3381         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3382         if (old_key)
3383         if (old_key) {
3384                 key = old_key;
3385         } else {
3386                 if (!key)
3387                         return NULL;
3388                 list_add_rcu(&key->list, &hdev->long_term_keys);
3389         }
3390
3391         bacpy(&key->bdaddr, bdaddr);
3392         key->bdaddr_type = addr_type;
3393         memcpy(key->val, tk, sizeof(key->val));
3394         key->authenticated = authenticated;
3395         key->ediv = ediv;
3396         key->rand = rand;
3397         key->enc_size = enc_size;
3398         key->type = type;
3399
3400         return key;
3401 }
3402
3403 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3405 {
3406         struct smp_irk *irk;
3407
3408         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3409         if (!irk) {
3410                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3411                 if (!irk)
3412                         return NULL;
3413
3414                 bacpy(&irk->bdaddr, bdaddr);
3415                 irk->addr_type = addr_type;
3416
3417                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3418         }
3419
3420         memcpy(irk->val, val, 16);
3421         bacpy(&irk->rpa, rpa);
3422
3423         return irk;
3424 }
3425
3426 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3427 {
3428         struct link_key *key;
3429
3430         key = hci_find_link_key(hdev, bdaddr);
3431         if (!key)
3432                 return -ENOENT;
3433
3434         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3435
3436         list_del_rcu(&key->list);
3437         kfree_rcu(key, rcu);
3438
3439         return 0;
3440 }
3441
3442 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3443 {
3444         struct smp_ltk *k;
3445         int removed = 0;
3446
3447         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3448                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3449                         continue;
3450
3451                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3452
3453                 list_del_rcu(&k->list);
3454                 kfree_rcu(k, rcu);
3455                 removed++;
3456         }
3457
3458         return removed ? 0 : -ENOENT;
3459 }
3460
3461 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3462 {
3463         struct smp_irk *k;
3464
3465         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3466                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3467                         continue;
3468
3469                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3470
3471                 list_del_rcu(&k->list);
3472                 kfree_rcu(k, rcu);
3473         }
3474 }
3475
3476 /* HCI command timer function */
3477 static void hci_cmd_timeout(struct work_struct *work)
3478 {
3479         struct hci_dev *hdev = container_of(work, struct hci_dev,
3480                                             cmd_timer.work);
3481
3482         if (hdev->sent_cmd) {
3483                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3484                 u16 opcode = __le16_to_cpu(sent->opcode);
3485
3486                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3487         } else {
3488                 BT_ERR("%s command tx timeout", hdev->name);
3489         }
3490
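        /* Assume the controller has dropped the stalled command and
         * restore one command credit so the next queued command can be
         * sent.
         */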
3491         atomic_set(&hdev->cmd_cnt, 1);
3492         queue_work(hdev->workqueue, &hdev->cmd_work);
3493 }
3494
3495 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3496                                           bdaddr_t *bdaddr, u8 bdaddr_type)
3497 {
3498         struct oob_data *data;
3499
3500         list_for_each_entry(data, &hdev->remote_oob_data, list) {
3501                 if (bacmp(bdaddr, &data->bdaddr) != 0)
3502                         continue;
3503                 if (data->bdaddr_type != bdaddr_type)
3504                         continue;
3505                 return data;
3506         }
3507
3508         return NULL;
3509 }
3510
3511 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3512                                u8 bdaddr_type)
3513 {
3514         struct oob_data *data;
3515
3516         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3517         if (!data)
3518                 return -ENOENT;
3519
3520         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3521
3522         list_del(&data->list);
3523         kfree(data);
3524
3525         return 0;
3526 }
3527
3528 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3529 {
3530         struct oob_data *data, *n;
3531
3532         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3533                 list_del(&data->list);
3534                 kfree(data);
3535         }
3536 }
3537
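/* Store remote OOB data for both pairing variants: hash192/rand192
 * carry the P-192 values and hash256/rand256 the Secure Connections
 * (P-256) values. Passing NULL for either pair clears that variant.
 */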
3538 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3539                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
3540                             u8 *hash256, u8 *rand256)
3541 {
3542         struct oob_data *data;
3543
3544         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3545         if (!data) {
3546                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3547                 if (!data)
3548                         return -ENOMEM;
3549
3550                 bacpy(&data->bdaddr, bdaddr);
3551                 data->bdaddr_type = bdaddr_type;
3552                 list_add(&data->list, &hdev->remote_oob_data);
3553         }
3554
3555         if (hash192 && rand192) {
3556                 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557                 memcpy(data->rand192, rand192, sizeof(data->rand192));
3558         } else {
3559                 memset(data->hash192, 0, sizeof(data->hash192));
3560                 memset(data->rand192, 0, sizeof(data->rand192));
3561         }
3562
3563         if (hash256 && rand256) {
3564                 memcpy(data->hash256, hash256, sizeof(data->hash256));
3565                 memcpy(data->rand256, rand256, sizeof(data->rand256));
3566         } else {
3567                 memset(data->hash256, 0, sizeof(data->hash256));
3568                 memset(data->rand256, 0, sizeof(data->rand256));
3569         }
3570
3571         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3572
3573         return 0;
3574 }
3575
3576 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3577                                          bdaddr_t *bdaddr, u8 type)
3578 {
3579         struct bdaddr_list *b;
3580
3581         list_for_each_entry(b, bdaddr_list, list) {
3582                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3583                         return b;
3584         }
3585
3586         return NULL;
3587 }
3588
3589 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3590 {
        struct bdaddr_list *b, *n;

        list_for_each_entry_safe(b, n, bdaddr_list, list) {
                list_del(&b->list);
                kfree(b);
        }
3599 }
3600
3601 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3602 {
3603         struct bdaddr_list *entry;
3604
3605         if (!bacmp(bdaddr, BDADDR_ANY))
3606                 return -EBADF;
3607
3608         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3609                 return -EEXIST;
3610
3611         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3612         if (!entry)
3613                 return -ENOMEM;
3614
3615         bacpy(&entry->bdaddr, bdaddr);
3616         entry->bdaddr_type = type;
3617
3618         list_add(&entry->list, list);
3619
3620         return 0;
3621 }
3622
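/* Illustrative example (hypothetical address; the list head is one of
 * the per-device lists initialized in hci_alloc_dev, such as
 * &hdev->whitelist):
 *
 *      err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *      if (err && err != -EEXIST)
 *              return err;
 */
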
3623 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3624 {
3625         struct bdaddr_list *entry;
3626
3627         if (!bacmp(bdaddr, BDADDR_ANY)) {
3628                 hci_bdaddr_list_clear(list);
3629                 return 0;
3630         }
3631
3632         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3633         if (!entry)
3634                 return -ENOENT;
3635
3636         list_del(&entry->list);
3637         kfree(entry);
3638
3639         return 0;
3640 }
3641
3642 /* This function requires the caller holds hdev->lock */
3643 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3644                                                bdaddr_t *addr, u8 addr_type)
3645 {
3646         struct hci_conn_params *params;
3647
3648         /* The conn params list only contains identity addresses */
3649         if (!hci_is_identity_address(addr, addr_type))
3650                 return NULL;
3651
3652         list_for_each_entry(params, &hdev->le_conn_params, list) {
3653                 if (bacmp(&params->addr, addr) == 0 &&
3654                     params->addr_type == addr_type) {
3655                         return params;
3656                 }
3657         }
3658
3659         return NULL;
3660 }
3661
3662 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3663 {
3664         struct hci_conn *conn;
3665
3666         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3667         if (!conn)
3668                 return false;
3669
3670         if (conn->dst_type != type)
3671                 return false;
3672
3673         if (conn->state != BT_CONNECTED)
3674                 return false;
3675
3676         return true;
3677 }
3678
3679 /* This function requires the caller holds hdev->lock */
3680 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681                                                   bdaddr_t *addr, u8 addr_type)
3682 {
3683         struct hci_conn_params *param;
3684
3685         /* The list only contains identity addresses */
3686         if (!hci_is_identity_address(addr, addr_type))
3687                 return NULL;
3688
3689         list_for_each_entry(param, list, action) {
3690                 if (bacmp(&param->addr, addr) == 0 &&
3691                     param->addr_type == addr_type)
3692                         return param;
3693         }
3694
3695         return NULL;
3696 }
3697
3698 /* This function requires the caller holds hdev->lock */
3699 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3700                                             bdaddr_t *addr, u8 addr_type)
3701 {
3702         struct hci_conn_params *params;
3703
3704         if (!hci_is_identity_address(addr, addr_type))
3705                 return NULL;
3706
3707         params = hci_conn_params_lookup(hdev, addr, addr_type);
3708         if (params)
3709                 return params;
3710
3711         params = kzalloc(sizeof(*params), GFP_KERNEL);
3712         if (!params) {
3713                 BT_ERR("Out of memory");
3714                 return NULL;
3715         }
3716
3717         bacpy(&params->addr, addr);
3718         params->addr_type = addr_type;
3719
3720         list_add(&params->list, &hdev->le_conn_params);
3721         INIT_LIST_HEAD(&params->action);
3722
3723         params->conn_min_interval = hdev->le_conn_min_interval;
3724         params->conn_max_interval = hdev->le_conn_max_interval;
3725         params->conn_latency = hdev->le_conn_latency;
3726         params->supervision_timeout = hdev->le_supv_timeout;
3727         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3728
3729         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3730
3731         return params;
3732 }
3733
3734 /* This function requires the caller holds hdev->lock */
3735 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3736                         u8 auto_connect)
3737 {
3738         struct hci_conn_params *params;
3739
3740         params = hci_conn_params_add(hdev, addr, addr_type);
3741         if (!params)
3742                 return -EIO;
3743
3744         if (params->auto_connect == auto_connect)
3745                 return 0;
3746
3747         list_del_init(&params->action);
3748
3749         switch (auto_connect) {
3750         case HCI_AUTO_CONN_DISABLED:
3751         case HCI_AUTO_CONN_LINK_LOSS:
3752                 hci_update_background_scan(hdev);
3753                 break;
3754         case HCI_AUTO_CONN_REPORT:
3755                 list_add(&params->action, &hdev->pend_le_reports);
3756                 hci_update_background_scan(hdev);
3757                 break;
3758         case HCI_AUTO_CONN_DIRECT:
3759         case HCI_AUTO_CONN_ALWAYS:
3760                 if (!is_connected(hdev, addr, addr_type)) {
3761                         list_add(&params->action, &hdev->pend_le_conns);
3762                         hci_update_background_scan(hdev);
3763                 }
3764                 break;
3765         }
3766
3767         params->auto_connect = auto_connect;
3768
3769         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3770                auto_connect);
3771
3772         return 0;
3773 }
3774
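/* Illustrative example (hypothetical peer; the caller must hold
 * hdev->lock, as noted above):
 *
 *      err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *                                HCI_AUTO_CONN_ALWAYS);
 *
 * Unless the peer is already connected, this queues it on
 * hdev->pend_le_conns and kicks off a background scan update.
 */
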
3775 static void hci_conn_params_free(struct hci_conn_params *params)
3776 {
3777         if (params->conn) {
3778                 hci_conn_drop(params->conn);
3779                 hci_conn_put(params->conn);
3780         }
3781
3782         list_del(&params->action);
3783         list_del(&params->list);
3784         kfree(params);
3785 }
3786
3787 /* This function requires the caller holds hdev->lock */
3788 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3789 {
3790         struct hci_conn_params *params;
3791
3792         params = hci_conn_params_lookup(hdev, addr, addr_type);
3793         if (!params)
3794                 return;
3795
3796         hci_conn_params_free(params);
3797
3798         hci_update_background_scan(hdev);
3799
3800         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3801 }
3802
3803 /* This function requires the caller holds hdev->lock */
3804 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3805 {
3806         struct hci_conn_params *params, *tmp;
3807
3808         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3809                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3810                         continue;
3811                 list_del(&params->list);
3812                 kfree(params);
3813         }
3814
3815         BT_DBG("All disabled LE connection parameters were removed");
3816 }
3817
3818 /* This function requires the caller holds hdev->lock */
3819 void hci_conn_params_clear_all(struct hci_dev *hdev)
3820 {
3821         struct hci_conn_params *params, *tmp;
3822
3823         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3824                 hci_conn_params_free(params);
3825
3826         hci_update_background_scan(hdev);
3827
3828         BT_DBG("All LE connection parameters were removed");
3829 }
3830
3831 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3832 {
3833         if (status) {
3834                 BT_ERR("Failed to start inquiry: status %d", status);
3835
3836                 hci_dev_lock(hdev);
3837                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838                 hci_dev_unlock(hdev);
3839                 return;
3840         }
3841 }
3842
3843 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3844 {
3845         /* General inquiry access code (GIAC) */
3846         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3847         struct hci_request req;
3848         struct hci_cp_inquiry cp;
3849         int err;
3850
3851         if (status) {
3852                 BT_ERR("Failed to disable LE scanning: status %d", status);
3853                 return;
3854         }
3855
3856         switch (hdev->discovery.type) {
3857         case DISCOV_TYPE_LE:
3858                 hci_dev_lock(hdev);
3859                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3860                 hci_dev_unlock(hdev);
3861                 break;
3862
3863         case DISCOV_TYPE_INTERLEAVED:
3864                 hci_req_init(&req, hdev);
3865
3866                 memset(&cp, 0, sizeof(cp));
3867                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3868                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3869                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3870
3871                 hci_dev_lock(hdev);
3872
3873                 hci_inquiry_cache_flush(hdev);
3874
3875                 err = hci_req_run(&req, inquiry_complete);
3876                 if (err) {
3877                         BT_ERR("Inquiry request failed: err %d", err);
3878                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3879                 }
3880
3881                 hci_dev_unlock(hdev);
3882                 break;
3883         }
3884 }
3885
3886 static void le_scan_disable_work(struct work_struct *work)
3887 {
3888         struct hci_dev *hdev = container_of(work, struct hci_dev,
3889                                             le_scan_disable.work);
3890         struct hci_request req;
3891         int err;
3892
3893         BT_DBG("%s", hdev->name);
3894
3895         hci_req_init(&req, hdev);
3896
3897         hci_req_add_le_scan_disable(&req);
3898
3899         err = hci_req_run(&req, le_scan_disable_work_complete);
3900         if (err)
3901                 BT_ERR("Disable LE scanning request failed: err %d", err);
3902 }
3903
3904 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3905 {
3906         struct hci_dev *hdev = req->hdev;
3907
3908         /* If we're advertising or initiating an LE connection we can't
3909          * go ahead and change the random address at this time. This is
3910          * because the eventual initiator address used for the
3911          * subsequently created connection will be undefined (some
3912          * controllers use the new address and others the one we had
3913          * when the operation started).
3914          *
3915          * In this kind of scenario skip the update and let the random
3916          * address be updated at the next cycle.
3917          */
3918         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3919             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920                 BT_DBG("Deferring random address update");
3921                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3922                 return;
3923         }
3924
3925         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3926 }
3927
3928 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3929                               u8 *own_addr_type)
3930 {
3931         struct hci_dev *hdev = req->hdev;
3932         int err;
3933
3934         /* If privacy is enabled use a resolvable private address. If
3935          * the current RPA has expired or something other than the
3936          * current RPA is in use, then generate a new one.
3937          */
3938         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3939                 int to;
3940
3941                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942
3943                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3944                     !bacmp(&hdev->random_addr, &hdev->rpa))
3945                         return 0;
3946
3947                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3948                 if (err < 0) {
3949                         BT_ERR("%s failed to generate new RPA", hdev->name);
3950                         return err;
3951                 }
3952
3953                 set_random_addr(req, &hdev->rpa);
3954
3955                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3957
3958                 return 0;
3959         }
3960
3961         /* If privacy is required but no resolvable private address is
3962          * available, use a non-resolvable private address. This is
3963          * useful for active scanning and non-connectable advertising.
3964          */
3965         if (require_privacy) {
3966                 bdaddr_t nrpa;
3967
3968                 get_random_bytes(&nrpa, 6);
3969                 nrpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3970
3971                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3972                 set_random_addr(req, &nrpa);
3973                 return 0;
3974         }
3975
3976         /* If forcing the static address is in use or there is no public
3977          * address, use the static address as the random address (but
3978          * skip the HCI command if the current random address is already
3979          * the static one).
3980          */
3981         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3982             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3983                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3984                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3985                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3986                                     &hdev->static_addr);
3987                 return 0;
3988         }
3989
3990         /* Neither privacy nor static address is being used so use a
3991          * public address.
3992          */
3993         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3994
3995         return 0;
3996 }
3997
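/* Illustrative sketch: how a request that needs an own address type
 * typically uses the helper above (the field name matches
 * struct hci_cp_le_set_scan_param):
 *
 *      struct hci_cp_le_set_scan_param cp;
 *      u8 own_addr_type;
 *
 *      if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *              return;
 *      cp.own_address_type = own_addr_type;
 */
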
3998 /* Copy the Identity Address of the controller.
3999  *
4000  * If the controller has a public BD_ADDR, then by default use that one.
4001  * If this is a LE only controller without a public address, default to
4002  * the static random address.
4003  *
4004  * For debugging purposes it is possible to force controllers with a
4005  * public address to use the static random address instead.
4006  */
4007 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4008                                u8 *bdaddr_type)
4009 {
4010         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4011             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4012                 bacpy(bdaddr, &hdev->static_addr);
4013                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4014         } else {
4015                 bacpy(bdaddr, &hdev->bdaddr);
4016                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4017         }
4018 }
4019
4020 /* Alloc HCI device */
4021 struct hci_dev *hci_alloc_dev(void)
4022 {
4023         struct hci_dev *hdev;
4024
4025         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4026         if (!hdev)
4027                 return NULL;
4028
4029         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4030         hdev->esco_type = (ESCO_HV1);
4031         hdev->link_mode = (HCI_LM_ACCEPT);
4032         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
4033         hdev->io_capability = 0x03;     /* No Input No Output */
4034         hdev->manufacturer = 0xffff;    /* Default to internal use */
4035         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4036         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4037
4038         hdev->sniff_max_interval = 800;
4039         hdev->sniff_min_interval = 80;
4040
4041         hdev->le_adv_channel_map = 0x07;
4042         hdev->le_adv_min_interval = 0x0800;
4043         hdev->le_adv_max_interval = 0x0800;
4044         hdev->le_scan_interval = 0x0060;
4045         hdev->le_scan_window = 0x0030;
4046         hdev->le_conn_min_interval = 0x0028;
4047         hdev->le_conn_max_interval = 0x0038;
4048         hdev->le_conn_latency = 0x0000;
4049         hdev->le_supv_timeout = 0x002a;
4050
4051         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4052         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4053         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4054         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4055
4056         mutex_init(&hdev->lock);
4057         mutex_init(&hdev->req_lock);
4058
4059         INIT_LIST_HEAD(&hdev->mgmt_pending);
4060         INIT_LIST_HEAD(&hdev->blacklist);
4061         INIT_LIST_HEAD(&hdev->whitelist);
4062         INIT_LIST_HEAD(&hdev->uuids);
4063         INIT_LIST_HEAD(&hdev->link_keys);
4064         INIT_LIST_HEAD(&hdev->long_term_keys);
4065         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4066         INIT_LIST_HEAD(&hdev->remote_oob_data);
4067         INIT_LIST_HEAD(&hdev->le_white_list);
4068         INIT_LIST_HEAD(&hdev->le_conn_params);
4069         INIT_LIST_HEAD(&hdev->pend_le_conns);
4070         INIT_LIST_HEAD(&hdev->pend_le_reports);
4071         INIT_LIST_HEAD(&hdev->conn_hash.list);
4072
4073         INIT_WORK(&hdev->rx_work, hci_rx_work);
4074         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4075         INIT_WORK(&hdev->tx_work, hci_tx_work);
4076         INIT_WORK(&hdev->power_on, hci_power_on);
4077
4078         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4079         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4080         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4081
4082         skb_queue_head_init(&hdev->rx_q);
4083         skb_queue_head_init(&hdev->cmd_q);
4084         skb_queue_head_init(&hdev->raw_q);
4085
4086         init_waitqueue_head(&hdev->req_wait_q);
4087
4088         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4089
4090         hci_init_sysfs(hdev);
4091         discovery_init(hdev);
4092
4093         return hdev;
4094 }
4095 EXPORT_SYMBOL(hci_alloc_dev);
4096
4097 /* Free HCI device */
4098 void hci_free_dev(struct hci_dev *hdev)
4099 {
4100         /* Will be freed via the device release callback */
4101         put_device(&hdev->dev);
4102 }
4103 EXPORT_SYMBOL(hci_free_dev);
4104
4105 /* Register HCI device */
4106 int hci_register_dev(struct hci_dev *hdev)
4107 {
4108         int id, error;
4109
4110         if (!hdev->open || !hdev->close || !hdev->send)
4111                 return -EINVAL;
4112
4113         /* Do not allow HCI_AMP devices to register at index 0,
4114          * so the index can be used as the AMP controller ID.
4115          */
4116         switch (hdev->dev_type) {
4117         case HCI_BREDR:
4118                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4119                 break;
4120         case HCI_AMP:
4121                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4122                 break;
4123         default:
4124                 return -EINVAL;
4125         }
4126
4127         if (id < 0)
4128                 return id;
4129
4130         sprintf(hdev->name, "hci%d", id);
4131         hdev->id = id;
4132
4133         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4134
4135         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4136                                           WQ_MEM_RECLAIM, 1, hdev->name);
4137         if (!hdev->workqueue) {
4138                 error = -ENOMEM;
4139                 goto err;
4140         }
4141
4142         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4143                                               WQ_MEM_RECLAIM, 1, hdev->name);
4144         if (!hdev->req_workqueue) {
4145                 destroy_workqueue(hdev->workqueue);
4146                 error = -ENOMEM;
4147                 goto err;
4148         }
4149
4150         if (!IS_ERR_OR_NULL(bt_debugfs))
4151                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4152
4153         dev_set_name(&hdev->dev, "%s", hdev->name);
4154
4155         error = device_add(&hdev->dev);
4156         if (error < 0)
4157                 goto err_wqueue;
4158
4159         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4160                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4161                                     hdev);
4162         if (hdev->rfkill) {
4163                 if (rfkill_register(hdev->rfkill) < 0) {
4164                         rfkill_destroy(hdev->rfkill);
4165                         hdev->rfkill = NULL;
4166                 }
4167         }
4168
4169         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4170                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4171
4172         set_bit(HCI_SETUP, &hdev->dev_flags);
4173         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4174
4175         if (hdev->dev_type == HCI_BREDR) {
4176                 /* Assume BR/EDR support until proven otherwise (such as
4177                  * through reading supported features during init).
4178                  */
4179                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4180         }
4181
4182         write_lock(&hci_dev_list_lock);
4183         list_add(&hdev->list, &hci_dev_list);
4184         write_unlock(&hci_dev_list_lock);
4185
4186         /* Devices that are marked for raw-only usage are unconfigured
4187          * and should not be included in normal operation.
4188          */
4189         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4190                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4191
4192         hci_notify(hdev, HCI_DEV_REG);
4193         hci_dev_hold(hdev);
4194
4195         queue_work(hdev->req_workqueue, &hdev->power_on);
4196
4197         return id;
4198
4199 err_wqueue:
4200         destroy_workqueue(hdev->workqueue);
4201         destroy_workqueue(hdev->req_workqueue);
4202 err:
4203         ida_simple_remove(&hci_index_ida, hdev->id);
4204
4205         return error;
4206 }
4207 EXPORT_SYMBOL(hci_register_dev);
4208
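/* Illustrative sketch of the driver side (callback names are
 * hypothetical; open, close and send are the mandatory hooks checked
 * above):
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */
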
4209 /* Unregister HCI device */
4210 void hci_unregister_dev(struct hci_dev *hdev)
4211 {
4212         int i, id;
4213
4214         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4215
4216         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4217
4218         id = hdev->id;
4219
4220         write_lock(&hci_dev_list_lock);
4221         list_del(&hdev->list);
4222         write_unlock(&hci_dev_list_lock);
4223
4224         hci_dev_do_close(hdev);
4225
4226         for (i = 0; i < NUM_REASSEMBLY; i++)
4227                 kfree_skb(hdev->reassembly[i]);
4228
4229         cancel_work_sync(&hdev->power_on);
4230
4231         if (!test_bit(HCI_INIT, &hdev->flags) &&
4232             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4233             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4234                 hci_dev_lock(hdev);
4235                 mgmt_index_removed(hdev);
4236                 hci_dev_unlock(hdev);
4237         }
4238
4239         /* mgmt_index_removed should take care of emptying the
4240          * pending list */
4239          * pending list. */
4242
4243         hci_notify(hdev, HCI_DEV_UNREG);
4244
4245         if (hdev->rfkill) {
4246                 rfkill_unregister(hdev->rfkill);
4247                 rfkill_destroy(hdev->rfkill);
4248         }
4249
4250         smp_unregister(hdev);
4251
4252         device_del(&hdev->dev);
4253
4254         debugfs_remove_recursive(hdev->debugfs);
4255
4256         destroy_workqueue(hdev->workqueue);
4257         destroy_workqueue(hdev->req_workqueue);
4258
4259         hci_dev_lock(hdev);
4260         hci_bdaddr_list_clear(&hdev->blacklist);
4261         hci_bdaddr_list_clear(&hdev->whitelist);
4262         hci_uuids_clear(hdev);
4263         hci_link_keys_clear(hdev);
4264         hci_smp_ltks_clear(hdev);
4265         hci_smp_irks_clear(hdev);
4266         hci_remote_oob_data_clear(hdev);
4267         hci_bdaddr_list_clear(&hdev->le_white_list);
4268         hci_conn_params_clear_all(hdev);
4269         hci_discovery_filter_clear(hdev);
4270         hci_dev_unlock(hdev);
4271
4272         hci_dev_put(hdev);
4273
4274         ida_simple_remove(&hci_index_ida, id);
4275 }
4276 EXPORT_SYMBOL(hci_unregister_dev);
4277
4278 /* Suspend HCI device */
4279 int hci_suspend_dev(struct hci_dev *hdev)
4280 {
4281         hci_notify(hdev, HCI_DEV_SUSPEND);
4282         return 0;
4283 }
4284 EXPORT_SYMBOL(hci_suspend_dev);
4285
4286 /* Resume HCI device */
4287 int hci_resume_dev(struct hci_dev *hdev)
4288 {
4289         hci_notify(hdev, HCI_DEV_RESUME);
4290         return 0;
4291 }
4292 EXPORT_SYMBOL(hci_resume_dev);
4293
4294 /* Reset HCI device */
4295 int hci_reset_dev(struct hci_dev *hdev)
4296 {
4297         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4298         struct sk_buff *skb;
4299
4300         skb = bt_skb_alloc(3, GFP_ATOMIC);
4301         if (!skb)
4302                 return -ENOMEM;
4303
4304         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4305         memcpy(skb_put(skb, 3), hw_err, 3);
4306
4307         /* Send Hardware Error to upper stack */
4308         return hci_recv_frame(hdev, skb);
4309 }
4310 EXPORT_SYMBOL(hci_reset_dev);
4311
4312 /* Receive frame from HCI drivers */
4313 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4314 {
4315         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4316                       !test_bit(HCI_INIT, &hdev->flags))) {
4317                 kfree_skb(skb);
4318                 return -ENXIO;
4319         }
4320
4321         /* Incoming skb */
4322         bt_cb(skb)->incoming = 1;
4323
4324         /* Time stamp */
4325         __net_timestamp(skb);
4326
4327         skb_queue_tail(&hdev->rx_q, skb);
4328         queue_work(hdev->workqueue, &hdev->rx_work);
4329
4330         return 0;
4331 }
4332 EXPORT_SYMBOL(hci_recv_frame);
4333
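/* Illustrative sketch: a driver that receives complete packets tags
 * and forwards them like this (pkt_type is one of HCI_EVENT_PKT,
 * HCI_ACLDATA_PKT or HCI_SCODATA_PKT):
 *
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      hci_recv_frame(hdev, skb);
 */
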
4334 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4335                           int count, __u8 index)
4336 {
4337         int len = 0;
4338         int hlen = 0;
4339         int remain = count;
4340         struct sk_buff *skb;
4341         struct bt_skb_cb *scb;
4342
4343         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4344             index >= NUM_REASSEMBLY)
4345                 return -EILSEQ;
4346
4347         skb = hdev->reassembly[index];
4348
4349         if (!skb) {
4350                 switch (type) {
4351                 case HCI_ACLDATA_PKT:
4352                         len = HCI_MAX_FRAME_SIZE;
4353                         hlen = HCI_ACL_HDR_SIZE;
4354                         break;
4355                 case HCI_EVENT_PKT:
4356                         len = HCI_MAX_EVENT_SIZE;
4357                         hlen = HCI_EVENT_HDR_SIZE;
4358                         break;
4359                 case HCI_SCODATA_PKT:
4360                         len = HCI_MAX_SCO_SIZE;
4361                         hlen = HCI_SCO_HDR_SIZE;
4362                         break;
4363                 }
4364
4365                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4366                 if (!skb)
4367                         return -ENOMEM;
4368
4369                 scb = (void *) skb->cb;
4370                 scb->expect = hlen;
4371                 scb->pkt_type = type;
4372
4373                 hdev->reassembly[index] = skb;
4374         }
4375
4376         while (count) {
4377                 scb = (void *) skb->cb;
4378                 len = min_t(uint, scb->expect, count);
4379
4380                 memcpy(skb_put(skb, len), data, len);
4381
4382                 count -= len;
4383                 data += len;
4384                 scb->expect -= len;
4385                 remain = count;
4386
4387                 switch (type) {
4388                 case HCI_EVENT_PKT:
4389                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4390                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4391                                 scb->expect = h->plen;
4392
4393                                 if (skb_tailroom(skb) < scb->expect) {
4394                                         kfree_skb(skb);
4395                                         hdev->reassembly[index] = NULL;
4396                                         return -ENOMEM;
4397                                 }
4398                         }
4399                         break;
4400
4401                 case HCI_ACLDATA_PKT:
4402                         if (skb->len == HCI_ACL_HDR_SIZE) {
4403                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4404                                 scb->expect = __le16_to_cpu(h->dlen);
4405
4406                                 if (skb_tailroom(skb) < scb->expect) {
4407                                         kfree_skb(skb);
4408                                         hdev->reassembly[index] = NULL;
4409                                         return -ENOMEM;
4410                                 }
4411                         }
4412                         break;
4413
4414                 case HCI_SCODATA_PKT:
4415                         if (skb->len == HCI_SCO_HDR_SIZE) {
4416                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4417                                 scb->expect = h->dlen;
4418
4419                                 if (skb_tailroom(skb) < scb->expect) {
4420                                         kfree_skb(skb);
4421                                         hdev->reassembly[index] = NULL;
4422                                         return -ENOMEM;
4423                                 }
4424                         }
4425                         break;
4426                 }
4427
4428                 if (scb->expect == 0) {
4429                         /* Complete frame */
4430
4431                         bt_cb(skb)->pkt_type = type;
4432                         hci_recv_frame(hdev, skb);
4433
4434                         hdev->reassembly[index] = NULL;
4435                         return remain;
4436                 }
4437         }
4438
4439         return remain;
4440 }
4441
4442 #define STREAM_REASSEMBLY 0
4443
4444 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4445 {
4446         int type;
4447         int rem = 0;
4448
4449         while (count) {
4450                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4451
4452                 if (!skb) {
4453                         struct { char type; } *pkt;
4454
4455                         /* Start of the frame */
4456                         pkt = data;
4457                         type = pkt->type;
4458
4459                         data++;
4460                         count--;
                } else {
                        type = bt_cb(skb)->pkt_type;
                }
4463
4464                 rem = hci_reassembly(hdev, type, data, count,
4465                                      STREAM_REASSEMBLY);
4466                 if (rem < 0)
4467                         return rem;
4468
4469                 data += (count - rem);
4470                 count = rem;
4471         }
4472
4473         return rem;
4474 }
4475 EXPORT_SYMBOL(hci_recv_stream_fragment);
4476
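/* Illustrative sketch: a UART-style driver feeds raw bytes as they
 * arrive, without aligning to packet boundaries (buf and len are
 * hypothetical driver state):
 *
 *      err = hci_recv_stream_fragment(hdev, buf, len);
 *      if (err < 0)
 *              BT_ERR("Frame reassembly failed (%d)", err);
 */
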
4477 /* ---- Interface to upper protocols ---- */
4478
4479 int hci_register_cb(struct hci_cb *cb)
4480 {
4481         BT_DBG("%p name %s", cb, cb->name);
4482
4483         write_lock(&hci_cb_list_lock);
4484         list_add(&cb->list, &hci_cb_list);
4485         write_unlock(&hci_cb_list_lock);
4486
4487         return 0;
4488 }
4489 EXPORT_SYMBOL(hci_register_cb);
4490
4491 int hci_unregister_cb(struct hci_cb *cb)
4492 {
4493         BT_DBG("%p name %s", cb, cb->name);
4494
4495         write_lock(&hci_cb_list_lock);
4496         list_del(&cb->list);
4497         write_unlock(&hci_cb_list_lock);
4498
4499         return 0;
4500 }
4501 EXPORT_SYMBOL(hci_unregister_cb);
4502
4503 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4504 {
4505         int err;
4506
4507         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4508
4509         /* Time stamp */
4510         __net_timestamp(skb);
4511
4512         /* Send copy to monitor */
4513         hci_send_to_monitor(hdev, skb);
4514
4515         if (atomic_read(&hdev->promisc)) {
4516                 /* Send copy to the sockets */
4517                 hci_send_to_sock(hdev, skb);
4518         }
4519
4520         /* Get rid of skb owner, prior to sending to the driver. */
4521         skb_orphan(skb);
4522
4523         err = hdev->send(hdev, skb);
4524         if (err < 0) {
4525                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4526                 kfree_skb(skb);
4527         }
4528 }
4529
4530 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4531 {
4532         skb_queue_head_init(&req->cmd_q);
4533         req->hdev = hdev;
4534         req->err = 0;
4535 }
4536
4537 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4538 {
4539         struct hci_dev *hdev = req->hdev;
4540         struct sk_buff *skb;
4541         unsigned long flags;
4542
4543         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4544
4545         /* If an error occurred during request building, remove all HCI
4546          * commands queued on the HCI request queue.
4547          */
4548         if (req->err) {
4549                 skb_queue_purge(&req->cmd_q);
4550                 return req->err;
4551         }
4552
4553         /* Do not allow empty requests */
4554         if (skb_queue_empty(&req->cmd_q))
4555                 return -ENODATA;
4556
4557         skb = skb_peek_tail(&req->cmd_q);
4558         bt_cb(skb)->req.complete = complete;
4559
4560         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4561         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4562         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4563
4564         queue_work(hdev->workqueue, &hdev->cmd_work);
4565
4566         return 0;
4567 }
4568
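/* Illustrative sketch of the request pattern used throughout this file
 * (the completion callback may be NULL; my_complete is hypothetical):
 *
 *      struct hci_request req;
 *      struct hci_cp_le_set_scan_enable cp = { .enable = LE_SCAN_ENABLE };
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *      err = hci_req_run(&req, my_complete);
 */
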
4569 bool hci_req_pending(struct hci_dev *hdev)
4570 {
4571         return hdev->req_status == HCI_REQ_PEND;
4572 }
4573
4574 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4575                                        u32 plen, const void *param)
4576 {
4577         int len = HCI_COMMAND_HDR_SIZE + plen;
4578         struct hci_command_hdr *hdr;
4579         struct sk_buff *skb;
4580
4581         skb = bt_skb_alloc(len, GFP_ATOMIC);
4582         if (!skb)
4583                 return NULL;
4584
4585         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4586         hdr->opcode = cpu_to_le16(opcode);
4587         hdr->plen   = plen;
4588
4589         if (plen)
4590                 memcpy(skb_put(skb, plen), param, plen);
4591
4592         BT_DBG("skb len %d", skb->len);
4593
4594         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4595         bt_cb(skb)->opcode = opcode;
4596
4597         return skb;
4598 }
4599
4600 /* Send HCI command */
4601 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4602                  const void *param)
4603 {
4604         struct sk_buff *skb;
4605
4606         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4607
4608         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4609         if (!skb) {
4610                 BT_ERR("%s no memory for command", hdev->name);
4611                 return -ENOMEM;
4612         }
4613
4614         /* Stand-alone HCI commands must be flagged as
4615          * single-command requests.
4616          */
4617         bt_cb(skb)->req.start = true;
4618
4619         skb_queue_tail(&hdev->cmd_q, skb);
4620         queue_work(hdev->workqueue, &hdev->cmd_work);
4621
4622         return 0;
4623 }
4624
4625 /* Queue a command to an asynchronous HCI request */
4626 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4627                     const void *param, u8 event)
4628 {
4629         struct hci_dev *hdev = req->hdev;
4630         struct sk_buff *skb;
4631
4632         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4633
4634         /* If an error occurred during request building, there is no point in
4635          * queueing the HCI command. We can simply return.
4636          */
4637         if (req->err)
4638                 return;
4639
4640         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4641         if (!skb) {
4642                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4643                        hdev->name, opcode);
4644                 req->err = -ENOMEM;
4645                 return;
4646         }
4647
4648         if (skb_queue_empty(&req->cmd_q))
4649                 bt_cb(skb)->req.start = true;
4650
4651         bt_cb(skb)->req.event = event;
4652
4653         skb_queue_tail(&req->cmd_q, skb);
4654 }
4655
4656 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4657                  const void *param)
4658 {
4659         hci_req_add_ev(req, opcode, plen, param, 0);
4660 }
4661
4662 /* Get data from the previously sent command */
4663 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4664 {
4665         struct hci_command_hdr *hdr;
4666
4667         if (!hdev->sent_cmd)
4668                 return NULL;
4669
4670         hdr = (void *) hdev->sent_cmd->data;
4671
4672         if (hdr->opcode != cpu_to_le16(opcode))
4673                 return NULL;
4674
4675         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4676
4677         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4678 }
4679
4680 /* Send ACL data */
4681 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4682 {
4683         struct hci_acl_hdr *hdr;
4684         int len = skb->len;
4685
4686         skb_push(skb, HCI_ACL_HDR_SIZE);
4687         skb_reset_transport_header(skb);
4688         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4689         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4690         hdr->dlen   = cpu_to_le16(len);
4691 }
4692
4693 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4694                           struct sk_buff *skb, __u16 flags)
4695 {
4696         struct hci_conn *conn = chan->conn;
4697         struct hci_dev *hdev = conn->hdev;
4698         struct sk_buff *list;
4699
4700         skb->len = skb_headlen(skb);
4701         skb->data_len = 0;
4702
4703         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4704
4705         switch (hdev->dev_type) {
4706         case HCI_BREDR:
4707                 hci_add_acl_hdr(skb, conn->handle, flags);
4708                 break;
4709         case HCI_AMP:
4710                 hci_add_acl_hdr(skb, chan->handle, flags);
4711                 break;
4712         default:
4713                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4714                 return;
4715         }
4716
4717         list = skb_shinfo(skb)->frag_list;
4718         if (!list) {
4719                 /* Non-fragmented */
4720                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4721
4722                 skb_queue_tail(queue, skb);
4723         } else {
4724                 /* Fragmented */
4725                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4726
4727                 skb_shinfo(skb)->frag_list = NULL;
4728
4729                 /* Queue all fragments atomically. We need to use spin_lock_bh
4730                  * here because of 6LoWPAN links, as there this function is
4731                  * called from softirq and using normal spin lock could cause
4732                  * deadlocks.
4733                  */
4734                 spin_lock_bh(&queue->lock);
4735
4736                 __skb_queue_tail(queue, skb);
4737
4738                 flags &= ~ACL_START;
4739                 flags |= ACL_CONT;
4740                 do {
                        skb = list;
                        list = list->next;
4742
4743                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4744                         hci_add_acl_hdr(skb, conn->handle, flags);
4745
4746                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4747
4748                         __skb_queue_tail(queue, skb);
4749                 } while (list);
4750
4751                 spin_unlock_bh(&queue->lock);
4752         }
4753 }
4754
4755 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4756 {
4757         struct hci_dev *hdev = chan->conn->hdev;
4758
4759         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4760
4761         hci_queue_acl(chan, &chan->data_q, skb, flags);
4762
4763         queue_work(hdev->workqueue, &hdev->tx_work);
4764 }
4765
4766 /* Send SCO data */
4767 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4768 {
4769         struct hci_dev *hdev = conn->hdev;
4770         struct hci_sco_hdr hdr;
4771
4772         BT_DBG("%s len %d", hdev->name, skb->len);
4773
4774         hdr.handle = cpu_to_le16(conn->handle);
4775         hdr.dlen   = skb->len;
4776
4777         skb_push(skb, HCI_SCO_HDR_SIZE);
4778         skb_reset_transport_header(skb);
4779         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4780
4781         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4782
4783         skb_queue_tail(&conn->data_q, skb);
4784         queue_work(hdev->workqueue, &hdev->tx_work);
4785 }
4786
4787 /* ---- HCI TX task (outgoing data) ---- */
4788
4789 /* HCI Connection scheduler */
4790 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4791                                      int *quote)
4792 {
4793         struct hci_conn_hash *h = &hdev->conn_hash;
4794         struct hci_conn *conn = NULL, *c;
4795         unsigned int num = 0, min = ~0;
4796
4797         /* We don't have to lock the device here. Connections are always
4798          * added and removed with the TX task disabled. */
4799
4800         rcu_read_lock();
4801
4802         list_for_each_entry_rcu(c, &h->list, list) {
4803                 if (c->type != type || skb_queue_empty(&c->data_q))
4804                         continue;
4805
4806                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4807                         continue;
4808
4809                 num++;
4810
4811                 if (c->sent < min) {
4812                         min  = c->sent;
4813                         conn = c;
4814                 }
4815
4816                 if (hci_conn_num(hdev, type) == num)
4817                         break;
4818         }
4819
4820         rcu_read_unlock();
4821
4822         if (conn) {
4823                 int cnt, q;
4824
4825                 switch (conn->type) {
4826                 case ACL_LINK:
4827                         cnt = hdev->acl_cnt;
4828                         break;
4829                 case SCO_LINK:
4830                 case ESCO_LINK:
4831                         cnt = hdev->sco_cnt;
4832                         break;
4833                 case LE_LINK:
4834                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4835                         break;
4836                 default:
4837                         cnt = 0;
4838                         BT_ERR("Unknown link type");
4839                 }
4840
4841                 q = cnt / num;
4842                 *quote = q ? q : 1;
4843         } else
4844                 *quote = 0;
4845
4846         BT_DBG("conn %p quote %d", conn, *quote);
4847         return conn;
4848 }
4849
4850 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4851 {
4852         struct hci_conn_hash *h = &hdev->conn_hash;
4853         struct hci_conn *c;
4854
4855         BT_ERR("%s link tx timeout", hdev->name);
4856
4857         rcu_read_lock();
4858
4859         /* Kill stalled connections */
4860         list_for_each_entry_rcu(c, &h->list, list) {
4861                 if (c->type == type && c->sent) {
4862                         BT_ERR("%s killing stalled connection %pMR",
4863                                hdev->name, &c->dst);
4864                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4865                 }
4866         }
4867
4868         rcu_read_unlock();
4869 }
4870
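/* Channel-level scheduler: among all channels of the given link type
 * with queued data, pick one whose head skb has the highest priority;
 * ties are broken in favour of the channel whose parent connection has
 * the fewest packets in flight. The returned quota is the fair share
 * of the free controller buffers, with a minimum of one packet.
 */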
4871 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4872                                       int *quote)
4873 {
4874         struct hci_conn_hash *h = &hdev->conn_hash;
4875         struct hci_chan *chan = NULL;
4876         unsigned int num = 0, min = ~0, cur_prio = 0;
4877         struct hci_conn *conn;
4878         int cnt, q, conn_num = 0;
4879
4880         BT_DBG("%s", hdev->name);
4881
4882         rcu_read_lock();
4883
4884         list_for_each_entry_rcu(conn, &h->list, list) {
4885                 struct hci_chan *tmp;
4886
4887                 if (conn->type != type)
4888                         continue;
4889
4890                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4891                         continue;
4892
4893                 conn_num++;
4894
4895                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4896                         struct sk_buff *skb;
4897
4898                         if (skb_queue_empty(&tmp->data_q))
4899                                 continue;
4900
4901                         skb = skb_peek(&tmp->data_q);
4902                         if (skb->priority < cur_prio)
4903                                 continue;
4904
4905                         if (skb->priority > cur_prio) {
4906                                 num = 0;
4907                                 min = ~0;
4908                                 cur_prio = skb->priority;
4909                         }
4910
4911                         num++;
4912
4913                         if (conn->sent < min) {
4914                                 min  = conn->sent;
4915                                 chan = tmp;
4916                         }
4917                 }
4918
4919                 if (hci_conn_num(hdev, type) == conn_num)
4920                         break;
4921         }
4922
4923         rcu_read_unlock();
4924
4925         if (!chan)
4926                 return NULL;
4927
4928         switch (chan->conn->type) {
4929         case ACL_LINK:
4930                 cnt = hdev->acl_cnt;
4931                 break;
4932         case AMP_LINK:
4933                 cnt = hdev->block_cnt;
4934                 break;
4935         case SCO_LINK:
4936         case ESCO_LINK:
4937                 cnt = hdev->sco_cnt;
4938                 break;
4939         case LE_LINK:
4940                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4941                 break;
4942         default:
4943                 cnt = 0;
4944                 BT_ERR("Unknown link type");
4945         }
4946
4947         q = cnt / num;
4948         *quote = q ? q : 1;
4949         BT_DBG("chan %p quote %d", chan, *quote);
4950         return chan;
4951 }
4952
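/* Anti-starvation pass, run after a scheduling round: channels that
 * transmitted get their sent counter reset, while channels that sent
 * nothing have the priority of their head skb promoted to
 * HCI_PRIO_MAX - 1 so they are preferred in the next round.
 */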
4953 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4954 {
4955         struct hci_conn_hash *h = &hdev->conn_hash;
4956         struct hci_conn *conn;
4957         int num = 0;
4958
4959         BT_DBG("%s", hdev->name);
4960
4961         rcu_read_lock();
4962
4963         list_for_each_entry_rcu(conn, &h->list, list) {
4964                 struct hci_chan *chan;
4965
4966                 if (conn->type != type)
4967                         continue;
4968
4969                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4970                         continue;
4971
4972                 num++;
4973
4974                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4975                         struct sk_buff *skb;
4976
4977                         if (chan->sent) {
4978                                 chan->sent = 0;
4979                                 continue;
4980                         }
4981
4982                         if (skb_queue_empty(&chan->data_q))
4983                                 continue;
4984
4985                         skb = skb_peek(&chan->data_q);
4986                         if (skb->priority >= HCI_PRIO_MAX - 1)
4987                                 continue;
4988
4989                         skb->priority = HCI_PRIO_MAX - 1;
4990
4991                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4992                                skb->priority);
4993                 }
4994
4995                 if (hci_conn_num(hdev, type) == num)
4996                         break;
4997         }
4998
4999         rcu_read_unlock();
5000
5001 }
5002
5003 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
5004 {
5005         /* Calculate count of blocks used by this packet */
5006         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
5007 }
5008
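/* If the controller has not returned any buffer credits for longer
 * than HCI_ACL_TX_TIMEOUT, assume the completed-packets events were
 * lost and kill the stalled connections so their credits can be
 * reclaimed.
 */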
5009 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
5010 {
5011         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5012                 /* ACL tx timeout must be longer than maximum
5013                  * link supervision timeout (40.9 seconds) */
5014                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5015                                        HCI_ACL_TX_TIMEOUT))
5016                         hci_link_tx_to(hdev, ACL_LINK);
5017         }
5018 }
5019
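/* Packet-based ACL scheduling: service the channels returned by
 * hci_chan_sent(), sending at most the computed quota per channel and
 * breaking off early if the head of a channel's queue drops below the
 * priority the run started with.
 */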
5020 static void hci_sched_acl_pkt(struct hci_dev *hdev)
5021 {
5022         unsigned int cnt = hdev->acl_cnt;
5023         struct hci_chan *chan;
5024         struct sk_buff *skb;
5025         int quote;
5026
5027         __check_timeout(hdev, cnt);
5028
5029         while (hdev->acl_cnt &&
5030                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
5031                 u32 priority = (skb_peek(&chan->data_q))->priority;
5032                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5033                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5034                                skb->len, skb->priority);
5035
5036                         /* Stop if priority has changed */
5037                         if (skb->priority < priority)
5038                                 break;
5039
5040                         skb = skb_dequeue(&chan->data_q);
5041
5042                         hci_conn_enter_active_mode(chan->conn,
5043                                                    bt_cb(skb)->force_active);
5044
5045                         hci_send_frame(hdev, skb);
5046                         hdev->acl_last_tx = jiffies;
5047
5048                         hdev->acl_cnt--;
5049                         chan->sent++;
5050                         chan->conn->sent++;
5051                 }
5052         }
5053
5054         if (cnt != hdev->acl_cnt)
5055                 hci_prio_recalculate(hdev, ACL_LINK);
5056 }
5057
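/* Block-based ACL scheduling, used with data-block flow control (e.g.
 * on AMP controllers): accounting is done in controller buffer blocks
 * rather than whole packets, so a single skb may consume several units
 * of both the quota and hdev->block_cnt.
 */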
5058 static void hci_sched_acl_blk(struct hci_dev *hdev)
5059 {
5060         unsigned int cnt = hdev->block_cnt;
5061         struct hci_chan *chan;
5062         struct sk_buff *skb;
5063         int quote;
5064         u8 type;
5065
5066         __check_timeout(hdev, cnt);
5067
5068         BT_DBG("%s", hdev->name);
5069
5070         if (hdev->dev_type == HCI_AMP)
5071                 type = AMP_LINK;
5072         else
5073                 type = ACL_LINK;
5074
5075         while (hdev->block_cnt > 0 &&
5076                (chan = hci_chan_sent(hdev, type, &quote))) {
5077                 u32 priority = (skb_peek(&chan->data_q))->priority;
5078                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5079                         int blocks;
5080
5081                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5082                                skb->len, skb->priority);
5083
5084                         /* Stop if priority has changed */
5085                         if (skb->priority < priority)
5086                                 break;
5087
5088                         skb = skb_dequeue(&chan->data_q);
5089
5090                         blocks = __get_blocks(hdev, skb);
5091                         if (blocks > hdev->block_cnt)
5092                                 return;
5093
5094                         hci_conn_enter_active_mode(chan->conn,
5095                                                    bt_cb(skb)->force_active);
5096
5097                         hci_send_frame(hdev, skb);
5098                         hdev->acl_last_tx = jiffies;
5099
5100                         hdev->block_cnt -= blocks;
5101                         quote -= blocks;
5102
5103                         chan->sent += blocks;
5104                         chan->conn->sent += blocks;
5105                 }
5106         }
5107
5108         if (cnt != hdev->block_cnt)
5109                 hci_prio_recalculate(hdev, type);
5110 }
5111
5112 static void hci_sched_acl(struct hci_dev *hdev)
5113 {
5114         BT_DBG("%s", hdev->name);
5115
5116         /* Nothing to do if a BR/EDR controller has no ACL links */
5117         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5118                 return;
5119
5120         /* Nothing to do if an AMP controller has no AMP links */
5121         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5122                 return;
5123
5124         switch (hdev->flow_ctl_mode) {
5125         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5126                 hci_sched_acl_pkt(hdev);
5127                 break;
5128
5129         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5130                 hci_sched_acl_blk(hdev);
5131                 break;
5132         }
5133 }
5134
5135 /* Schedule SCO */
5136 static void hci_sched_sco(struct hci_dev *hdev)
5137 {
5138         struct hci_conn *conn;
5139         struct sk_buff *skb;
5140         int quote;
5141
5142         BT_DBG("%s", hdev->name);
5143
5144         if (!hci_conn_num(hdev, SCO_LINK))
5145                 return;
5146
5147         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5148                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5149                         BT_DBG("skb %p len %d", skb, skb->len);
5150                         hci_send_frame(hdev, skb);
5151
5152                         conn->sent++;
5153                         if (conn->sent == ~0)
5154                                 conn->sent = 0;
5155                 }
5156         }
5157 }
5158
5159 static void hci_sched_esco(struct hci_dev *hdev)
5160 {
5161         struct hci_conn *conn;
5162         struct sk_buff *skb;
5163         int quote;
5164
5165         BT_DBG("%s", hdev->name);
5166
5167         if (!hci_conn_num(hdev, ESCO_LINK))
5168                 return;
5169
5170         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5171                                                      &quote))) {
5172                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5173                         BT_DBG("skb %p len %d", skb, skb->len);
5174                         hci_send_frame(hdev, skb);
5175
5176                         conn->sent++;
5177                         if (conn->sent == ~0)
5178                                 conn->sent = 0;
5179                 }
5180         }
5181 }
5182
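/* LE scheduling mirrors the packet-based ACL scheduler. Controllers
 * without dedicated LE buffers (le_pkts == 0) share the ACL buffer
 * pool instead.
 */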
5183 static void hci_sched_le(struct hci_dev *hdev)
5184 {
5185         struct hci_chan *chan;
5186         struct sk_buff *skb;
5187         int quote, cnt, tmp;
5188
5189         BT_DBG("%s", hdev->name);
5190
5191         if (!hci_conn_num(hdev, LE_LINK))
5192                 return;
5193
5194         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5195                 /* LE tx timeout must be longer than maximum
5196                  * link supervision timeout (40.9 seconds) */
5197                 if (!hdev->le_cnt && hdev->le_pkts &&
5198                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5199                         hci_link_tx_to(hdev, LE_LINK);
5200         }
5201
5202         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5203         tmp = cnt;
5204         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5205                 u32 priority = (skb_peek(&chan->data_q))->priority;
5206                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5207                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5208                                skb->len, skb->priority);
5209
5210                         /* Stop if priority has changed */
5211                         if (skb->priority < priority)
5212                                 break;
5213
5214                         skb = skb_dequeue(&chan->data_q);
5215
5216                         hci_send_frame(hdev, skb);
5217                         hdev->le_last_tx = jiffies;
5218
5219                         cnt--;
5220                         chan->sent++;
5221                         chan->conn->sent++;
5222                 }
5223         }
5224
5225         if (hdev->le_pkts)
5226                 hdev->le_cnt = cnt;
5227         else
5228                 hdev->acl_cnt = cnt;
5229
5230         if (cnt != tmp)
5231                 hci_prio_recalculate(hdev, LE_LINK);
5232 }
5233
5234 static void hci_tx_work(struct work_struct *work)
5235 {
5236         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5237         struct sk_buff *skb;
5238
5239         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5240                hdev->sco_cnt, hdev->le_cnt);
5241
5242         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5243                 /* Schedule queues and send pending frames to the HCI driver */
5244                 hci_sched_acl(hdev);
5245                 hci_sched_sco(hdev);
5246                 hci_sched_esco(hdev);
5247                 hci_sched_le(hdev);
5248         }
5249
5250         /* Send next queued raw (unknown type) packet */
5251         while ((skb = skb_dequeue(&hdev->raw_q)))
5252                 hci_send_frame(hdev, skb);
5253 }
5254
5255 /* ----- HCI RX task (incoming data processing) ----- */
5256
5257 /* ACL data packet */
5258 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5259 {
5260         struct hci_acl_hdr *hdr = (void *) skb->data;
5261         struct hci_conn *conn;
5262         __u16 handle, flags;
5263
5264         skb_pull(skb, HCI_ACL_HDR_SIZE);
5265
5266         handle = __le16_to_cpu(hdr->handle);
5267         flags  = hci_flags(handle);
5268         handle = hci_handle(handle);
5269
5270         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5271                handle, flags);
5272
5273         hdev->stat.acl_rx++;
5274
5275         hci_dev_lock(hdev);
5276         conn = hci_conn_hash_lookup_handle(hdev, handle);
5277         hci_dev_unlock(hdev);
5278
5279         if (conn) {
5280                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5281
5282                 /* Send to upper protocol */
5283                 l2cap_recv_acldata(conn, skb, flags);
5284                 return;
5285         } else {
5286                 BT_ERR("%s ACL packet for unknown connection handle %d",
5287                        hdev->name, handle);
5288         }
5289
5290         kfree_skb(skb);
5291 }
5292
5293 /* SCO data packet */
5294 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5295 {
5296         struct hci_sco_hdr *hdr = (void *) skb->data;
5297         struct hci_conn *conn;
5298         __u16 handle;
5299
5300         skb_pull(skb, HCI_SCO_HDR_SIZE);
5301
5302         handle = __le16_to_cpu(hdr->handle);
5303
5304         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5305
5306         hdev->stat.sco_rx++;
5307
5308         hci_dev_lock(hdev);
5309         conn = hci_conn_hash_lookup_handle(hdev, handle);
5310         hci_dev_unlock(hdev);
5311
5312         if (conn) {
5313                 /* Send to upper protocol */
5314                 sco_recv_scodata(conn, skb);
5315                 return;
5316         } else {
5317                 BT_ERR("%s SCO packet for unknown connection handle %d",
5318                        hdev->name, handle);
5319         }
5320
5321         kfree_skb(skb);
5322 }
5323
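/* Commands belonging to one request are queued back to back and only
 * the first of them has bt_cb(skb)->req.start set. The current request
 * is therefore complete once the head of cmd_q starts a new one (or
 * the queue is empty).
 */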
5324 static bool hci_req_is_complete(struct hci_dev *hdev)
5325 {
5326         struct sk_buff *skb;
5327
5328         skb = skb_peek(&hdev->cmd_q);
5329         if (!skb)
5330                 return true;
5331
5332         return bt_cb(skb)->req.start;
5333 }
5334
5335 static void hci_resend_last(struct hci_dev *hdev)
5336 {
5337         struct hci_command_hdr *sent;
5338         struct sk_buff *skb;
5339         u16 opcode;
5340
5341         if (!hdev->sent_cmd)
5342                 return;
5343
5344         sent = (void *) hdev->sent_cmd->data;
5345         opcode = __le16_to_cpu(sent->opcode);
5346         if (opcode == HCI_OP_RESET)
5347                 return;
5348
5349         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5350         if (!skb)
5351                 return;
5352
5353         skb_queue_head(&hdev->cmd_q, skb);
5354         queue_work(hdev->workqueue, &hdev->cmd_work);
5355 }
5356
5357 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5358 {
5359         hci_req_complete_t req_complete = NULL;
5360         struct sk_buff *skb;
5361         unsigned long flags;
5362
5363         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5364
5365         /* If the completed command doesn't match the last one that was
5366          * sent we need to do special handling of it.
5367          */
5368         if (!hci_sent_cmd_data(hdev, opcode)) {
5369                 /* Some CSR-based controllers generate a spontaneous
5370                  * reset complete event during init and any pending
5371                  * command will never be completed. In such a case we
5372                  * need to resend whatever was the last sent
5373                  * command.
5374                  */
5375                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5376                         hci_resend_last(hdev);
5377
5378                 return;
5379         }
5380
5381         /* If the command succeeded and there's still more commands in
5382          * this request the request is not yet complete.
5383          */
5384         if (!status && !hci_req_is_complete(hdev))
5385                 return;
5386
5387         /* If this was the last command in a request the complete
5388          * callback would be found in hdev->sent_cmd instead of the
5389          * command queue (hdev->cmd_q).
5390          */
5391         if (hdev->sent_cmd) {
5392                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5393
5394                 if (req_complete) {
5395                         /* We must set the complete callback to NULL to
5396                          * avoid calling the callback more than once if
5397                          * this function gets called again.
5398                          */
5399                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5400
5401                         goto call_complete;
5402                 }
5403         }
5404
5405         /* Remove all pending commands belonging to this request */
5406         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5407         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5408                 if (bt_cb(skb)->req.start) {
5409                         __skb_queue_head(&hdev->cmd_q, skb);
5410                         break;
5411                 }
5412
5413                 req_complete = bt_cb(skb)->req.complete;
5414                 kfree_skb(skb);
5415         }
5416         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5417
5418 call_complete:
5419         if (req_complete)
5420                 req_complete(hdev, status);
5421 }
5422
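/* RX work: drain hdev->rx_q, mirror each frame to the monitor (and to
 * the HCI sockets in promiscuous mode) and then dispatch it by packet
 * type. Data packets are dropped while the device is still in the
 * HCI_INIT state or is bound to a user channel.
 */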
5423 static void hci_rx_work(struct work_struct *work)
5424 {
5425         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5426         struct sk_buff *skb;
5427
5428         BT_DBG("%s", hdev->name);
5429
5430         while ((skb = skb_dequeue(&hdev->rx_q))) {
5431                 /* Send copy to monitor */
5432                 hci_send_to_monitor(hdev, skb);
5433
5434                 if (atomic_read(&hdev->promisc)) {
5435                         /* Send copy to the sockets */
5436                         hci_send_to_sock(hdev, skb);
5437                 }
5438
5439                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5440                         kfree_skb(skb);
5441                         continue;
5442                 }
5443
5444                 if (test_bit(HCI_INIT, &hdev->flags)) {
5445                         /* Don't process data packets in this state. */
5446                         switch (bt_cb(skb)->pkt_type) {
5447                         case HCI_ACLDATA_PKT:
5448                         case HCI_SCODATA_PKT:
5449                                 kfree_skb(skb);
5450                                 continue;
5451                         }
5452                 }
5453
5454                 /* Process frame */
5455                 switch (bt_cb(skb)->pkt_type) {
5456                 case HCI_EVENT_PKT:
5457                         BT_DBG("%s Event packet", hdev->name);
5458                         hci_event_packet(hdev, skb);
5459                         break;
5460
5461                 case HCI_ACLDATA_PKT:
5462                         BT_DBG("%s ACL data packet", hdev->name);
5463                         hci_acldata_packet(hdev, skb);
5464                         break;
5465
5466                 case HCI_SCODATA_PKT:
5467                         BT_DBG("%s SCO data packet", hdev->name);
5468                         hci_scodata_packet(hdev, skb);
5469                         break;
5470
5471                 default:
5472                         kfree_skb(skb);
5473                         break;
5474                 }
5475         }
5476 }
5477
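/* Command work: if the controller has free command credits (cmd_cnt),
 * dequeue one command, keep a clone in hdev->sent_cmd so the matching
 * complete event can be identified, send it and arm the command timer
 * (cancelled while an HCI reset is in flight). If cloning fails, the
 * command is requeued and the work rescheduled.
 */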
5478 static void hci_cmd_work(struct work_struct *work)
5479 {
5480         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5481         struct sk_buff *skb;
5482
5483         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5484                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5485
5486         /* Send queued commands */
5487         if (atomic_read(&hdev->cmd_cnt)) {
5488                 skb = skb_dequeue(&hdev->cmd_q);
5489                 if (!skb)
5490                         return;
5491
5492                 kfree_skb(hdev->sent_cmd);
5493
5494                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5495                 if (hdev->sent_cmd) {
5496                         atomic_dec(&hdev->cmd_cnt);
5497                         hci_send_frame(hdev, skb);
5498                         if (test_bit(HCI_RESET, &hdev->flags))
5499                                 cancel_delayed_work(&hdev->cmd_timer);
5500                         else
5501                                 schedule_delayed_work(&hdev->cmd_timer,
5502                                                       HCI_CMD_TIMEOUT);
5503                 } else {
5504                         skb_queue_head(&hdev->cmd_q, skb);
5505                         queue_work(hdev->workqueue, &hdev->cmd_work);
5506                 }
5507         }
5508 }
5509
5510 void hci_req_add_le_scan_disable(struct hci_request *req)
5511 {
5512         struct hci_cp_le_set_scan_enable cp;
5513
5514         memset(&cp, 0, sizeof(cp));
5515         cp.enable = LE_SCAN_DISABLE;
5516         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5517 }
5518
5519 static void add_to_white_list(struct hci_request *req,
5520                               struct hci_conn_params *params)
5521 {
5522         struct hci_cp_le_add_to_white_list cp;
5523
5524         cp.bdaddr_type = params->addr_type;
5525         bacpy(&cp.bdaddr, &params->addr);
5526
5527         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5528 }
5529
5530 static u8 update_white_list(struct hci_request *req)
5531 {
5532         struct hci_dev *hdev = req->hdev;
5533         struct hci_conn_params *params;
5534         struct bdaddr_list *b;
5535         u8 white_list_entries = 0;
5536
5537         /* Go through the current white list programmed into the
5538          * controller one by one and check if that address is still
5539          * in the list of pending connections or list of devices to
5540          * report. If not present in either list, then queue the
5541          * command to remove it from the controller.
5542          */
5543         list_for_each_entry(b, &hdev->le_white_list, list) {
5544                 struct hci_cp_le_del_from_white_list cp;
5545
5546                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5547                                               &b->bdaddr, b->bdaddr_type) ||
5548                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5549                                               &b->bdaddr, b->bdaddr_type)) {
5550                         white_list_entries++;
5551                         continue;
5552                 }
5553
5554                 cp.bdaddr_type = b->bdaddr_type;
5555                 bacpy(&cp.bdaddr, &b->bdaddr);
5556
5557                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5558                             sizeof(cp), &cp);
5559         }
5560
5561         /* Since all no longer valid white list entries have been
5562          * removed, walk through the list of pending connections
5563          * and ensure that any new device gets programmed into
5564          * the controller.
5565          *
5566          * If the list of devices is larger than the number of
5567          * available white list entries in the controller, then
5568          * just abort and return a filter policy value that does
5569          * not use the white list.
5570          */
5571         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5572                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5573                                            &params->addr, params->addr_type))
5574                         continue;
5575
5576                 if (white_list_entries >= hdev->le_white_list_size) {
5577                         /* Select filter policy to accept all advertising */
5578                         return 0x00;
5579                 }
5580
5581                 if (hci_find_irk_by_addr(hdev, &params->addr,
5582                                          params->addr_type)) {
5583                         /* The white list cannot be used with RPAs */
5584                         return 0x00;
5585                 }
5586
5587                 white_list_entries++;
5588                 add_to_white_list(req, params);
5589         }
5590
5591         /* After adding all new pending connections, walk through
5592          * the list of pending reports and also add these to the
5593          * white list if there is still space.
5594          */
5595         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5596                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5597                                            &params->addr, params->addr_type))
5598                         continue;
5599
5600                 if (white_list_entries >= hdev->le_white_list_size) {
5601                         /* Select filter policy to accept all advertising */
5602                         return 0x00;
5603                 }
5604
5605                 if (hci_find_irk_by_addr(hdev, &params->addr,
5606                                          params->addr_type)) {
5607                         /* The white list cannot be used with RPAs */
5608                         return 0x00;
5609                 }
5610
5611                 white_list_entries++;
5612                 add_to_white_list(req, params);
5613         }
5614
5615         /* Select filter policy to use white list */
5616         return 0x01;
5617 }
5618
5619 void hci_req_add_le_passive_scan(struct hci_request *req)
5620 {
5621         struct hci_cp_le_set_scan_param param_cp;
5622         struct hci_cp_le_set_scan_enable enable_cp;
5623         struct hci_dev *hdev = req->hdev;
5624         u8 own_addr_type;
5625         u8 filter_policy;
5626
5627         /* Set require_privacy to false since no SCAN_REQ are sent
5628          * during passive scanning. Not using an unresolvable address
5629          * here is important so that peer devices using direct
5630          * advertising with our address will be correctly reported
5631          * by the controller.
5632          */
5633         if (hci_update_random_address(req, false, &own_addr_type))
5634                 return;
5635
5636         /* Adding or removing entries from the white list must
5637          * happen before enabling scanning. The controller does
5638          * not allow white list modification while scanning.
5639          */
5640         filter_policy = update_white_list(req);
5641
5642         /* When the controller is using random resolvable addresses and
5643          * with that having LE privacy enabled, then controllers with
5644          * Extended Scanner Filter Policies support can now enable support
5645          * for handling directed advertising.
5646          *
5647          * So instead of using filter policies 0x00 (no whitelist)
5648          * and 0x01 (whitelist enabled) use the new filter policies
5649          * 0x02 (no whitelist) and 0x03 (whitelist enabled).
5650          */
5651         if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
5652             (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
5653                 filter_policy |= 0x02;
5654
5655         memset(&param_cp, 0, sizeof(param_cp));
5656         param_cp.type = LE_SCAN_PASSIVE;
5657         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5658         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5659         param_cp.own_address_type = own_addr_type;
5660         param_cp.filter_policy = filter_policy;
5661         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5662                     &param_cp);
5663
5664         memset(&enable_cp, 0, sizeof(enable_cp));
5665         enable_cp.enable = LE_SCAN_ENABLE;
5666         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5667         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5668                     &enable_cp);
5669 }
5670
5671 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5672 {
5673         if (status)
5674                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5675                        status);
5676 }
5677
5678 /* This function controls the background scanning based on hdev->pend_le_conns
5679  * list. If there are pending LE connection we start the background scanning,
5680  * list. If there are pending LE connections we start the background scanning,
5681  *
5682  * This function requires the caller holds hdev->lock.
5683  */
5684 void hci_update_background_scan(struct hci_dev *hdev)
5685 {
5686         struct hci_request req;
5687         struct hci_conn *conn;
5688         int err;
5689
5690         if (!test_bit(HCI_UP, &hdev->flags) ||
5691             test_bit(HCI_INIT, &hdev->flags) ||
5692             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5693             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5694             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5695             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5696                 return;
5697
5698         /* No point in doing scanning if LE support hasn't been enabled */
5699         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5700                 return;
5701
5702         /* If discovery is active don't interfere with it */
5703         if (hdev->discovery.state != DISCOVERY_STOPPED)
5704                 return;
5705
5706         /* Reset RSSI and UUID filters when starting background scanning
5707          * since these filters are meant for service discovery only.
5708          *
5709          * The Start Discovery and Start Service Discovery operations
5710          * take care of setting proper values for the RSSI threshold
5711          * and UUID filter list, so it is safe to just reset them here.
5712          */
5713         hci_discovery_filter_clear(hdev);
5714
5715         hci_req_init(&req, hdev);
5716
5717         if (list_empty(&hdev->pend_le_conns) &&
5718             list_empty(&hdev->pend_le_reports)) {
5719                 /* If there are no pending LE connections or devices
5720                  * to be scanned for, we should stop the background
5721                  * scanning.
5722                  */
5723
5724                 /* If the controller is not scanning, we are done. */
5725                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5726                         return;
5727
5728                 hci_req_add_le_scan_disable(&req);
5729
5730                 BT_DBG("%s stopping background scanning", hdev->name);
5731         } else {
5732                 /* If there is at least one pending LE connection, we should
5733                  * keep the background scan running.
5734                  */
5735
5736                 /* If the controller is connecting, we should not start scanning
5737                  * since some controllers are not able to scan and connect at
5738                  * the same time.
5739                  */
5740                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5741                 if (conn)
5742                         return;
5743
5744                 /* If the controller is currently scanning, we stop it to ensure
5745                  * we don't miss any advertising (due to the duplicates filter).
5746                  */
5747                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5748                         hci_req_add_le_scan_disable(&req);
5749
5750                 hci_req_add_le_passive_scan(&req);
5751
5752                 BT_DBG("%s starting background scanning", hdev->name);
5753         }
5754
5755         err = hci_req_run(&req, update_background_scan_complete);
5756         if (err)
5757                 BT_ERR("Failed to run HCI request: err %d", err);
5758 }
5759
5760 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5761 {
5762         struct bdaddr_list *b;
5763
5764         list_for_each_entry(b, &hdev->whitelist, list) {
5765                 struct hci_conn *conn;
5766
5767                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5768                 if (!conn)
5769                         return true;
5770
5771                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5772                         return true;
5773         }
5774
5775         return false;
5776 }
5777
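/* Enable page scanning whenever the device is connectable or at least
 * one whitelisted peer is currently disconnected, so that such peers
 * can reconnect on their own; inquiry scan is added when the device is
 * discoverable.
 */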
5778 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5779 {
5780         u8 scan;
5781
5782         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5783                 return;
5784
5785         if (!hdev_is_powered(hdev))
5786                 return;
5787
5788         if (mgmt_powering_down(hdev))
5789                 return;
5790
5791         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5792             disconnected_whitelist_entries(hdev))
5793                 scan = SCAN_PAGE;
5794         else
5795                 scan = SCAN_DISABLED;
5796
5797         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5798                 return;
5799
5800         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5801                 scan |= SCAN_INQUIRY;
5802
5803         if (req)
5804                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5805         else
5806                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5807 }