Bluetooth: Remove unused hci_pend_le_conn_add function
[cascardo/linux.git] / net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
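
/* The resulting file lives in the controller's debugfs directory and can
 * be toggled from userspace, e.g. (assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */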

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
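                /* As an illustrative example, the stored bytes
                 * fb 34 9b 5f 80 00 00 80 00 10 00 00 00 18 00 00
                 * come out as 00001800-0000-1000-8000-00805f9b34fb.
                 */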
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

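/* The idle timeout appears to be expressed in milliseconds: 0 disables
 * the idle timer, otherwise values from 500 ms up to one hour (3600000 ms)
 * are accepted below.
 */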
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

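/* Sniff interval values are expressed in baseband slots of 0.625 ms and,
 * per the specification, must be an even number of slots.
 */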
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

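/* LE connection interval values are in units of 1.25 ms; the range
 * checked below (0x0006 to 0x0c80) corresponds to 7.5 ms up to 4 s.
 */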
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

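/* LE slave latency is a plain count of connection events the slave may
 * skip; 0x01f3 (499) is the maximum allowed by the specification.
 */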
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

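/* The LE supervision timeout is in units of 10 ms, so the range checked
 * below (0x000a to 0x0c80) corresponds to 100 ms up to 32 s.
 */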
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

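/* The advertising channel map is a bitmask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39, hence the 0x01-0x07 range.
 */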
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
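
/* A typical caller-side pattern (illustrative sketch, mirroring the use
 * in dut_mode_write() above):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */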

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However, some controllers list max_page
                 * as 0 as long as SSP has not been enabled. To achieve proper
                 * debugging output, force max_page to a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

1677         /* The HCI_BREDR device type covers LE-only, BR/EDR-only and
1678          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1679          * first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1795 {
1796         __u8 scan = opt;
1797
1798         BT_DBG("%s %x", req->hdev->name, scan);
1799
1800         /* Inquiry and Page scans */
1801         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1802 }
1803
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1805 {
1806         __u8 auth = opt;
1807
1808         BT_DBG("%s %x", req->hdev->name, auth);
1809
1810         /* Authentication */
1811         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1812 }
1813
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1815 {
1816         __u8 encrypt = opt;
1817
1818         BT_DBG("%s %x", req->hdev->name, encrypt);
1819
1820         /* Encryption */
1821         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1822 }
1823
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1825 {
1826         __le16 policy = cpu_to_le16(opt);
1827
1828         BT_DBG("%s %x", req->hdev->name, policy);
1829
1830         /* Default link policy */
1831         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1832 }
1833
1834 /* Get HCI device by index.
1835  * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1837 {
1838         struct hci_dev *hdev = NULL, *d;
1839
1840         BT_DBG("%d", index);
1841
1842         if (index < 0)
1843                 return NULL;
1844
1845         read_lock(&hci_dev_list_lock);
1846         list_for_each_entry(d, &hci_dev_list, list) {
1847                 if (d->id == index) {
1848                         hdev = hci_dev_hold(d);
1849                         break;
1850                 }
1851         }
1852         read_unlock(&hci_dev_list_lock);
1853         return hdev;
1854 }
1855
1856 /* ---- Inquiry support ---- */
1857
1858 bool hci_discovery_active(struct hci_dev *hdev)
1859 {
1860         struct discovery_state *discov = &hdev->discovery;
1861
1862         switch (discov->state) {
1863         case DISCOVERY_FINDING:
1864         case DISCOVERY_RESOLVING:
1865                 return true;
1866
1867         default:
1868                 return false;
1869         }
1870 }
1871
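/* Advance the discovery state machine and notify userspace through
 * mgmt when discovery effectively starts or stops. Entering
 * DISCOVERY_STOPPED also re-evaluates the LE background scan.
 */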
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1873 {
1874         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1875
1876         if (hdev->discovery.state == state)
1877                 return;
1878
1879         switch (state) {
1880         case DISCOVERY_STOPPED:
1881                 hci_update_background_scan(hdev);
1882
1883                 if (hdev->discovery.state != DISCOVERY_STARTING)
1884                         mgmt_discovering(hdev, 0);
1885                 break;
1886         case DISCOVERY_STARTING:
1887                 break;
1888         case DISCOVERY_FINDING:
1889                 mgmt_discovering(hdev, 1);
1890                 break;
1891         case DISCOVERY_RESOLVING:
1892                 break;
1893         case DISCOVERY_STOPPING:
1894                 break;
1895         }
1896
1897         hdev->discovery.state = state;
1898 }
1899
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1901 {
1902         struct discovery_state *cache = &hdev->discovery;
1903         struct inquiry_entry *p, *n;
1904
1905         list_for_each_entry_safe(p, n, &cache->all, all) {
1906                 list_del(&p->all);
1907                 kfree(p);
1908         }
1909
1910         INIT_LIST_HEAD(&cache->unknown);
1911         INIT_LIST_HEAD(&cache->resolve);
1912 }
1913
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915                                                bdaddr_t *bdaddr)
1916 {
1917         struct discovery_state *cache = &hdev->discovery;
1918         struct inquiry_entry *e;
1919
1920         BT_DBG("cache %p, %pMR", cache, bdaddr);
1921
1922         list_for_each_entry(e, &cache->all, all) {
1923                 if (!bacmp(&e->data.bdaddr, bdaddr))
1924                         return e;
1925         }
1926
1927         return NULL;
1928 }
1929
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1931                                                        bdaddr_t *bdaddr)
1932 {
1933         struct discovery_state *cache = &hdev->discovery;
1934         struct inquiry_entry *e;
1935
1936         BT_DBG("cache %p, %pMR", cache, bdaddr);
1937
1938         list_for_each_entry(e, &cache->unknown, list) {
1939                 if (!bacmp(&e->data.bdaddr, bdaddr))
1940                         return e;
1941         }
1942
1943         return NULL;
1944 }
1945
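/* Find an entry on the resolve list. BDADDR_ANY acts as a wildcard
 * and matches the first entry in the requested name_state; any other
 * address is matched exactly.
 */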
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1947                                                        bdaddr_t *bdaddr,
1948                                                        int state)
1949 {
1950         struct discovery_state *cache = &hdev->discovery;
1951         struct inquiry_entry *e;
1952
1953         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1954
1955         list_for_each_entry(e, &cache->resolve, list) {
1956                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957                         return e;
1958                 if (!bacmp(&e->data.bdaddr, bdaddr))
1959                         return e;
1960         }
1961
1962         return NULL;
1963 }
1964
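/* Re-insert the entry so the resolve list stays ordered by signal
 * strength: entries with a stronger RSSI (smaller absolute value)
 * get their names resolved first. Entries whose name request is
 * already pending keep their position.
 */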
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966                                       struct inquiry_entry *ie)
1967 {
1968         struct discovery_state *cache = &hdev->discovery;
1969         struct list_head *pos = &cache->resolve;
1970         struct inquiry_entry *p;
1971
1972         list_del(&ie->list);
1973
1974         list_for_each_entry(p, &cache->resolve, list) {
1975                 if (p->name_state != NAME_PENDING &&
1976                     abs(p->data.rssi) >= abs(ie->data.rssi))
1977                         break;
1978                 pos = &p->list;
1979         }
1980
1981         list_add(&ie->list, pos);
1982 }
1983
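/* Add a freshly discovered device to the inquiry cache, or update
 * the existing entry. Returns MGMT_DEV_FOUND_* flags telling the
 * caller whether legacy pairing applies and whether the remote name
 * still needs to be confirmed or resolved.
 */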
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985                              bool name_known)
1986 {
1987         struct discovery_state *cache = &hdev->discovery;
1988         struct inquiry_entry *ie;
1989         u32 flags = 0;
1990
1991         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1992
1993         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
1995         if (!data->ssp_mode)
1996                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1997
1998         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1999         if (ie) {
2000                 if (!ie->data.ssp_mode)
2001                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2002
2003                 if (ie->name_state == NAME_NEEDED &&
2004                     data->rssi != ie->data.rssi) {
2005                         ie->data.rssi = data->rssi;
2006                         hci_inquiry_cache_update_resolve(hdev, ie);
2007                 }
2008
2009                 goto update;
2010         }
2011
2012         /* Entry not in the cache. Add new one. */
2013         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2014         if (!ie) {
2015                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016                 goto done;
2017         }
2018
2019         list_add(&ie->all, &cache->all);
2020
2021         if (name_known) {
2022                 ie->name_state = NAME_KNOWN;
2023         } else {
2024                 ie->name_state = NAME_NOT_KNOWN;
2025                 list_add(&ie->list, &cache->unknown);
2026         }
2027
2028 update:
2029         if (name_known && ie->name_state != NAME_KNOWN &&
2030             ie->name_state != NAME_PENDING) {
2031                 ie->name_state = NAME_KNOWN;
2032                 list_del(&ie->list);
2033         }
2034
2035         memcpy(&ie->data, data, sizeof(*data));
2036         ie->timestamp = jiffies;
2037         cache->timestamp = jiffies;
2038
2039         if (ie->name_state == NAME_NOT_KNOWN)
2040                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2041
2042 done:
2043         return flags;
2044 }
2045
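/* Copy up to num cached entries into buf as inquiry_info records and
 * return how many were copied. Called under hdev->lock and must not
 * sleep.
 */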
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_info *info = (struct inquiry_info *) buf;
2050         struct inquiry_entry *e;
2051         int copied = 0;
2052
2053         list_for_each_entry(e, &cache->all, all) {
2054                 struct inquiry_data *data = &e->data;
2055
2056                 if (copied >= num)
2057                         break;
2058
2059                 bacpy(&info->bdaddr, &data->bdaddr);
2060                 info->pscan_rep_mode    = data->pscan_rep_mode;
2061                 info->pscan_period_mode = data->pscan_period_mode;
2062                 info->pscan_mode        = data->pscan_mode;
2063                 memcpy(info->dev_class, data->dev_class, 3);
2064                 info->clock_offset      = data->clock_offset;
2065
2066                 info++;
2067                 copied++;
2068         }
2069
2070         BT_DBG("cache %p, copied %d", cache, copied);
2071         return copied;
2072 }
2073
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2075 {
2076         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077         struct hci_dev *hdev = req->hdev;
2078         struct hci_cp_inquiry cp;
2079
2080         BT_DBG("%s", hdev->name);
2081
2082         if (test_bit(HCI_INQUIRY, &hdev->flags))
2083                 return;
2084
2085         /* Start Inquiry */
2086         memcpy(&cp.lap, &ir->lap, 3);
2087         cp.length  = ir->length;
2088         cp.num_rsp = ir->num_rsp;
2089         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2090 }
2091
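/* Action function for wait_on_bit(): reschedule and report whether a
 * signal is pending so the inquiry wait can be interrupted with
 * -EINTR.
 */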
2092 static int wait_inquiry(void *word)
2093 {
2094         schedule();
2095         return signal_pending(current);
2096 }
2097
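/* Implementation of the HCIINQUIRY ioctl: flush the cache and run a
 * fresh inquiry if the cached results are stale, wait for the
 * procedure to finish and copy up to 255 results back to userspace.
 */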
2098 int hci_inquiry(void __user *arg)
2099 {
2100         __u8 __user *ptr = arg;
2101         struct hci_inquiry_req ir;
2102         struct hci_dev *hdev;
2103         int err = 0, do_inquiry = 0, max_rsp;
2104         long timeo;
2105         __u8 *buf;
2106
2107         if (copy_from_user(&ir, ptr, sizeof(ir)))
2108                 return -EFAULT;
2109
2110         hdev = hci_dev_get(ir.dev_id);
2111         if (!hdev)
2112                 return -ENODEV;
2113
2114         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115                 err = -EBUSY;
2116                 goto done;
2117         }
2118
2119         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2120                 err = -EOPNOTSUPP;
2121                 goto done;
2122         }
2123
2124         if (hdev->dev_type != HCI_BREDR) {
2125                 err = -EOPNOTSUPP;
2126                 goto done;
2127         }
2128
2129         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130                 err = -EOPNOTSUPP;
2131                 goto done;
2132         }
2133
2134         hci_dev_lock(hdev);
2135         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137                 hci_inquiry_cache_flush(hdev);
2138                 do_inquiry = 1;
2139         }
2140         hci_dev_unlock(hdev);
2141
2142         timeo = ir.length * msecs_to_jiffies(2000);
2143
2144         if (do_inquiry) {
2145                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146                                    timeo);
2147                 if (err < 0)
2148                         goto done;
2149
2150                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151                  * cleared). If it is interrupted by a signal, return -EINTR.
2152                  */
2153                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154                                 TASK_INTERRUPTIBLE))
2155                         return -EINTR;
2156         }
2157
2158         /* For an unlimited number of responses we use a buffer with
2159          * 255 entries.
2160          */
2161         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
2163         /* cache_dump can't sleep. Therefore we allocate a temporary
2164          * buffer and then copy it to user space.
2165          */
2166         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2167         if (!buf) {
2168                 err = -ENOMEM;
2169                 goto done;
2170         }
2171
2172         hci_dev_lock(hdev);
2173         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174         hci_dev_unlock(hdev);
2175
2176         BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179                 ptr += sizeof(ir);
2180                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2181                                  ir.num_rsp))
2182                         err = -EFAULT;
2183         } else
2184                 err = -EFAULT;
2185
2186         kfree(buf);
2187
2188 done:
2189         hci_dev_put(hdev);
2190         return err;
2191 }
2192
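/* Power on a controller: run the driver open callback, the optional
 * driver setup stage and the HCI init sequence. On failure all
 * queued work is flushed and the transport is closed again.
 */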
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2194 {
2195         int ret = 0;
2196
2197         BT_DBG("%s %p", hdev->name, hdev);
2198
2199         hci_req_lock(hdev);
2200
2201         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202                 ret = -ENODEV;
2203                 goto done;
2204         }
2205
2206         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207                 /* Check for rfkill but allow the HCI setup stage to
2208                  * proceed (which in itself doesn't cause any RF activity).
2209                  */
2210                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211                         ret = -ERFKILL;
2212                         goto done;
2213                 }
2214
2215                 /* Check for valid public address or a configured static
2216          * random address, but let the HCI setup proceed to
2217                  * be able to determine if there is a public address
2218                  * or not.
2219                  *
2220                  * In case of user channel usage, it is not important
2221                  * if a public address or static random address is
2222                  * available.
2223                  *
2224                  * This check is only valid for BR/EDR controllers
2225                  * since AMP controllers do not have an address.
2226                  */
2227                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228                     hdev->dev_type == HCI_BREDR &&
2229                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231                         ret = -EADDRNOTAVAIL;
2232                         goto done;
2233                 }
2234         }
2235
2236         if (test_bit(HCI_UP, &hdev->flags)) {
2237                 ret = -EALREADY;
2238                 goto done;
2239         }
2240
2241         if (hdev->open(hdev)) {
2242                 ret = -EIO;
2243                 goto done;
2244         }
2245
2246         atomic_set(&hdev->cmd_cnt, 1);
2247         set_bit(HCI_INIT, &hdev->flags);
2248
2249         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250                 ret = hdev->setup(hdev);
2251
2252         /* If public address change is configured, ensure that the
2253          * address gets programmed. If the driver does not support
2254          * changing the public address, fail the power on procedure.
2255          */
2256         if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257                 if (hdev->set_bdaddr)
2258                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259                 else
2260                         ret = -EADDRNOTAVAIL;
2261         }
2262
2263         if (!ret) {
2264                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2265                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2266                         ret = __hci_init(hdev);
2267         }
2268
2269         clear_bit(HCI_INIT, &hdev->flags);
2270
2271         if (!ret) {
2272                 hci_dev_hold(hdev);
2273                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2274                 set_bit(HCI_UP, &hdev->flags);
2275                 hci_notify(hdev, HCI_DEV_UP);
2276                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2277                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2278                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2279                     hdev->dev_type == HCI_BREDR) {
2280                         hci_dev_lock(hdev);
2281                         mgmt_powered(hdev, 1);
2282                         hci_dev_unlock(hdev);
2283                 }
2284         } else {
2285                 /* Init failed, cleanup */
2286                 flush_work(&hdev->tx_work);
2287                 flush_work(&hdev->cmd_work);
2288                 flush_work(&hdev->rx_work);
2289
2290                 skb_queue_purge(&hdev->cmd_q);
2291                 skb_queue_purge(&hdev->rx_q);
2292
2293                 if (hdev->flush)
2294                         hdev->flush(hdev);
2295
2296                 if (hdev->sent_cmd) {
2297                         kfree_skb(hdev->sent_cmd);
2298                         hdev->sent_cmd = NULL;
2299                 }
2300
2301                 hdev->close(hdev);
2302                 hdev->flags &= BIT(HCI_RAW);
2303         }
2304
2305 done:
2306         hci_req_unlock(hdev);
2307         return ret;
2308 }
2309
2310 /* ---- HCI ioctl helpers ---- */
2311
2312 int hci_dev_open(__u16 dev)
2313 {
2314         struct hci_dev *hdev;
2315         int err;
2316
2317         hdev = hci_dev_get(dev);
2318         if (!hdev)
2319                 return -ENODEV;
2320
2321         /* Devices that are marked as unconfigured can only be powered
2322          * up as user channel. Trying to bring them up as normal devices
2323          * will result in a failure. Only user channel operation is
2324          * possible.
2325          *
2326          * When this function is called for a user channel, the flag
2327          * HCI_USER_CHANNEL will be set first before attempting to
2328          * open the device.
2329          */
2330         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2331             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332                 err = -EOPNOTSUPP;
2333                 goto done;
2334         }
2335
2336         /* We need to ensure that no other power on/off work is pending
2337          * before proceeding to call hci_dev_do_open. This is
2338          * particularly important if the setup procedure has not yet
2339          * completed.
2340          */
2341         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342                 cancel_delayed_work(&hdev->power_off);
2343
2344         /* After this call it is guaranteed that the setup procedure
2345          * has finished. This means that error conditions like RFKILL
2346          * or no valid public or static random address apply.
2347          */
2348         flush_workqueue(hdev->req_workqueue);
2349
2350         err = hci_dev_do_open(hdev);
2351
2352 done:
2353         hci_dev_put(hdev);
2354         return err;
2355 }
2356
2357 /* This function requires the caller holds hdev->lock */
2358 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2359 {
2360         struct hci_conn_params *p;
2361
2362         list_for_each_entry(p, &hdev->le_conn_params, list)
2363                 list_del_init(&p->action);
2364
2365         BT_DBG("All LE pending actions cleared");
2366 }
2367
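/* Power off a controller: flush pending work, drop all queues,
 * connections and caches, optionally send HCI Reset (see
 * HCI_QUIRK_RESET_ON_CLOSE) and call the driver close callback.
 */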
2368 static int hci_dev_do_close(struct hci_dev *hdev)
2369 {
2370         BT_DBG("%s %p", hdev->name, hdev);
2371
2372         cancel_delayed_work(&hdev->power_off);
2373
2374         hci_req_cancel(hdev, ENODEV);
2375         hci_req_lock(hdev);
2376
2377         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2378                 cancel_delayed_work_sync(&hdev->cmd_timer);
2379                 hci_req_unlock(hdev);
2380                 return 0;
2381         }
2382
2383         /* Flush RX and TX works */
2384         flush_work(&hdev->tx_work);
2385         flush_work(&hdev->rx_work);
2386
2387         if (hdev->discov_timeout > 0) {
2388                 cancel_delayed_work(&hdev->discov_off);
2389                 hdev->discov_timeout = 0;
2390                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2391                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2392         }
2393
2394         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2395                 cancel_delayed_work(&hdev->service_cache);
2396
2397         cancel_delayed_work_sync(&hdev->le_scan_disable);
2398
2399         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2400                 cancel_delayed_work_sync(&hdev->rpa_expired);
2401
2402         hci_dev_lock(hdev);
2403         hci_inquiry_cache_flush(hdev);
2404         hci_conn_hash_flush(hdev);
2405         hci_pend_le_actions_clear(hdev);
2406         hci_dev_unlock(hdev);
2407
2408         hci_notify(hdev, HCI_DEV_DOWN);
2409
2410         if (hdev->flush)
2411                 hdev->flush(hdev);
2412
2413         /* Reset device */
2414         skb_queue_purge(&hdev->cmd_q);
2415         atomic_set(&hdev->cmd_cnt, 1);
2416         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2417             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2418             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2419                 set_bit(HCI_INIT, &hdev->flags);
2420                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2421                 clear_bit(HCI_INIT, &hdev->flags);
2422         }
2423
2424         /* Flush cmd work */
2425         flush_work(&hdev->cmd_work);
2426
2427         /* Drop queues */
2428         skb_queue_purge(&hdev->rx_q);
2429         skb_queue_purge(&hdev->cmd_q);
2430         skb_queue_purge(&hdev->raw_q);
2431
2432         /* Drop last sent command */
2433         if (hdev->sent_cmd) {
2434                 cancel_delayed_work_sync(&hdev->cmd_timer);
2435                 kfree_skb(hdev->sent_cmd);
2436                 hdev->sent_cmd = NULL;
2437         }
2438
2439         kfree_skb(hdev->recv_evt);
2440         hdev->recv_evt = NULL;
2441
2442         /* After this point our queues are empty
2443          * and no tasks are scheduled. */
2444         hdev->close(hdev);
2445
2446         /* Clear flags */
2447         hdev->flags &= BIT(HCI_RAW);
2448         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2449
2450         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2451                 if (hdev->dev_type == HCI_BREDR) {
2452                         hci_dev_lock(hdev);
2453                         mgmt_powered(hdev, 0);
2454                         hci_dev_unlock(hdev);
2455                 }
2456         }
2457
2458         /* Controller radio is available but is currently powered down */
2459         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2460
2461         memset(hdev->eir, 0, sizeof(hdev->eir));
2462         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2463         bacpy(&hdev->random_addr, BDADDR_ANY);
2464
2465         hci_req_unlock(hdev);
2466
2467         hci_dev_put(hdev);
2468         return 0;
2469 }
2470
2471 int hci_dev_close(__u16 dev)
2472 {
2473         struct hci_dev *hdev;
2474         int err;
2475
2476         hdev = hci_dev_get(dev);
2477         if (!hdev)
2478                 return -ENODEV;
2479
2480         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2481                 err = -EBUSY;
2482                 goto done;
2483         }
2484
2485         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2486                 cancel_delayed_work(&hdev->power_off);
2487
2488         err = hci_dev_do_close(hdev);
2489
2490 done:
2491         hci_dev_put(hdev);
2492         return err;
2493 }
2494
2495 int hci_dev_reset(__u16 dev)
2496 {
2497         struct hci_dev *hdev;
2498         int ret = 0;
2499
2500         hdev = hci_dev_get(dev);
2501         if (!hdev)
2502                 return -ENODEV;
2503
2504         hci_req_lock(hdev);
2505
2506         if (!test_bit(HCI_UP, &hdev->flags)) {
2507                 ret = -ENETDOWN;
2508                 goto done;
2509         }
2510
2511         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2512                 ret = -EBUSY;
2513                 goto done;
2514         }
2515
2516         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2517                 ret = -EOPNOTSUPP;
2518                 goto done;
2519         }
2520
2521         /* Drop queues */
2522         skb_queue_purge(&hdev->rx_q);
2523         skb_queue_purge(&hdev->cmd_q);
2524
2525         hci_dev_lock(hdev);
2526         hci_inquiry_cache_flush(hdev);
2527         hci_conn_hash_flush(hdev);
2528         hci_dev_unlock(hdev);
2529
2530         if (hdev->flush)
2531                 hdev->flush(hdev);
2532
2533         atomic_set(&hdev->cmd_cnt, 1);
2534         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2535
2536         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2537
2538 done:
2539         hci_req_unlock(hdev);
2540         hci_dev_put(hdev);
2541         return ret;
2542 }
2543
2544 int hci_dev_reset_stat(__u16 dev)
2545 {
2546         struct hci_dev *hdev;
2547         int ret = 0;
2548
2549         hdev = hci_dev_get(dev);
2550         if (!hdev)
2551                 return -ENODEV;
2552
2553         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2554                 ret = -EBUSY;
2555                 goto done;
2556         }
2557
2558         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2559                 ret = -EOPNOTSUPP;
2560                 goto done;
2561         }
2562
2563         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2564
2565 done:
2566         hci_dev_put(hdev);
2567         return ret;
2568 }
2569
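/* Handle the legacy HCISET* device ioctls. These are BR/EDR specific
 * and are rejected for user channel, unconfigured and AMP controllers
 * as well as controllers with BR/EDR disabled.
 */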
2570 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2571 {
2572         struct hci_dev *hdev;
2573         struct hci_dev_req dr;
2574         int err = 0;
2575
2576         if (copy_from_user(&dr, arg, sizeof(dr)))
2577                 return -EFAULT;
2578
2579         hdev = hci_dev_get(dr.dev_id);
2580         if (!hdev)
2581                 return -ENODEV;
2582
2583         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2584                 err = -EBUSY;
2585                 goto done;
2586         }
2587
2588         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2589                 err = -EOPNOTSUPP;
2590                 goto done;
2591         }
2592
2593         if (hdev->dev_type != HCI_BREDR) {
2594                 err = -EOPNOTSUPP;
2595                 goto done;
2596         }
2597
2598         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2599                 err = -EOPNOTSUPP;
2600                 goto done;
2601         }
2602
2603         switch (cmd) {
2604         case HCISETAUTH:
2605                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2606                                    HCI_INIT_TIMEOUT);
2607                 break;
2608
2609         case HCISETENCRYPT:
2610                 if (!lmp_encrypt_capable(hdev)) {
2611                         err = -EOPNOTSUPP;
2612                         break;
2613                 }
2614
2615                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2616                         /* Auth must be enabled first */
2617                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2618                                            HCI_INIT_TIMEOUT);
2619                         if (err)
2620                                 break;
2621                 }
2622
2623                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2624                                    HCI_INIT_TIMEOUT);
2625                 break;
2626
2627         case HCISETSCAN:
2628                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2629                                    HCI_INIT_TIMEOUT);
2630                 break;
2631
2632         case HCISETLINKPOL:
2633                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2634                                    HCI_INIT_TIMEOUT);
2635                 break;
2636
2637         case HCISETLINKMODE:
2638                 hdev->link_mode = ((__u16) dr.dev_opt) &
2639                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2640                 break;
2641
2642         case HCISETPTYPE:
2643                 hdev->pkt_type = (__u16) dr.dev_opt;
2644                 break;
2645
2646         case HCISETACLMTU:
2647                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2648                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2649                 break;
2650
2651         case HCISETSCOMTU:
2652                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2653                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2654                 break;
2655
2656         default:
2657                 err = -EINVAL;
2658                 break;
2659         }
2660
2661 done:
2662         hci_dev_put(hdev);
2663         return err;
2664 }
2665
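/* Copy the list of registered HCI devices and their flags to
 * userspace. The caller supplies the maximum number of entries it
 * can accept in hci_dev_list_req.dev_num.
 */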
2666 int hci_get_dev_list(void __user *arg)
2667 {
2668         struct hci_dev *hdev;
2669         struct hci_dev_list_req *dl;
2670         struct hci_dev_req *dr;
2671         int n = 0, size, err;
2672         __u16 dev_num;
2673
2674         if (get_user(dev_num, (__u16 __user *) arg))
2675                 return -EFAULT;
2676
2677         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2678                 return -EINVAL;
2679
2680         size = sizeof(*dl) + dev_num * sizeof(*dr);
2681
2682         dl = kzalloc(size, GFP_KERNEL);
2683         if (!dl)
2684                 return -ENOMEM;
2685
2686         dr = dl->dev_req;
2687
2688         read_lock(&hci_dev_list_lock);
2689         list_for_each_entry(hdev, &hci_dev_list, list) {
2690                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2691                         cancel_delayed_work(&hdev->power_off);
2692
2693                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2694                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2695
2696                 (dr + n)->dev_id  = hdev->id;
2697                 (dr + n)->dev_opt = hdev->flags;
2698
2699                 if (++n >= dev_num)
2700                         break;
2701         }
2702         read_unlock(&hci_dev_list_lock);
2703
2704         dl->dev_num = n;
2705         size = sizeof(*dl) + n * sizeof(*dr);
2706
2707         err = copy_to_user(arg, dl, size);
2708         kfree(dl);
2709
2710         return err ? -EFAULT : 0;
2711 }
2712
2713 int hci_get_dev_info(void __user *arg)
2714 {
2715         struct hci_dev *hdev;
2716         struct hci_dev_info di;
2717         int err = 0;
2718
2719         if (copy_from_user(&di, arg, sizeof(di)))
2720                 return -EFAULT;
2721
2722         hdev = hci_dev_get(di.dev_id);
2723         if (!hdev)
2724                 return -ENODEV;
2725
2726         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2727                 cancel_delayed_work_sync(&hdev->power_off);
2728
2729         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2730                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2731
2732         strcpy(di.name, hdev->name);
2733         di.bdaddr   = hdev->bdaddr;
2734         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2735         di.flags    = hdev->flags;
2736         di.pkt_type = hdev->pkt_type;
2737         if (lmp_bredr_capable(hdev)) {
2738                 di.acl_mtu  = hdev->acl_mtu;
2739                 di.acl_pkts = hdev->acl_pkts;
2740                 di.sco_mtu  = hdev->sco_mtu;
2741                 di.sco_pkts = hdev->sco_pkts;
2742         } else {
2743                 di.acl_mtu  = hdev->le_mtu;
2744                 di.acl_pkts = hdev->le_pkts;
2745                 di.sco_mtu  = 0;
2746                 di.sco_pkts = 0;
2747         }
2748         di.link_policy = hdev->link_policy;
2749         di.link_mode   = hdev->link_mode;
2750
2751         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2752         memcpy(&di.features, &hdev->features, sizeof(di.features));
2753
2754         if (copy_to_user(arg, &di, sizeof(di)))
2755                 err = -EFAULT;
2756
2757         hci_dev_put(hdev);
2758
2759         return err;
2760 }
2761
2762 /* ---- Interface to HCI drivers ---- */
2763
2764 static int hci_rfkill_set_block(void *data, bool blocked)
2765 {
2766         struct hci_dev *hdev = data;
2767
2768         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2769
2770         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2771                 return -EBUSY;
2772
2773         if (blocked) {
2774                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2775                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2776                         hci_dev_do_close(hdev);
2777         } else {
2778                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2779         }
2780
2781         return 0;
2782 }
2783
2784 static const struct rfkill_ops hci_rfkill_ops = {
2785         .set_block = hci_rfkill_set_block,
2786 };
2787
2788 static void hci_power_on(struct work_struct *work)
2789 {
2790         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2791         int err;
2792
2793         BT_DBG("%s", hdev->name);
2794
2795         err = hci_dev_do_open(hdev);
2796         if (err < 0) {
2797                 mgmt_set_powered_failed(hdev, err);
2798                 return;
2799         }
2800
2801         /* During the HCI setup phase, a few error conditions are
2802          * ignored and they need to be checked now. If they are still
2803          * valid, it is important to turn the device back off.
2804          */
2805         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2806             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2807             (hdev->dev_type == HCI_BREDR &&
2808              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2809              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2810                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2811                 hci_dev_do_close(hdev);
2812         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2813                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2814                                    HCI_AUTO_OFF_TIMEOUT);
2815         }
2816
2817         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2818                 /* For unconfigured devices, set the HCI_RAW flag
2819                  * so that userspace can easily identify them.
2820                  */
2821                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2822                         set_bit(HCI_RAW, &hdev->flags);
2823
2824                 /* For fully configured devices, this will send
2825                  * the Index Added event. For unconfigured devices,
2826                  * it will send the Unconfigured Index Added event.
2827                  *
2828                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2829                  * and no event will be sent.
2830                  */
2831                 mgmt_index_added(hdev);
2832         }
2833 }
2834
2835 static void hci_power_off(struct work_struct *work)
2836 {
2837         struct hci_dev *hdev = container_of(work, struct hci_dev,
2838                                             power_off.work);
2839
2840         BT_DBG("%s", hdev->name);
2841
2842         hci_dev_do_close(hdev);
2843 }
2844
2845 static void hci_discov_off(struct work_struct *work)
2846 {
2847         struct hci_dev *hdev;
2848
2849         hdev = container_of(work, struct hci_dev, discov_off.work);
2850
2851         BT_DBG("%s", hdev->name);
2852
2853         mgmt_discoverable_timeout(hdev);
2854 }
2855
2856 void hci_uuids_clear(struct hci_dev *hdev)
2857 {
2858         struct bt_uuid *uuid, *tmp;
2859
2860         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2861                 list_del(&uuid->list);
2862                 kfree(uuid);
2863         }
2864 }
2865
2866 void hci_link_keys_clear(struct hci_dev *hdev)
2867 {
2868         struct list_head *p, *n;
2869
2870         list_for_each_safe(p, n, &hdev->link_keys) {
2871                 struct link_key *key;
2872
2873                 key = list_entry(p, struct link_key, list);
2874
2875                 list_del(p);
2876                 kfree(key);
2877         }
2878 }
2879
2880 void hci_smp_ltks_clear(struct hci_dev *hdev)
2881 {
2882         struct smp_ltk *k, *tmp;
2883
2884         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2885                 list_del(&k->list);
2886                 kfree(k);
2887         }
2888 }
2889
2890 void hci_smp_irks_clear(struct hci_dev *hdev)
2891 {
2892         struct smp_irk *k, *tmp;
2893
2894         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2895                 list_del(&k->list);
2896                 kfree(k);
2897         }
2898 }
2899
2900 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2901 {
2902         struct link_key *k;
2903
2904         list_for_each_entry(k, &hdev->link_keys, list)
2905                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2906                         return k;
2907
2908         return NULL;
2909 }
2910
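/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on the bonding
 * requirements both sides declared during pairing.
 */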
2911 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2912                                u8 key_type, u8 old_key_type)
2913 {
2914         /* Legacy key */
2915         if (key_type < 0x03)
2916                 return true;
2917
2918         /* Debug keys are insecure so don't store them persistently */
2919         if (key_type == HCI_LK_DEBUG_COMBINATION)
2920                 return false;
2921
2922         /* Changed combination key and there's no previous one */
2923         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2924                 return false;
2925
2926         /* Security mode 3 case */
2927         if (!conn)
2928                 return true;
2929
2930         /* Neither local nor remote side had no-bonding as a requirement */
2931         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2932                 return true;
2933
2934         /* Local side had dedicated bonding as requirement */
2935         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2936                 return true;
2937
2938         /* Remote side had dedicated bonding as requirement */
2939         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2940                 return true;
2941
2942         /* If none of the above criteria match, then don't store the key
2943          * persistently */
2944         return false;
2945 }
2946
2947 static bool ltk_type_master(u8 type)
2948 {
2949         return (type == SMP_LTK);
2950 }
2951
2952 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2953                              bool master)
2954 {
2955         struct smp_ltk *k;
2956
2957         list_for_each_entry(k, &hdev->long_term_keys, list) {
2958                 if (k->ediv != ediv || k->rand != rand)
2959                         continue;
2960
2961                 if (ltk_type_master(k->type) != master)
2962                         continue;
2963
2964                 return k;
2965         }
2966
2967         return NULL;
2968 }
2969
2970 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2971                                      u8 addr_type, bool master)
2972 {
2973         struct smp_ltk *k;
2974
2975         list_for_each_entry(k, &hdev->long_term_keys, list)
2976                 if (addr_type == k->bdaddr_type &&
2977                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2978                     ltk_type_master(k->type) == master)
2979                         return k;
2980
2981         return NULL;
2982 }
2983
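/* Resolve an RPA to its IRK. First try a cheap comparison against
 * the last RPA cached for each key; if that fails, run the AES based
 * resolution against every stored IRK and cache a successful match.
 */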
2984 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2985 {
2986         struct smp_irk *irk;
2987
2988         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2989                 if (!bacmp(&irk->rpa, rpa))
2990                         return irk;
2991         }
2992
2993         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2994                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2995                         bacpy(&irk->rpa, rpa);
2996                         return irk;
2997                 }
2998         }
2999
3000         return NULL;
3001 }
3002
3003 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3004                                      u8 addr_type)
3005 {
3006         struct smp_irk *irk;
3007
3008         /* Identity Address must be public or static random */
3009         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3010                 return NULL;
3011
3012         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3013                 if (addr_type == irk->addr_type &&
3014                     bacmp(bdaddr, &irk->bdaddr) == 0)
3015                         return irk;
3016         }
3017
3018         return NULL;
3019 }
3020
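/* Add a new link key for bdaddr or update the existing one. If
 * persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key).
 */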
3021 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3022                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3023                                   u8 pin_len, bool *persistent)
3024 {
3025         struct link_key *key, *old_key;
3026         u8 old_key_type;
3027
3028         old_key = hci_find_link_key(hdev, bdaddr);
3029         if (old_key) {
3030                 old_key_type = old_key->type;
3031                 key = old_key;
3032         } else {
3033                 old_key_type = conn ? conn->key_type : 0xff;
3034                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3035                 if (!key)
3036                         return NULL;
3037                 list_add(&key->list, &hdev->link_keys);
3038         }
3039
3040         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3041
3042         /* Some buggy controller combinations generate a changed
3043          * combination key for legacy pairing even when there's no
3044          * previous key */
3045         if (type == HCI_LK_CHANGED_COMBINATION &&
3046             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3047                 type = HCI_LK_COMBINATION;
3048                 if (conn)
3049                         conn->key_type = type;
3050         }
3051
3052         bacpy(&key->bdaddr, bdaddr);
3053         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3054         key->pin_len = pin_len;
3055
3056         if (type == HCI_LK_CHANGED_COMBINATION)
3057                 key->type = old_key_type;
3058         else
3059                 key->type = type;
3060
3061         if (persistent)
3062                 *persistent = hci_persistent_key(hdev, conn, type,
3063                                                  old_key_type);
3064
3065         return key;
3066 }
3067
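/* Add a new LE long term key or update the existing one matching the
 * address, address type and master/slave role.
 */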
3068 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069                             u8 addr_type, u8 type, u8 authenticated,
3070                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3071 {
3072         struct smp_ltk *key, *old_key;
3073         bool master = ltk_type_master(type);
3074
3075         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3076         if (old_key)
3077                 key = old_key;
3078         else {
3079                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3080                 if (!key)
3081                         return NULL;
3082                 list_add(&key->list, &hdev->long_term_keys);
3083         }
3084
3085         bacpy(&key->bdaddr, bdaddr);
3086         key->bdaddr_type = addr_type;
3087         memcpy(key->val, tk, sizeof(key->val));
3088         key->authenticated = authenticated;
3089         key->ediv = ediv;
3090         key->rand = rand;
3091         key->enc_size = enc_size;
3092         key->type = type;
3093
3094         return key;
3095 }
3096
3097 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3098                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3099 {
3100         struct smp_irk *irk;
3101
3102         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3103         if (!irk) {
3104                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3105                 if (!irk)
3106                         return NULL;
3107
3108                 bacpy(&irk->bdaddr, bdaddr);
3109                 irk->addr_type = addr_type;
3110
3111                 list_add(&irk->list, &hdev->identity_resolving_keys);
3112         }
3113
3114         memcpy(irk->val, val, 16);
3115         bacpy(&irk->rpa, rpa);
3116
3117         return irk;
3118 }
3119
3120 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3121 {
3122         struct link_key *key;
3123
3124         key = hci_find_link_key(hdev, bdaddr);
3125         if (!key)
3126                 return -ENOENT;
3127
3128         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3129
3130         list_del(&key->list);
3131         kfree(key);
3132
3133         return 0;
3134 }
3135
3136 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3137 {
3138         struct smp_ltk *k, *tmp;
3139         int removed = 0;
3140
3141         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3142                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3143                         continue;
3144
3145                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3146
3147                 list_del(&k->list);
3148                 kfree(k);
3149                 removed++;
3150         }
3151
3152         return removed ? 0 : -ENOENT;
3153 }
3154
3155 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3156 {
3157         struct smp_irk *k, *tmp;
3158
3159         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3160                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3161                         continue;
3162
3163                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3164
3165                 list_del(&k->list);
3166                 kfree(k);
3167         }
3168 }
3169
3170 /* HCI command timer function */
3171 static void hci_cmd_timeout(struct work_struct *work)
3172 {
3173         struct hci_dev *hdev = container_of(work, struct hci_dev,
3174                                             cmd_timer.work);
3175
3176         if (hdev->sent_cmd) {
3177                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3178                 u16 opcode = __le16_to_cpu(sent->opcode);
3179
3180                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3181         } else {
3182                 BT_ERR("%s command tx timeout", hdev->name);
3183         }
3184
3185         atomic_set(&hdev->cmd_cnt, 1);
3186         queue_work(hdev->workqueue, &hdev->cmd_work);
3187 }
3188
3189 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3190                                           bdaddr_t *bdaddr)
3191 {
3192         struct oob_data *data;
3193
3194         list_for_each_entry(data, &hdev->remote_oob_data, list)
3195                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3196                         return data;
3197
3198         return NULL;
3199 }
3200
3201 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3202 {
3203         struct oob_data *data;
3204
3205         data = hci_find_remote_oob_data(hdev, bdaddr);
3206         if (!data)
3207                 return -ENOENT;
3208
3209         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3210
3211         list_del(&data->list);
3212         kfree(data);
3213
3214         return 0;
3215 }
3216
3217 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3218 {
3219         struct oob_data *data, *n;
3220
3221         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3222                 list_del(&data->list);
3223                 kfree(data);
3224         }
3225 }
3226
3227 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3228                             u8 *hash, u8 *randomizer)
3229 {
3230         struct oob_data *data;
3231
3232         data = hci_find_remote_oob_data(hdev, bdaddr);
3233         if (!data) {
3234                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3235                 if (!data)
3236                         return -ENOMEM;
3237
3238                 bacpy(&data->bdaddr, bdaddr);
3239                 list_add(&data->list, &hdev->remote_oob_data);
3240         }
3241
3242         memcpy(data->hash192, hash, sizeof(data->hash192));
3243         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3244
3245         memset(data->hash256, 0, sizeof(data->hash256));
3246         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3247
3248         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3249
3250         return 0;
3251 }
3252
3253 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254                                 u8 *hash192, u8 *randomizer192,
3255                                 u8 *hash256, u8 *randomizer256)
3256 {
3257         struct oob_data *data;
3258
3259         data = hci_find_remote_oob_data(hdev, bdaddr);
3260         if (!data) {
3261                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3262                 if (!data)
3263                         return -ENOMEM;
3264
3265                 bacpy(&data->bdaddr, bdaddr);
3266                 list_add(&data->list, &hdev->remote_oob_data);
3267         }
3268
3269         memcpy(data->hash192, hash192, sizeof(data->hash192));
3270         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3271
3272         memcpy(data->hash256, hash256, sizeof(data->hash256));
3273         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3274
3275         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3276
3277         return 0;
3278 }
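
/* Editor's sketch (illustrative, not part of hci_core.c): a typical caller
 * caches OOB pairing data under hdev->lock, which protects the
 * remote_oob_data list. example_cache_oob() and its parameter names are
 * hypothetical.
 */
static int __maybe_unused example_cache_oob(struct hci_dev *hdev,
                                            bdaddr_t *peer, u8 *hash,
                                            u8 *randomizer)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_add_remote_oob_data(hdev, peer, hash, randomizer);
        hci_dev_unlock(hdev);

        return err;
}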
3279
3280 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3281                                          bdaddr_t *bdaddr, u8 type)
3282 {
3283         struct bdaddr_list *b;
3284
3285         list_for_each_entry(b, &hdev->blacklist, list) {
3286                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3287                         return b;
3288         }
3289
3290         return NULL;
3291 }
3292
3293 static void hci_blacklist_clear(struct hci_dev *hdev)
3294 {
3295         struct list_head *p, *n;
3296
3297         list_for_each_safe(p, n, &hdev->blacklist) {
3298                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3299
3300                 list_del(p);
3301                 kfree(b);
3302         }
3303 }
3304
3305 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3306 {
3307         struct bdaddr_list *entry;
3308
3309         if (!bacmp(bdaddr, BDADDR_ANY))
3310                 return -EBADF;
3311
3312         if (hci_blacklist_lookup(hdev, bdaddr, type))
3313                 return -EEXIST;
3314
3315         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3316         if (!entry)
3317                 return -ENOMEM;
3318
3319         bacpy(&entry->bdaddr, bdaddr);
3320         entry->bdaddr_type = type;
3321
3322         list_add(&entry->list, &hdev->blacklist);
3323
3324         return 0;
3325 }
3326
3327 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3328 {
3329         struct bdaddr_list *entry;
3330
3331         if (!bacmp(bdaddr, BDADDR_ANY)) {
3332                 hci_blacklist_clear(hdev);
3333                 return 0;
3334         }
3335
3336         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3337         if (!entry)
3338                 return -ENOENT;
3339
3340         list_del(&entry->list);
3341         kfree(entry);
3342
3343         return 0;
3344 }
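
/* Editor's sketch (illustrative, not part of hci_core.c): basic blacklist
 * manipulation, highlighting the BDADDR_ANY convention: hci_blacklist_add()
 * rejects it, while hci_blacklist_del() treats it as "flush the whole
 * list". example_blacklist() is hypothetical.
 */
static void __maybe_unused example_blacklist(struct hci_dev *hdev,
                                             bdaddr_t *peer)
{
        hci_dev_lock(hdev);

        if (hci_blacklist_add(hdev, peer, BDADDR_BREDR) == -EEXIST)
                BT_DBG("%s %pMR already blacklisted", hdev->name, peer);

        /* BDADDR_ANY clears every entry */
        hci_blacklist_del(hdev, BDADDR_ANY, BDADDR_BREDR);

        hci_dev_unlock(hdev);
}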
3345
3346 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3347                                           bdaddr_t *bdaddr, u8 type)
3348 {
3349         struct bdaddr_list *b;
3350
3351         list_for_each_entry(b, &hdev->le_white_list, list) {
3352                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3353                         return b;
3354         }
3355
3356         return NULL;
3357 }
3358
3359 void hci_white_list_clear(struct hci_dev *hdev)
3360 {
3361         struct list_head *p, *n;
3362
3363         list_for_each_safe(p, n, &hdev->le_white_list) {
3364                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3365
3366                 list_del(p);
3367                 kfree(b);
3368         }
3369 }
3370
3371 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3372 {
3373         struct bdaddr_list *entry;
3374
3375         if (!bacmp(bdaddr, BDADDR_ANY))
3376                 return -EBADF;
3377
3378         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3379         if (!entry)
3380                 return -ENOMEM;
3381
3382         bacpy(&entry->bdaddr, bdaddr);
3383         entry->bdaddr_type = type;
3384
3385         list_add(&entry->list, &hdev->le_white_list);
3386
3387         return 0;
3388 }
3389
3390 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3391 {
3392         struct bdaddr_list *entry;
3393
3394         if (!bacmp(bdaddr, BDADDR_ANY))
3395                 return -EBADF;
3396
3397         entry = hci_white_list_lookup(hdev, bdaddr, type);
3398         if (!entry)
3399                 return -ENOENT;
3400
3401         list_del(&entry->list);
3402         kfree(entry);
3403
3404         return 0;
3405 }
3406
3407 /* This function requires the caller holds hdev->lock */
3408 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3409                                                bdaddr_t *addr, u8 addr_type)
3410 {
3411         struct hci_conn_params *params;
3412
3413         /* The conn params list only contains identity addresses */
3414         if (!hci_is_identity_address(addr, addr_type))
3415                 return NULL;
3416
3417         list_for_each_entry(params, &hdev->le_conn_params, list) {
3418                 if (bacmp(&params->addr, addr) == 0 &&
3419                     params->addr_type == addr_type) {
3420                         return params;
3421                 }
3422         }
3423
3424         return NULL;
3425 }
3426
3427 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3428 {
3429         struct hci_conn *conn;
3430
3431         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3432         if (!conn)
3433                 return false;
3434
3435         if (conn->dst_type != type)
3436                 return false;
3437
3438         if (conn->state != BT_CONNECTED)
3439                 return false;
3440
3441         return true;
3442 }
3443
3444 /* This function requires the caller holds hdev->lock */
3445 struct hci_conn_params *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3446                                                 bdaddr_t *addr, u8 addr_type)
3447 {
3448         struct hci_conn_params *param;
3449
3450         /* The list only contains identity addresses */
3451         if (!hci_is_identity_address(addr, addr_type))
3452                 return NULL;
3453
3454         list_for_each_entry(param, &hdev->pend_le_conns, action) {
3455                 if (bacmp(&param->addr, addr) == 0 &&
3456                     param->addr_type == addr_type)
3457                         return param;
3458         }
3459
3460         return NULL;
3461 }
3462
3463 /* This function requires the caller holds hdev->lock */
3464 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3465                                             bdaddr_t *addr, u8 addr_type)
3466 {
3467         struct hci_conn_params *params;
3468
3469         if (!hci_is_identity_address(addr, addr_type))
3470                 return NULL;
3471
3472         params = hci_conn_params_lookup(hdev, addr, addr_type);
3473         if (params)
3474                 return params;
3475
3476         params = kzalloc(sizeof(*params), GFP_KERNEL);
3477         if (!params) {
3478                 BT_ERR("Out of memory");
3479                 return NULL;
3480         }
3481
3482         bacpy(&params->addr, addr);
3483         params->addr_type = addr_type;
3484
3485         list_add(&params->list, &hdev->le_conn_params);
3486         INIT_LIST_HEAD(&params->action);
3487
3488         params->conn_min_interval = hdev->le_conn_min_interval;
3489         params->conn_max_interval = hdev->le_conn_max_interval;
3490         params->conn_latency = hdev->le_conn_latency;
3491         params->supervision_timeout = hdev->le_supv_timeout;
3492         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3493
3494         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3495
3496         return params;
3497 }
3498
3499 /* This function requires the caller holds hdev->lock */
3500 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3501                         u8 auto_connect)
3502 {
3503         struct hci_conn_params *params;
3504
3505         params = hci_conn_params_add(hdev, addr, addr_type);
3506         if (!params)
3507                 return -EIO;
3508
3509         if (params->auto_connect == auto_connect)
3510                 return 0;
3511
3512         list_del_init(&params->action);
3513
3514         switch (auto_connect) {
3515         case HCI_AUTO_CONN_DISABLED:
3516         case HCI_AUTO_CONN_LINK_LOSS:
3517                 hci_update_background_scan(hdev);
3518                 break;
3519         case HCI_AUTO_CONN_REPORT:
3520                 list_add(&params->action, &hdev->pend_le_reports);
3521                 hci_update_background_scan(hdev);
3522                 break;
3523         case HCI_AUTO_CONN_ALWAYS:
3524                 if (!is_connected(hdev, addr, addr_type)) {
3525                         list_add(&params->action, &hdev->pend_le_conns);
3526                         hci_update_background_scan(hdev);
3527                 }
3528                 break;
3529         }
3530
3531         params->auto_connect = auto_connect;
3532
3533         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3534                auto_connect);
3535
3536         return 0;
3537 }
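
/* Editor's sketch (illustrative, not part of hci_core.c): marking a bonded
 * peer for automatic reconnection. HCI_AUTO_CONN_ALWAYS puts the device on
 * pend_le_conns so the background scan re-establishes the link whenever the
 * device starts advertising. example_autoconnect() is hypothetical.
 */
static int __maybe_unused example_autoconnect(struct hci_dev *hdev,
                                              bdaddr_t *peer)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_conn_params_set(hdev, peer, ADDR_LE_DEV_PUBLIC,
                                  HCI_AUTO_CONN_ALWAYS);
        hci_dev_unlock(hdev);

        return err;
}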
3538
3539 /* This function requires the caller holds hdev->lock */
3540 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3541 {
3542         struct hci_conn_params *params;
3543
3544         params = hci_conn_params_lookup(hdev, addr, addr_type);
3545         if (!params)
3546                 return;
3547
3548         list_del(&params->action);
3549         list_del(&params->list);
3550         kfree(params);
3551
3552         hci_update_background_scan(hdev);
3553
3554         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3555 }
3556
3557 /* This function requires the caller holds hdev->lock */
3558 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3559 {
3560         struct hci_conn_params *params, *tmp;
3561
3562         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3563                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3564                         continue;
3565                 list_del(&params->list);
3566                 kfree(params);
3567         }
3568
3569         BT_DBG("All disabled LE connection parameters were removed");
3570 }
3571
3572 /* This function requires the caller holds hdev->lock */
3573 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3574 {
3575         struct hci_conn_params *params, *tmp;
3576
3577         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3578                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3579                         continue;
3580                 list_del(&params->action);
3581                 list_del(&params->list);
3582                 kfree(params);
3583         }
3584
3585         hci_update_background_scan(hdev);
3586
3587         BT_DBG("All enabled LE connection parameters were removed");
3588 }
3589
3590 /* This function requires the caller holds hdev->lock */
3591 void hci_conn_params_clear_all(struct hci_dev *hdev)
3592 {
3593         struct hci_conn_params *params, *tmp;
3594
3595         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3596                 list_del(&params->action);
3597                 list_del(&params->list);
3598                 kfree(params);
3599         }
3600
3601         hci_update_background_scan(hdev);
3602
3603         BT_DBG("All LE connection parameters were removed");
3604 }
3605
3606 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3607 {
3608         if (status) {
3609                 BT_ERR("Failed to start inquiry: status %d", status);
3610
3611                 hci_dev_lock(hdev);
3612                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3613                 hci_dev_unlock(hdev);
3614                 return;
3615         }
3616 }
3617
3618 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3619 {
3620         /* General inquiry access code (GIAC) */
3621         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3622         struct hci_request req;
3623         struct hci_cp_inquiry cp;
3624         int err;
3625
3626         if (status) {
3627                 BT_ERR("Failed to disable LE scanning: status %d", status);
3628                 return;
3629         }
3630
3631         switch (hdev->discovery.type) {
3632         case DISCOV_TYPE_LE:
3633                 hci_dev_lock(hdev);
3634                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3635                 hci_dev_unlock(hdev);
3636                 break;
3637
3638         case DISCOV_TYPE_INTERLEAVED:
3639                 hci_req_init(&req, hdev);
3640
3641                 memset(&cp, 0, sizeof(cp));
3642                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3643                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3644                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3645
3646                 hci_dev_lock(hdev);
3647
3648                 hci_inquiry_cache_flush(hdev);
3649
3650                 err = hci_req_run(&req, inquiry_complete);
3651                 if (err) {
3652                         BT_ERR("Inquiry request failed: err %d", err);
3653                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3654                 }
3655
3656                 hci_dev_unlock(hdev);
3657                 break;
3658         }
3659 }
3660
3661 static void le_scan_disable_work(struct work_struct *work)
3662 {
3663         struct hci_dev *hdev = container_of(work, struct hci_dev,
3664                                             le_scan_disable.work);
3665         struct hci_request req;
3666         int err;
3667
3668         BT_DBG("%s", hdev->name);
3669
3670         hci_req_init(&req, hdev);
3671
3672         hci_req_add_le_scan_disable(&req);
3673
3674         err = hci_req_run(&req, le_scan_disable_work_complete);
3675         if (err)
3676                 BT_ERR("Disable LE scanning request failed: err %d", err);
3677 }
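
/* Editor's note (illustrative, values hypothetical): discovery code arms the
 * teardown above as delayed work, roughly
 *
 *      queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *                         msecs_to_jiffies(timeout));
 *
 * so an LE or interleaved scan window shuts itself down automatically.
 */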
3678
3679 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3680 {
3681         struct hci_dev *hdev = req->hdev;
3682
3683         /* If we're advertising or initiating an LE connection, we can't
3684          * go ahead and change the random address at this time. This is
3685          * because the eventual initiator address used for the
3686          * subsequently created connection will be undefined (some
3687          * controllers use the new address and others the one we had
3688          * when the operation started).
3689          *
3690          * In this kind of scenario, skip the update and let the random
3691          * address be updated at the next cycle.
3692          */
3693         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3694             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3695                 BT_DBG("Deferring random address update");
3696                 return;
3697         }
3698
3699         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3700 }
3701
3702 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3703                               u8 *own_addr_type)
3704 {
3705         struct hci_dev *hdev = req->hdev;
3706         int err;
3707
3708         /* If privacy is enabled, use a resolvable private address. If
3709          * the current RPA has expired or something other than the
3710          * current RPA is in use, generate a new one.
3711          */
3712         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3713                 int to;
3714
3715                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3716
3717                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3718                     !bacmp(&hdev->random_addr, &hdev->rpa))
3719                         return 0;
3720
3721                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3722                 if (err < 0) {
3723                         BT_ERR("%s failed to generate new RPA", hdev->name);
3724                         return err;
3725                 }
3726
3727                 set_random_addr(req, &hdev->rpa);
3728
3729                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3730                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3731
3732                 return 0;
3733         }
3734
3735         /* If privacy is required but no resolvable private address is
3736          * available, use a non-resolvable private address. This is
3737          * useful for active scanning and non-connectable advertising.
3738          */
3739         if (require_privacy) {
3740                 bdaddr_t urpa;
3741
3742                 get_random_bytes(&urpa, 6);
3743                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3744
3745                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3746                 set_random_addr(req, &urpa);
3747                 return 0;
3748         }
3749
3750         /* If forcing the static address is in use or there is no public
3751          * address, use the static address as the random address (but skip
3752          * the HCI command if the current random address is already the
3753          * static one).
3754          */
3755         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3756             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3757                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3758                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3759                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3760                                     &hdev->static_addr);
3761                 return 0;
3762         }
3763
3764         /* Neither privacy nor static address is being used so use a
3765          * public address.
3766          */
3767         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3768
3769         return 0;
3770 }
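
/* Editor's sketch (illustrative, not part of hci_core.c): a typical caller,
 * modelled on how the core builds LE scan parameters. The helper above picks
 * the own-address type and, when necessary, queues the command that programs
 * a fresh random address first. example_add_scan_param() is hypothetical.
 */
static void __maybe_unused example_add_scan_param(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param cp;
        u8 own_addr_type;

        /* Active scanning reveals our address, so ask for privacy */
        if (hci_update_random_address(req, true, &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.type = LE_SCAN_ACTIVE;
        cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
        cp.window = cpu_to_le16(req->hdev->le_scan_window);
        cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}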
3771
3772 /* Copy the Identity Address of the controller.
3773  *
3774  * If the controller has a public BD_ADDR, then by default use that one.
3775  * If this is an LE-only controller without a public address, default to
3776  * the static random address.
3777  *
3778  * For debugging purposes it is possible to force controllers with a
3779  * public address to use the static random address instead.
3780  */
3781 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3782                                u8 *bdaddr_type)
3783 {
3784         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3785             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3786                 bacpy(bdaddr, &hdev->static_addr);
3787                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3788         } else {
3789                 bacpy(bdaddr, &hdev->bdaddr);
3790                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3791         }
3792 }
3793
3794 /* Alloc HCI device */
3795 struct hci_dev *hci_alloc_dev(void)
3796 {
3797         struct hci_dev *hdev;
3798
3799         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3800         if (!hdev)
3801                 return NULL;
3802
3803         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3804         hdev->esco_type = (ESCO_HV1);
3805         hdev->link_mode = (HCI_LM_ACCEPT);
3806         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3807         hdev->io_capability = 0x03;     /* No Input No Output */
3808         hdev->manufacturer = 0xffff;    /* Default to internal use */
3809         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3810         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3811
3812         hdev->sniff_max_interval = 800;
3813         hdev->sniff_min_interval = 80;
3814
3815         hdev->le_adv_channel_map = 0x07;
3816         hdev->le_scan_interval = 0x0060;
3817         hdev->le_scan_window = 0x0030;
3818         hdev->le_conn_min_interval = 0x0028;
3819         hdev->le_conn_max_interval = 0x0038;
3820         hdev->le_conn_latency = 0x0000;
3821         hdev->le_supv_timeout = 0x002a;
3822
3823         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3824         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3825         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3826         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3827
3828         mutex_init(&hdev->lock);
3829         mutex_init(&hdev->req_lock);
3830
3831         INIT_LIST_HEAD(&hdev->mgmt_pending);
3832         INIT_LIST_HEAD(&hdev->blacklist);
3833         INIT_LIST_HEAD(&hdev->uuids);
3834         INIT_LIST_HEAD(&hdev->link_keys);
3835         INIT_LIST_HEAD(&hdev->long_term_keys);
3836         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3837         INIT_LIST_HEAD(&hdev->remote_oob_data);
3838         INIT_LIST_HEAD(&hdev->le_white_list);
3839         INIT_LIST_HEAD(&hdev->le_conn_params);
3840         INIT_LIST_HEAD(&hdev->pend_le_conns);
3841         INIT_LIST_HEAD(&hdev->pend_le_reports);
3842         INIT_LIST_HEAD(&hdev->conn_hash.list);
3843
3844         INIT_WORK(&hdev->rx_work, hci_rx_work);
3845         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3846         INIT_WORK(&hdev->tx_work, hci_tx_work);
3847         INIT_WORK(&hdev->power_on, hci_power_on);
3848
3849         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3850         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3851         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3852
3853         skb_queue_head_init(&hdev->rx_q);
3854         skb_queue_head_init(&hdev->cmd_q);
3855         skb_queue_head_init(&hdev->raw_q);
3856
3857         init_waitqueue_head(&hdev->req_wait_q);
3858
3859         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3860
3861         hci_init_sysfs(hdev);
3862         discovery_init(hdev);
3863
3864         return hdev;
3865 }
3866 EXPORT_SYMBOL(hci_alloc_dev);
3867
3868 /* Free HCI device */
3869 void hci_free_dev(struct hci_dev *hdev)
3870 {
3871         /* Will be freed via the device release callback */
3872         put_device(&hdev->dev);
3873 }
3874 EXPORT_SYMBOL(hci_free_dev);
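
/* Editor's sketch (illustrative, not part of hci_core.c): the minimal
 * driver-side pattern for the helpers above plus hci_register_dev() below,
 * modelled on the virtual HCI driver. All example_* names are hypothetical;
 * a real driver would hand the skb to its transport instead of dropping it.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);         /* stand-in for the real transport */
        return 0;
}

static int __maybe_unused example_setup(void)
{
        struct hci_dev *hdev = hci_alloc_dev();
        int id;

        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        id = hci_register_dev(hdev);
        if (id < 0) {
                hci_free_dev(hdev);
                return id;
        }

        return 0;
}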
3875
3876 /* Register HCI device */
3877 int hci_register_dev(struct hci_dev *hdev)
3878 {
3879         int id, error;
3880
3881         if (!hdev->open || !hdev->close)
3882                 return -EINVAL;
3883
3884         /* Do not allow HCI_AMP devices to register at index 0,
3885          * so the index can be used as the AMP controller ID.
3886          */
3887         switch (hdev->dev_type) {
3888         case HCI_BREDR:
3889                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3890                 break;
3891         case HCI_AMP:
3892                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3893                 break;
3894         default:
3895                 return -EINVAL;
3896         }
3897
3898         if (id < 0)
3899                 return id;
3900
3901         sprintf(hdev->name, "hci%d", id);
3902         hdev->id = id;
3903
3904         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3905
3906         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3907                                           WQ_MEM_RECLAIM, 1, hdev->name);
3908         if (!hdev->workqueue) {
3909                 error = -ENOMEM;
3910                 goto err;
3911         }
3912
3913         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3914                                               WQ_MEM_RECLAIM, 1, hdev->name);
3915         if (!hdev->req_workqueue) {
3916                 destroy_workqueue(hdev->workqueue);
3917                 error = -ENOMEM;
3918                 goto err;
3919         }
3920
3921         if (!IS_ERR_OR_NULL(bt_debugfs))
3922                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3923
3924         dev_set_name(&hdev->dev, "%s", hdev->name);
3925
3926         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3927                                                CRYPTO_ALG_ASYNC);
3928         if (IS_ERR(hdev->tfm_aes)) {
3929                 BT_ERR("Unable to create crypto context");
3930                 error = PTR_ERR(hdev->tfm_aes);
3931                 hdev->tfm_aes = NULL;
3932                 goto err_wqueue;
3933         }
3934
3935         error = device_add(&hdev->dev);
3936         if (error < 0)
3937                 goto err_tfm;
3938
3939         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3940                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3941                                     hdev);
3942         if (hdev->rfkill) {
3943                 if (rfkill_register(hdev->rfkill) < 0) {
3944                         rfkill_destroy(hdev->rfkill);
3945                         hdev->rfkill = NULL;
3946                 }
3947         }
3948
3949         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3950                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3951
3952         set_bit(HCI_SETUP, &hdev->dev_flags);
3953         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3954
3955         if (hdev->dev_type == HCI_BREDR) {
3956                 /* Assume BR/EDR support until proven otherwise (such as
3957                  * through reading supported features during init).
3958                  */
3959                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3960         }
3961
3962         write_lock(&hci_dev_list_lock);
3963         list_add(&hdev->list, &hci_dev_list);
3964         write_unlock(&hci_dev_list_lock);
3965
3966         /* Devices that are marked for raw-only usage are unconfigured
3967          * and should not be included in normal operation.
3968          */
3969         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3970                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3971
3972         hci_notify(hdev, HCI_DEV_REG);
3973         hci_dev_hold(hdev);
3974
3975         queue_work(hdev->req_workqueue, &hdev->power_on);
3976
3977         return id;
3978
3979 err_tfm:
3980         crypto_free_blkcipher(hdev->tfm_aes);
3981 err_wqueue:
3982         destroy_workqueue(hdev->workqueue);
3983         destroy_workqueue(hdev->req_workqueue);
3984 err:
3985         ida_simple_remove(&hci_index_ida, hdev->id);
3986
3987         return error;
3988 }
3989 EXPORT_SYMBOL(hci_register_dev);
3990
3991 /* Unregister HCI device */
3992 void hci_unregister_dev(struct hci_dev *hdev)
3993 {
3994         int i, id;
3995
3996         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3997
3998         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3999
4000         id = hdev->id;
4001
4002         write_lock(&hci_dev_list_lock);
4003         list_del(&hdev->list);
4004         write_unlock(&hci_dev_list_lock);
4005
4006         hci_dev_do_close(hdev);
4007
4008         for (i = 0; i < NUM_REASSEMBLY; i++)
4009                 kfree_skb(hdev->reassembly[i]);
4010
4011         cancel_work_sync(&hdev->power_on);
4012
4013         if (!test_bit(HCI_INIT, &hdev->flags) &&
4014             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4015                 hci_dev_lock(hdev);
4016                 mgmt_index_removed(hdev);
4017                 hci_dev_unlock(hdev);
4018         }
4019
4020         /* mgmt_index_removed should take care of emptying the
4021          * pending list */
4022         BUG_ON(!list_empty(&hdev->mgmt_pending));
4023
4024         hci_notify(hdev, HCI_DEV_UNREG);
4025
4026         if (hdev->rfkill) {
4027                 rfkill_unregister(hdev->rfkill);
4028                 rfkill_destroy(hdev->rfkill);
4029         }
4030
4031         if (hdev->tfm_aes)
4032                 crypto_free_blkcipher(hdev->tfm_aes);
4033
4034         device_del(&hdev->dev);
4035
4036         debugfs_remove_recursive(hdev->debugfs);
4037
4038         destroy_workqueue(hdev->workqueue);
4039         destroy_workqueue(hdev->req_workqueue);
4040
4041         hci_dev_lock(hdev);
4042         hci_blacklist_clear(hdev);
4043         hci_uuids_clear(hdev);
4044         hci_link_keys_clear(hdev);
4045         hci_smp_ltks_clear(hdev);
4046         hci_smp_irks_clear(hdev);
4047         hci_remote_oob_data_clear(hdev);
4048         hci_white_list_clear(hdev);
4049         hci_conn_params_clear_all(hdev);
4050         hci_dev_unlock(hdev);
4051
4052         hci_dev_put(hdev);
4053
4054         ida_simple_remove(&hci_index_ida, id);
4055 }
4056 EXPORT_SYMBOL(hci_unregister_dev);
4057
4058 /* Suspend HCI device */
4059 int hci_suspend_dev(struct hci_dev *hdev)
4060 {
4061         hci_notify(hdev, HCI_DEV_SUSPEND);
4062         return 0;
4063 }
4064 EXPORT_SYMBOL(hci_suspend_dev);
4065
4066 /* Resume HCI device */
4067 int hci_resume_dev(struct hci_dev *hdev)
4068 {
4069         hci_notify(hdev, HCI_DEV_RESUME);
4070         return 0;
4071 }
4072 EXPORT_SYMBOL(hci_resume_dev);
4073
4074 /* Receive frame from HCI drivers */
4075 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4076 {
4077         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4078                       !test_bit(HCI_INIT, &hdev->flags))) {
4079                 kfree_skb(skb);
4080                 return -ENXIO;
4081         }
4082
4083         /* Incoming skb */
4084         bt_cb(skb)->incoming = 1;
4085
4086         /* Time stamp */
4087         __net_timestamp(skb);
4088
4089         skb_queue_tail(&hdev->rx_q, skb);
4090         queue_work(hdev->workqueue, &hdev->rx_work);
4091
4092         return 0;
4093 }
4094 EXPORT_SYMBOL(hci_recv_frame);
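
/* Editor's sketch (illustrative, not part of hci_core.c): how a driver with
 * a complete packet in hand delivers it. hci_recv_frame() consumes the skb
 * even on failure, so the caller never frees it. example_deliver() is
 * hypothetical.
 */
static void __maybe_unused example_deliver(struct hci_dev *hdev, u8 pkt_type,
                                           const void *buf, int len)
{
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

        if (!skb)
                return;

        memcpy(skb_put(skb, len), buf, len);
        bt_cb(skb)->pkt_type = pkt_type;
        hci_recv_frame(hdev, skb);
}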
4095
4096 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4097                           int count, __u8 index)
4098 {
4099         int len = 0;
4100         int hlen = 0;
4101         int remain = count;
4102         struct sk_buff *skb;
4103         struct bt_skb_cb *scb;
4104
4105         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4106             index >= NUM_REASSEMBLY)
4107                 return -EILSEQ;
4108
4109         skb = hdev->reassembly[index];
4110
4111         if (!skb) {
4112                 switch (type) {
4113                 case HCI_ACLDATA_PKT:
4114                         len = HCI_MAX_FRAME_SIZE;
4115                         hlen = HCI_ACL_HDR_SIZE;
4116                         break;
4117                 case HCI_EVENT_PKT:
4118                         len = HCI_MAX_EVENT_SIZE;
4119                         hlen = HCI_EVENT_HDR_SIZE;
4120                         break;
4121                 case HCI_SCODATA_PKT:
4122                         len = HCI_MAX_SCO_SIZE;
4123                         hlen = HCI_SCO_HDR_SIZE;
4124                         break;
4125                 }
4126
4127                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4128                 if (!skb)
4129                         return -ENOMEM;
4130
4131                 scb = (void *) skb->cb;
4132                 scb->expect = hlen;
4133                 scb->pkt_type = type;
4134
4135                 hdev->reassembly[index] = skb;
4136         }
4137
4138         while (count) {
4139                 scb = (void *) skb->cb;
4140                 len = min_t(uint, scb->expect, count);
4141
4142                 memcpy(skb_put(skb, len), data, len);
4143
4144                 count -= len;
4145                 data += len;
4146                 scb->expect -= len;
4147                 remain = count;
4148
4149                 switch (type) {
4150                 case HCI_EVENT_PKT:
4151                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4152                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4153                                 scb->expect = h->plen;
4154
4155                                 if (skb_tailroom(skb) < scb->expect) {
4156                                         kfree_skb(skb);
4157                                         hdev->reassembly[index] = NULL;
4158                                         return -ENOMEM;
4159                                 }
4160                         }
4161                         break;
4162
4163                 case HCI_ACLDATA_PKT:
4164                         if (skb->len == HCI_ACL_HDR_SIZE) {
4165                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4166                                 scb->expect = __le16_to_cpu(h->dlen);
4167
4168                                 if (skb_tailroom(skb) < scb->expect) {
4169                                         kfree_skb(skb);
4170                                         hdev->reassembly[index] = NULL;
4171                                         return -ENOMEM;
4172                                 }
4173                         }
4174                         break;
4175
4176                 case HCI_SCODATA_PKT:
4177                         if (skb->len == HCI_SCO_HDR_SIZE) {
4178                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4179                                 scb->expect = h->dlen;
4180
4181                                 if (skb_tailroom(skb) < scb->expect) {
4182                                         kfree_skb(skb);
4183                                         hdev->reassembly[index] = NULL;
4184                                         return -ENOMEM;
4185                                 }
4186                         }
4187                         break;
4188                 }
4189
4190                 if (scb->expect == 0) {
4191                         /* Complete frame */
4192
4193                         bt_cb(skb)->pkt_type = type;
4194                         hci_recv_frame(hdev, skb);
4195
4196                         hdev->reassembly[index] = NULL;
4197                         return remain;
4198                 }
4199         }
4200
4201         return remain;
4202 }
4203
4204 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4205 {
4206         int rem = 0;
4207
4208         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4209                 return -EILSEQ;
4210
4211         while (count) {
4212                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4213                 if (rem < 0)
4214                         return rem;
4215
4216                 data += (count - rem);
4217                 count = rem;
4218         }
4219
4220         return rem;
4221 }
4222 EXPORT_SYMBOL(hci_recv_fragment);
4223
4224 #define STREAM_REASSEMBLY 0
4225
4226 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4227 {
4228         int type;
4229         int rem = 0;
4230
4231         while (count) {
4232                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4233
4234                 if (!skb) {
4235                         struct { char type; } *pkt;
4236
4237                         /* Start of the frame */
4238                         pkt = data;
4239                         type = pkt->type;
4240
4241                         data++;
4242                         count--;
4243                 } else
4244                         type = bt_cb(skb)->pkt_type;
4245
4246                 rem = hci_reassembly(hdev, type, data, count,
4247                                      STREAM_REASSEMBLY);
4248                 if (rem < 0)
4249                         return rem;
4250
4251                 data += (count - rem);
4252                 count = rem;
4253         }
4254
4255         return rem;
4256 }
4257 EXPORT_SYMBOL(hci_recv_stream_fragment);
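
/* Editor's sketch (illustrative, not part of hci_core.c): a UART-style
 * driver feeding raw H:4 bytes. Each frame starts with its packet-type
 * octet, so no type argument is needed here. example_h4_rx() is
 * hypothetical.
 */
static void __maybe_unused example_h4_rx(struct hci_dev *hdev,
                                         const u8 *buf, int len)
{
        if (hci_recv_stream_fragment(hdev, (void *)buf, len) < 0)
                BT_ERR("%s stream reassembly failed", hdev->name);
}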
4258
4259 /* ---- Interface to upper protocols ---- */
4260
4261 int hci_register_cb(struct hci_cb *cb)
4262 {
4263         BT_DBG("%p name %s", cb, cb->name);
4264
4265         write_lock(&hci_cb_list_lock);
4266         list_add(&cb->list, &hci_cb_list);
4267         write_unlock(&hci_cb_list_lock);
4268
4269         return 0;
4270 }
4271 EXPORT_SYMBOL(hci_register_cb);
4272
4273 int hci_unregister_cb(struct hci_cb *cb)
4274 {
4275         BT_DBG("%p name %s", cb, cb->name);
4276
4277         write_lock(&hci_cb_list_lock);
4278         list_del(&cb->list);
4279         write_unlock(&hci_cb_list_lock);
4280
4281         return 0;
4282 }
4283 EXPORT_SYMBOL(hci_unregister_cb);
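
/* Editor's sketch (illustrative, not part of hci_core.c): how an upper
 * protocol hooks in, modelled on the L2CAP and SCO users of this interface.
 * example_cb and the init/exit pair are hypothetical; real users also fill
 * in hooks such as .security_cfm.
 */
static struct hci_cb example_cb = {
        .name = "example",
};

static int __maybe_unused example_cb_init(void)
{
        return hci_register_cb(&example_cb);
}

static void __maybe_unused example_cb_exit(void)
{
        hci_unregister_cb(&example_cb);
}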
4284
4285 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4286 {
4287         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4288
4289         /* Time stamp */
4290         __net_timestamp(skb);
4291
4292         /* Send copy to monitor */
4293         hci_send_to_monitor(hdev, skb);
4294
4295         if (atomic_read(&hdev->promisc)) {
4296                 /* Send copy to the sockets */
4297                 hci_send_to_sock(hdev, skb);
4298         }
4299
4300         /* Get rid of the skb owner prior to sending to the driver. */
4301         skb_orphan(skb);
4302
4303         if (hdev->send(hdev, skb) < 0)
4304                 BT_ERR("%s sending frame failed", hdev->name);
4305 }
4306
4307 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4308 {
4309         skb_queue_head_init(&req->cmd_q);
4310         req->hdev = hdev;
4311         req->err = 0;
4312 }
4313
4314 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4315 {
4316         struct hci_dev *hdev = req->hdev;
4317         struct sk_buff *skb;
4318         unsigned long flags;
4319
4320         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4321
4322         /* If an error occurred during request building, remove all HCI
4323          * commands queued on the HCI request queue.
4324          */
4325         if (req->err) {
4326                 skb_queue_purge(&req->cmd_q);
4327                 return req->err;
4328         }
4329
4330         /* Do not allow empty requests */
4331         if (skb_queue_empty(&req->cmd_q))
4332                 return -ENODATA;
4333
4334         skb = skb_peek_tail(&req->cmd_q);
4335         bt_cb(skb)->req.complete = complete;
4336
4337         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4338         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4339         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4340
4341         queue_work(hdev->workqueue, &hdev->cmd_work);
4342
4343         return 0;
4344 }
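
/* Editor's sketch (illustrative, not part of hci_core.c): the full
 * request-building cycle using the helpers in this file. A NULL completion
 * callback simply skips the notification. example_write_scan_enable() is
 * hypothetical.
 */
static int __maybe_unused example_write_scan_enable(struct hci_dev *hdev,
                                                    u8 scan)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        return hci_req_run(&req, NULL);
}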
4345
4346 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4347                                        u32 plen, const void *param)
4348 {
4349         int len = HCI_COMMAND_HDR_SIZE + plen;
4350         struct hci_command_hdr *hdr;
4351         struct sk_buff *skb;
4352
4353         skb = bt_skb_alloc(len, GFP_ATOMIC);
4354         if (!skb)
4355                 return NULL;
4356
4357         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4358         hdr->opcode = cpu_to_le16(opcode);
4359         hdr->plen   = plen;
4360
4361         if (plen)
4362                 memcpy(skb_put(skb, plen), param, plen);
4363
4364         BT_DBG("skb len %d", skb->len);
4365
4366         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4367
4368         return skb;
4369 }
4370
4371 /* Send HCI command */
4372 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4373                  const void *param)
4374 {
4375         struct sk_buff *skb;
4376
4377         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4378
4379         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4380         if (!skb) {
4381                 BT_ERR("%s no memory for command", hdev->name);
4382                 return -ENOMEM;
4383         }
4384
4385         /* Stand-alone HCI commands must be flagged as
4386          * single-command requests.
4387          */
4388         bt_cb(skb)->req.start = true;
4389
4390         skb_queue_tail(&hdev->cmd_q, skb);
4391         queue_work(hdev->workqueue, &hdev->cmd_work);
4392
4393         return 0;
4394 }
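
/* Editor's sketch (illustrative, not part of hci_core.c): the simplest use,
 * a parameterless HCI_Reset. example_reset() is hypothetical.
 */
static int __maybe_unused example_reset(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}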
4395
4396 /* Queue a command to an asynchronous HCI request */
4397 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4398                     const void *param, u8 event)
4399 {
4400         struct hci_dev *hdev = req->hdev;
4401         struct sk_buff *skb;
4402
4403         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4404
4405         /* If an error occurred during request building, there is no point in
4406          * queueing the HCI command. We can simply return.
4407          */
4408         if (req->err)
4409                 return;
4410
4411         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4412         if (!skb) {
4413                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4414                        hdev->name, opcode);
4415                 req->err = -ENOMEM;
4416                 return;
4417         }
4418
4419         if (skb_queue_empty(&req->cmd_q))
4420                 bt_cb(skb)->req.start = true;
4421
4422         bt_cb(skb)->req.event = event;
4423
4424         skb_queue_tail(&req->cmd_q, skb);
4425 }
4426
4427 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4428                  const void *param)
4429 {
4430         hci_req_add_ev(req, opcode, plen, param, 0);
4431 }
4432
4433 /* Get data from the previously sent command */
4434 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4435 {
4436         struct hci_command_hdr *hdr;
4437
4438         if (!hdev->sent_cmd)
4439                 return NULL;
4440
4441         hdr = (void *) hdev->sent_cmd->data;
4442
4443         if (hdr->opcode != cpu_to_le16(opcode))
4444                 return NULL;
4445
4446         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4447
4448         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4449 }
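
/* Editor's sketch (illustrative, not part of hci_core.c): modelled on the
 * command-complete handlers in hci_event.c, which use hci_sent_cmd_data()
 * to recover the parameters of the command that triggered the event.
 * example_cc_scan_enable() is hypothetical.
 */
static void __maybe_unused example_cc_scan_enable(struct hci_dev *hdev,
                                                  struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent || status)
                return;

        BT_DBG("%s scan enable was 0x%2.2x", hdev->name, *((__u8 *) sent));
}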
4450
4451 /* Send ACL data */
4452 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4453 {
4454         struct hci_acl_hdr *hdr;
4455         int len = skb->len;
4456
4457         skb_push(skb, HCI_ACL_HDR_SIZE);
4458         skb_reset_transport_header(skb);
4459         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4460         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4461         hdr->dlen   = cpu_to_le16(len);
4462 }
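
/* Editor's note (illustrative): hci_handle_pack() places the 12-bit
 * connection handle in bits 0-11 and the packet-boundary/broadcast flags in
 * bits 12-15, so e.g. handle 0x002a with ACL_START (0x02) packs to 0x202a,
 * stored little endian on the wire.
 */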
4463
4464 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4465                           struct sk_buff *skb, __u16 flags)
4466 {
4467         struct hci_conn *conn = chan->conn;
4468         struct hci_dev *hdev = conn->hdev;
4469         struct sk_buff *list;
4470
4471         skb->len = skb_headlen(skb);
4472         skb->data_len = 0;
4473
4474         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4475
4476         switch (hdev->dev_type) {
4477         case HCI_BREDR:
4478                 hci_add_acl_hdr(skb, conn->handle, flags);
4479                 break;
4480         case HCI_AMP:
4481                 hci_add_acl_hdr(skb, chan->handle, flags);
4482                 break;
4483         default:
4484                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4485                 return;
4486         }
4487
4488         list = skb_shinfo(skb)->frag_list;
4489         if (!list) {
4490                 /* Non-fragmented */
4491                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4492
4493                 skb_queue_tail(queue, skb);
4494         } else {
4495                 /* Fragmented */
4496                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4497
4498                 skb_shinfo(skb)->frag_list = NULL;
4499
4500                 /* Queue all fragments atomically */
4501                 spin_lock(&queue->lock);
4502
4503                 __skb_queue_tail(queue, skb);
4504
4505                 flags &= ~ACL_START;
4506                 flags |= ACL_CONT;
4507                 do {
4508                         skb = list; list = list->next;
4509
4510                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4511                         hci_add_acl_hdr(skb, conn->handle, flags);
4512
4513                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4514
4515                         __skb_queue_tail(queue, skb);
4516                 } while (list);
4517
4518                 spin_unlock(&queue->lock);
4519         }
4520 }
4521
4522 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4523 {
4524         struct hci_dev *hdev = chan->conn->hdev;
4525
4526         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4527
4528         hci_queue_acl(chan, &chan->data_q, skb, flags);
4529
4530         queue_work(hdev->workqueue, &hdev->tx_work);
4531 }
4532
4533 /* Send SCO data */
4534 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4535 {
4536         struct hci_dev *hdev = conn->hdev;
4537         struct hci_sco_hdr hdr;
4538
4539         BT_DBG("%s len %d", hdev->name, skb->len);
4540
4541         hdr.handle = cpu_to_le16(conn->handle);
4542         hdr.dlen   = skb->len;
4543
4544         skb_push(skb, HCI_SCO_HDR_SIZE);
4545         skb_reset_transport_header(skb);
4546         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4547
4548         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4549
4550         skb_queue_tail(&conn->data_q, skb);
4551         queue_work(hdev->workqueue, &hdev->tx_work);
4552 }
4553
4554 /* ---- HCI TX task (outgoing data) ---- */
4555
4556 /* HCI Connection scheduler */
4557 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4558                                      int *quote)
4559 {
4560         struct hci_conn_hash *h = &hdev->conn_hash;
4561         struct hci_conn *conn = NULL, *c;
4562         unsigned int num = 0, min = ~0;
4563
4564         /* We don't have to lock the device here. Connections are always
4565          * added and removed with the TX task disabled. */
4566
4567         rcu_read_lock();
4568
4569         list_for_each_entry_rcu(c, &h->list, list) {
4570                 if (c->type != type || skb_queue_empty(&c->data_q))
4571                         continue;
4572
4573                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4574                         continue;
4575
4576                 num++;
4577
4578                 if (c->sent < min) {
4579                         min  = c->sent;
4580                         conn = c;
4581                 }
4582
4583                 if (hci_conn_num(hdev, type) == num)
4584                         break;
4585         }
4586
4587         rcu_read_unlock();
4588
4589         if (conn) {
4590                 int cnt, q;
4591
4592                 switch (conn->type) {
4593                 case ACL_LINK:
4594                         cnt = hdev->acl_cnt;
4595                         break;
4596                 case SCO_LINK:
4597                 case ESCO_LINK:
4598                         cnt = hdev->sco_cnt;
4599                         break;
4600                 case LE_LINK:
4601                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4602                         break;
4603                 default:
4604                         cnt = 0;
4605                         BT_ERR("Unknown link type");
4606                 }
4607
4608                 q = cnt / num;
4609                 *quote = q ? q : 1;
4610         } else
4611                 *quote = 0;
4612
4613         BT_DBG("conn %p quote %d", conn, *quote);
4614         return conn;
4615 }
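
/* Editor's note (illustrative numbers): with hdev->acl_cnt == 8 and three
 * ACL connections that all have queued data, num == 3 and the connection
 * with the smallest ->sent count is picked; its quota is 8 / 3 == 2
 * packets, and the "q ? q : 1" fallback guarantees at least one packet
 * per round.
 */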
4616
4617 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4618 {
4619         struct hci_conn_hash *h = &hdev->conn_hash;
4620         struct hci_conn *c;
4621
4622         BT_ERR("%s link tx timeout", hdev->name);
4623
4624         rcu_read_lock();
4625
4626         /* Kill stalled connections */
4627         list_for_each_entry_rcu(c, &h->list, list) {
4628                 if (c->type == type && c->sent) {
4629                         BT_ERR("%s killing stalled connection %pMR",
4630                                hdev->name, &c->dst);
4631                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4632                 }
4633         }
4634
4635         rcu_read_unlock();
4636 }
4637
4638 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4639                                       int *quote)
4640 {
4641         struct hci_conn_hash *h = &hdev->conn_hash;
4642         struct hci_chan *chan = NULL;
4643         unsigned int num = 0, min = ~0, cur_prio = 0;
4644         struct hci_conn *conn;
4645         int cnt, q, conn_num = 0;
4646
4647         BT_DBG("%s", hdev->name);
4648
4649         rcu_read_lock();
4650
4651         list_for_each_entry_rcu(conn, &h->list, list) {
4652                 struct hci_chan *tmp;
4653
4654                 if (conn->type != type)
4655                         continue;
4656
4657                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4658                         continue;
4659
4660                 conn_num++;
4661
4662                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4663                         struct sk_buff *skb;
4664
4665                         if (skb_queue_empty(&tmp->data_q))
4666                                 continue;
4667
4668                         skb = skb_peek(&tmp->data_q);
4669                         if (skb->priority < cur_prio)
4670                                 continue;
4671
4672                         if (skb->priority > cur_prio) {
4673                                 num = 0;
4674                                 min = ~0;
4675                                 cur_prio = skb->priority;
4676                         }
4677
4678                         num++;
4679
4680                         if (conn->sent < min) {
4681                                 min  = conn->sent;
4682                                 chan = tmp;
4683                         }
4684                 }
4685
4686                 if (hci_conn_num(hdev, type) == conn_num)
4687                         break;
4688         }
4689
4690         rcu_read_unlock();
4691
4692         if (!chan)
4693                 return NULL;
4694
4695         switch (chan->conn->type) {
4696         case ACL_LINK:
4697                 cnt = hdev->acl_cnt;
4698                 break;
4699         case AMP_LINK:
4700                 cnt = hdev->block_cnt;
4701                 break;
4702         case SCO_LINK:
4703         case ESCO_LINK:
4704                 cnt = hdev->sco_cnt;
4705                 break;
4706         case LE_LINK:
4707                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4708                 break;
4709         default:
4710                 cnt = 0;
4711                 BT_ERR("Unknown link type");
4712         }
4713
4714         q = cnt / num;
4715         *quote = q ? q : 1;
4716         BT_DBG("chan %p quote %d", chan, *quote);
4717         return chan;
4718 }
4719
4720 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4721 {
4722         struct hci_conn_hash *h = &hdev->conn_hash;
4723         struct hci_conn *conn;
4724         int num = 0;
4725
4726         BT_DBG("%s", hdev->name);
4727
4728         rcu_read_lock();
4729
4730         list_for_each_entry_rcu(conn, &h->list, list) {
4731                 struct hci_chan *chan;
4732
4733                 if (conn->type != type)
4734                         continue;
4735
4736                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4737                         continue;
4738
4739                 num++;
4740
4741                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4742                         struct sk_buff *skb;
4743
4744                         if (chan->sent) {
4745                                 chan->sent = 0;
4746                                 continue;
4747                         }
4748
4749                         if (skb_queue_empty(&chan->data_q))
4750                                 continue;
4751
4752                         skb = skb_peek(&chan->data_q);
4753                         if (skb->priority >= HCI_PRIO_MAX - 1)
4754                                 continue;
4755
4756                         skb->priority = HCI_PRIO_MAX - 1;
4757
4758                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4759                                skb->priority);
4760                 }
4761
4762                 if (hci_conn_num(hdev, type) == num)
4763                         break;
4764         }
4765
4766         rcu_read_unlock();
4767
4768 }
4769
4770 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4771 {
4772         /* Calculate count of blocks used by this packet */
4773         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4774 }
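
/* Editor's note (illustrative numbers): with hdev->block_len == 339 and an
 * skb of 1021 bytes (4-byte ACL header plus 1017 bytes of payload), this
 * works out to DIV_ROUND_UP(1017, 339) == 3 controller buffer blocks.
 */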
4775
4776 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4777 {
4778         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4779                 /* The ACL tx timeout must be longer than the maximum
4780                  * link supervision timeout (40.9 seconds). */
4781                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4782                                        HCI_ACL_TX_TIMEOUT))
4783                         hci_link_tx_to(hdev, ACL_LINK);
4784         }
4785 }
4786
4787 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4788 {
4789         unsigned int cnt = hdev->acl_cnt;
4790         struct hci_chan *chan;
4791         struct sk_buff *skb;
4792         int quote;
4793
4794         __check_timeout(hdev, cnt);
4795
4796         while (hdev->acl_cnt &&
4797                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4798                 u32 priority = (skb_peek(&chan->data_q))->priority;
4799                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4800                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4801                                skb->len, skb->priority);
4802
4803                         /* Stop if priority has changed */
4804                         if (skb->priority < priority)
4805                                 break;
4806
4807                         skb = skb_dequeue(&chan->data_q);
4808
4809                         hci_conn_enter_active_mode(chan->conn,
4810                                                    bt_cb(skb)->force_active);
4811
4812                         hci_send_frame(hdev, skb);
4813                         hdev->acl_last_tx = jiffies;
4814
4815                         hdev->acl_cnt--;
4816                         chan->sent++;
4817                         chan->conn->sent++;
4818                 }
4819         }
4820
4821         if (cnt != hdev->acl_cnt)
4822                 hci_prio_recalculate(hdev, ACL_LINK);
4823 }
4824
4825 static void hci_sched_acl_blk(struct hci_dev *hdev)
4826 {
4827         unsigned int cnt = hdev->block_cnt;
4828         struct hci_chan *chan;
4829         struct sk_buff *skb;
4830         int quote;
4831         u8 type;
4832
4833         __check_timeout(hdev, cnt);
4834
4835         BT_DBG("%s", hdev->name);
4836
4837         if (hdev->dev_type == HCI_AMP)
4838                 type = AMP_LINK;
4839         else
4840                 type = ACL_LINK;
4841
4842         while (hdev->block_cnt > 0 &&
4843                (chan = hci_chan_sent(hdev, type, &quote))) {
4844                 u32 priority = (skb_peek(&chan->data_q))->priority;
4845                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4846                         int blocks;
4847
4848                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4849                                skb->len, skb->priority);
4850
4851                         /* Stop if priority has changed */
4852                         if (skb->priority < priority)
4853                                 break;
4854
4855                         skb = skb_dequeue(&chan->data_q);
4856
4857                         blocks = __get_blocks(hdev, skb);
4858                         if (blocks > hdev->block_cnt)
4859                                 return;
4860
4861                         hci_conn_enter_active_mode(chan->conn,
4862                                                    bt_cb(skb)->force_active);
4863
4864                         hci_send_frame(hdev, skb);
4865                         hdev->acl_last_tx = jiffies;
4866
4867                         hdev->block_cnt -= blocks;
4868                         quote -= blocks;
4869
4870                         chan->sent += blocks;
4871                         chan->conn->sent += blocks;
4872                 }
4873         }
4874
4875         if (cnt != hdev->block_cnt)
4876                 hci_prio_recalculate(hdev, type);
4877 }
4878
4879 static void hci_sched_acl(struct hci_dev *hdev)
4880 {
4881         BT_DBG("%s", hdev->name);
4882
4883         /* No ACL link over BR/EDR controller */
4884         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4885                 return;
4886
4887         /* No AMP link over AMP controller */
4888         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4889                 return;
4890
4891         switch (hdev->flow_ctl_mode) {
4892         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4893                 hci_sched_acl_pkt(hdev);
4894                 break;
4895
4896         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4897                 hci_sched_acl_blk(hdev);
4898                 break;
4899         }
4900 }
4901
4902 /* Schedule SCO */
4903 static void hci_sched_sco(struct hci_dev *hdev)
4904 {
4905         struct hci_conn *conn;
4906         struct sk_buff *skb;
4907         int quote;
4908
4909         BT_DBG("%s", hdev->name);
4910
4911         if (!hci_conn_num(hdev, SCO_LINK))
4912                 return;
4913
4914         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4915                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4916                         BT_DBG("skb %p len %d", skb, skb->len);
4917                         hci_send_frame(hdev, skb);
4918
4919                         conn->sent++;
4920                         if (conn->sent == ~0)
4921                                 conn->sent = 0;
4922                 }
4923         }
4924 }
4925
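/* Schedule eSCO */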
4926 static void hci_sched_esco(struct hci_dev *hdev)
4927 {
4928         struct hci_conn *conn;
4929         struct sk_buff *skb;
4930         int quote;
4931
4932         BT_DBG("%s", hdev->name);
4933
4934         if (!hci_conn_num(hdev, ESCO_LINK))
4935                 return;
4936
4937         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4938                                                      &quote))) {
4939                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4940                         BT_DBG("skb %p len %d", skb, skb->len);
4941                         hci_send_frame(hdev, skb);
4942
4943                         conn->sent++;
4944                         if (conn->sent == ~0)
4945                                 conn->sent = 0;
4946                 }
4947         }
4948 }
4949
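/* Schedule LE. If the controller has no dedicated LE buffers
 * (le_pkts == 0), LE traffic shares the ACL credit pool (acl_cnt).
 */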
4950 static void hci_sched_le(struct hci_dev *hdev)
4951 {
4952         struct hci_chan *chan;
4953         struct sk_buff *skb;
4954         int quote, cnt, tmp;
4955
4956         BT_DBG("%s", hdev->name);
4957
4958         if (!hci_conn_num(hdev, LE_LINK))
4959                 return;
4960
4961         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4962                 /* LE tx timeout must be longer than maximum
4963                  * link supervision timeout (40.9 seconds) */
4964                 if (!hdev->le_cnt && hdev->le_pkts &&
4965                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4966                         hci_link_tx_to(hdev, LE_LINK);
4967         }
4968
4969         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4970         tmp = cnt;
4971         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4972                 u32 priority = (skb_peek(&chan->data_q))->priority;
4973                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4974                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4975                                skb->len, skb->priority);
4976
4977                         /* Stop if priority has changed */
4978                         if (skb->priority < priority)
4979                                 break;
4980
4981                         skb = skb_dequeue(&chan->data_q);
4982
4983                         hci_send_frame(hdev, skb);
4984                         hdev->le_last_tx = jiffies;
4985
4986                         cnt--;
4987                         chan->sent++;
4988                         chan->conn->sent++;
4989                 }
4990         }
4991
4992         if (hdev->le_pkts)
4993                 hdev->le_cnt = cnt;
4994         else
4995                 hdev->acl_cnt = cnt;
4996
4997         if (cnt != tmp)
4998                 hci_prio_recalculate(hdev, LE_LINK);
4999 }
5000
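/* TX work handler: run the per-link-type schedulers and then flush any
 * raw (unknown type) packets. Scheduling is bypassed while the device
 * is held open by a user channel socket.
 */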
5001 static void hci_tx_work(struct work_struct *work)
5002 {
5003         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5004         struct sk_buff *skb;
5005
5006         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5007                hdev->sco_cnt, hdev->le_cnt);
5008
5009         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5010                 /* Schedule queues and send frames to the HCI driver */
5011                 hci_sched_acl(hdev);
5012                 hci_sched_sco(hdev);
5013                 hci_sched_esco(hdev);
5014                 hci_sched_le(hdev);
5015         }
5016
5017         /* Send next queued raw (unknown type) packet */
5018         while ((skb = skb_dequeue(&hdev->raw_q)))
5019                 hci_send_frame(hdev, skb);
5020 }
5021
5022 /* ----- HCI RX task (incoming data processing) ----- */
5023
5024 /* ACL data packet */
5025 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5026 {
5027         struct hci_acl_hdr *hdr = (void *) skb->data;
5028         struct hci_conn *conn;
5029         __u16 handle, flags;
5030
5031         skb_pull(skb, HCI_ACL_HDR_SIZE);
5032
5033         handle = __le16_to_cpu(hdr->handle);
5034         flags  = hci_flags(handle);
5035         handle = hci_handle(handle);
5036
5037         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5038                handle, flags);
5039
5040         hdev->stat.acl_rx++;
5041
5042         hci_dev_lock(hdev);
5043         conn = hci_conn_hash_lookup_handle(hdev, handle);
5044         hci_dev_unlock(hdev);
5045
5046         if (conn) {
5047                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5048
5049                 /* Send to upper protocol */
5050                 l2cap_recv_acldata(conn, skb, flags);
5051                 return;
5052         } else {
5053                 BT_ERR("%s ACL packet for unknown connection handle %d",
5054                        hdev->name, handle);
5055         }
5056
5057         kfree_skb(skb);
5058 }
5059
5060 /* SCO data packet */
5061 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5062 {
5063         struct hci_sco_hdr *hdr = (void *) skb->data;
5064         struct hci_conn *conn;
5065         __u16 handle;
5066
5067         skb_pull(skb, HCI_SCO_HDR_SIZE);
5068
5069         handle = __le16_to_cpu(hdr->handle);
5070
5071         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5072
5073         hdev->stat.sco_rx++;
5074
5075         hci_dev_lock(hdev);
5076         conn = hci_conn_hash_lookup_handle(hdev, handle);
5077         hci_dev_unlock(hdev);
5078
5079         if (conn) {
5080                 /* Send to upper protocol */
5081                 sco_recv_scodata(conn, skb);
5082                 return;
5083         } else {
5084                 BT_ERR("%s SCO packet for unknown connection handle %d",
5085                        hdev->name, handle);
5086         }
5087
5088         kfree_skb(skb);
5089 }
5090
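/* A request is considered complete when the head of the command queue
 * is the start of a new request (or the queue is empty altogether).
 */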
5091 static bool hci_req_is_complete(struct hci_dev *hdev)
5092 {
5093         struct sk_buff *skb;
5094
5095         skb = skb_peek(&hdev->cmd_q);
5096         if (!skb)
5097                 return true;
5098
5099         return bt_cb(skb)->req.start;
5100 }
5101
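/* Requeue a clone of the last sent command, unless that command was
 * HCI_Reset. This recovers from controllers that reset spontaneously
 * and drop whatever command was in flight.
 */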
5102 static void hci_resend_last(struct hci_dev *hdev)
5103 {
5104         struct hci_command_hdr *sent;
5105         struct sk_buff *skb;
5106         u16 opcode;
5107
5108         if (!hdev->sent_cmd)
5109                 return;
5110
5111         sent = (void *) hdev->sent_cmd->data;
5112         opcode = __le16_to_cpu(sent->opcode);
5113         if (opcode == HCI_OP_RESET)
5114                 return;
5115
5116         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5117         if (!skb)
5118                 return;
5119
5120         skb_queue_head(&hdev->cmd_q, skb);
5121         queue_work(hdev->workqueue, &hdev->cmd_work);
5122 }
5123
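/* Process completion of the command identified by opcode. Once a request
 * finishes (its last command completed, or a command in it failed), run
 * the request's complete callback exactly once and drop any of its
 * commands still sitting in the queue.
 */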
5124 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5125 {
5126         hci_req_complete_t req_complete = NULL;
5127         struct sk_buff *skb;
5128         unsigned long flags;
5129
5130         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5131
5132         /* If the completed command doesn't match the last one that was
5133          * sent, we need to do special handling of it.
5134          */
5135         if (!hci_sent_cmd_data(hdev, opcode)) {
5136                 /* Some CSR based controllers generate a spontaneous
5137                  * reset complete event during init and any pending
5138                  * command will never be completed. In such a case we
5139                  * need to resend whatever was the last sent
5140                  * command.
5141                  */
5142                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5143                         hci_resend_last(hdev);
5144
5145                 return;
5146         }
5147
5148         /* If the command succeeded and there are still more commands in
5149          * this request, the request is not yet complete.
5150          */
5151         if (!status && !hci_req_is_complete(hdev))
5152                 return;
5153
5154         /* If this was the last command in a request, the complete
5155          * callback will be found in hdev->sent_cmd instead of the
5156          * command queue (hdev->cmd_q).
5157          */
5158         if (hdev->sent_cmd) {
5159                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5160
5161                 if (req_complete) {
5162                         /* We must set the complete callback to NULL to
5163                          * avoid calling the callback more than once if
5164                          * this function gets called again.
5165                          */
5166                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5167
5168                         goto call_complete;
5169                 }
5170         }
5171
5172         /* Remove all pending commands belonging to this request */
5173         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5174         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5175                 if (bt_cb(skb)->req.start) {
5176                         __skb_queue_head(&hdev->cmd_q, skb);
5177                         break;
5178                 }
5179
5180                 req_complete = bt_cb(skb)->req.complete;
5181                 kfree_skb(skb);
5182         }
5183         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5184
5185 call_complete:
5186         if (req_complete)
5187                 req_complete(hdev, status);
5188 }
5189
5190 static void hci_rx_work(struct work_struct *work)
5191 {
5192         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5193         struct sk_buff *skb;
5194
5195         BT_DBG("%s", hdev->name);
5196
5197         while ((skb = skb_dequeue(&hdev->rx_q))) {
5198                 /* Send copy to monitor */
5199                 hci_send_to_monitor(hdev, skb);
5200
5201                 if (atomic_read(&hdev->promisc)) {
5202                         /* Send copy to the sockets */
5203                         hci_send_to_sock(hdev, skb);
5204                 }
5205
5206                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5207                         kfree_skb(skb);
5208                         continue;
5209                 }
5210
5211                 if (test_bit(HCI_INIT, &hdev->flags)) {
5212                         /* Don't process data packets in this state. */
5213                         switch (bt_cb(skb)->pkt_type) {
5214                         case HCI_ACLDATA_PKT:
5215                         case HCI_SCODATA_PKT:
5216                                 kfree_skb(skb);
5217                                 continue;
5218                         }
5219                 }
5220
5221                 /* Process frame */
5222                 switch (bt_cb(skb)->pkt_type) {
5223                 case HCI_EVENT_PKT:
5224                         BT_DBG("%s Event packet", hdev->name);
5225                         hci_event_packet(hdev, skb);
5226                         break;
5227
5228                 case HCI_ACLDATA_PKT:
5229                         BT_DBG("%s ACL data packet", hdev->name);
5230                         hci_acldata_packet(hdev, skb);
5231                         break;
5232
5233                 case HCI_SCODATA_PKT:
5234                         BT_DBG("%s SCO data packet", hdev->name);
5235                         hci_scodata_packet(hdev, skb);
5236                         break;
5237
5238                 default:
5239                         kfree_skb(skb);
5240                         break;
5241                 }
5242         }
5243 }
5244
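/* Command work handler: if the controller has a free command credit
 * (cmd_cnt), dequeue the next HCI command, keep a clone in sent_cmd so
 * the response can be matched, send the frame and arm the command
 * timeout (unless a reset is in progress).
 */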
5245 static void hci_cmd_work(struct work_struct *work)
5246 {
5247         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5248         struct sk_buff *skb;
5249
5250         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5251                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5252
5253         /* Send queued commands */
5254         if (atomic_read(&hdev->cmd_cnt)) {
5255                 skb = skb_dequeue(&hdev->cmd_q);
5256                 if (!skb)
5257                         return;
5258
5259                 kfree_skb(hdev->sent_cmd);
5260
5261                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5262                 if (hdev->sent_cmd) {
5263                         atomic_dec(&hdev->cmd_cnt);
5264                         hci_send_frame(hdev, skb);
5265                         if (test_bit(HCI_RESET, &hdev->flags))
5266                                 cancel_delayed_work(&hdev->cmd_timer);
5267                         else
5268                                 schedule_delayed_work(&hdev->cmd_timer,
5269                                                       HCI_CMD_TIMEOUT);
5270                 } else {
5271                         skb_queue_head(&hdev->cmd_q, skb);
5272                         queue_work(hdev->workqueue, &hdev->cmd_work);
5273                 }
5274         }
5275 }
5276
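/* Append an LE Set Scan Enable (disable) command to the given request.
 * A minimal usage sketch, mirroring hci_update_background_scan() below
 * (complete_cb is a hypothetical callback of type hci_req_complete_t):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, complete_cb);
 */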
5277 void hci_req_add_le_scan_disable(struct hci_request *req)
5278 {
5279         struct hci_cp_le_set_scan_enable cp;
5280
5281         memset(&cp, 0, sizeof(cp));
5282         cp.enable = LE_SCAN_DISABLE;
5283         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5284 }
5285
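/* Append the commands to start LE passive scanning (set scan parameters,
 * then scan enable with duplicate filtering) to the given request. The
 * own address type comes from hci_update_random_address().
 */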
5286 void hci_req_add_le_passive_scan(struct hci_request *req)
5287 {
5288         struct hci_cp_le_set_scan_param param_cp;
5289         struct hci_cp_le_set_scan_enable enable_cp;
5290         struct hci_dev *hdev = req->hdev;
5291         u8 own_addr_type;
5292
5293         /* Set require_privacy to false since no SCAN_REQ are sent
5294          * during passive scanning. Not using an unresolvable address
5295          * here is important so that peer devices using direct
5296          * advertising with our address will be correctly reported
5297          * by the controller.
5298          */
5299         if (hci_update_random_address(req, false, &own_addr_type))
5300                 return;
5301
5302         memset(&param_cp, 0, sizeof(param_cp));
5303         param_cp.type = LE_SCAN_PASSIVE;
5304         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5305         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5306         param_cp.own_address_type = own_addr_type;
5307         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5308                     &param_cp);
5309
5310         memset(&enable_cp, 0, sizeof(enable_cp));
5311         enable_cp.enable = LE_SCAN_ENABLE;
5312         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5313         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5314                     &enable_cp);
5315 }
5316
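/* Completion callback for the background scan update request; a failure
 * is only logged, as there is nobody to propagate the error to.
 */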
5317 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5318 {
5319         if (status)
5320                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5321                        status);
5322 }
5323
5324 /* This function controls the background scanning based on the
5325  * hdev->pend_le_conns and hdev->pend_le_reports lists. If either list is
5326  * non-empty we start the background scanning, otherwise we stop it.
5327  *
5328  * This function requires that the caller holds hdev->lock.
5329  */
5330 void hci_update_background_scan(struct hci_dev *hdev)
5331 {
5332         struct hci_request req;
5333         struct hci_conn *conn;
5334         int err;
5335
5336         if (!test_bit(HCI_UP, &hdev->flags) ||
5337             test_bit(HCI_INIT, &hdev->flags) ||
5338             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5339             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5340             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5341                 return;
5342
5343         hci_req_init(&req, hdev);
5344
5345         if (list_empty(&hdev->pend_le_conns) &&
5346             list_empty(&hdev->pend_le_reports)) {
5347                 /* If there are no pending LE connections or devices
5348                  * to be scanned for, we should stop the background
5349                  * scanning.
5350                  */
5351
5352                 /* If controller is not scanning we are done. */
5353                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5354                         return;
5355
5356                 hci_req_add_le_scan_disable(&req);
5357
5358                 BT_DBG("%s stopping background scanning", hdev->name);
5359         } else {
5360                 /* If there is at least one pending LE connection or device
5361                  * to be reported, we should keep the background scan running.
5362                  */
5363
5364                 /* If controller is connecting, we should not start scanning
5365                  * since some controllers are not able to scan and connect at
5366                  * the same time.
5367                  */
5368                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5369                 if (conn)
5370                         return;
5371
5372                 /* If controller is currently scanning, we stop it to ensure we
5373                  * don't miss any advertising (due to duplicates filter).
5374                  */
5375                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5376                         hci_req_add_le_scan_disable(&req);
5377
5378                 hci_req_add_le_passive_scan(&req);
5379
5380                 BT_DBG("%s starting background scanning", hdev->name);
5381         }
5382
5383         err = hci_req_run(&req, update_background_scan_complete);
5384         if (err)
5385                 BT_ERR("Failed to run HCI request: err %d", err);
5386 }