Bluetooth: Prefer sizeof(*ptr) when allocating memory
[cascardo/linux.git] / net / bluetooth / hci_core.c
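
The commit title refers to the kernel coding-style preference of sizing an allocation from the pointer being assigned rather than repeating the struct name: if the pointer's type ever changes, the allocation stays correct automatically. A minimal sketch (illustrative only; the struct happens to be used in this file, but the snippet itself is not part of it):

        struct bdaddr_list *entry;

        /* Preferred: the size tracks the pointer's type. */
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        /* Discouraged: repeats the type name and can silently go stale. */
        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
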
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
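
/* From userspace this surfaces as a boolean debugfs file; illustratively
 * (the exact path depends on the debugfs mount point and adapter name):
 *
 *      # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      N
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */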

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored with reversed
                 * byte order (little endian), so convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
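
/* DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write file_operations
 * boilerplate for a debugfs attribute backed by u64 get/set callbacks; the
 * trailing format string (e.g. "0x%4.4llx\n" above) controls how reads are
 * rendered. The same pattern recurs for most numeric attributes below.
 */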

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
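
/* Usage sketch (illustrative, not part of this file): a caller can issue a
 * command synchronously and inspect the returned event payload, much as
 * dut_mode_write() above does under hci_req_lock():
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ...
 *      kfree_skb(skb);
 */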

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * be at least 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
1622          * as supported send it. If not supported assume that the controller
1623          * does not have actual support for stored link keys which makes this
1624          * command redundant anyway.
1625          *
1626          * Some controllers indicate that they support handling deleting
1627          * stored link keys, but they don't. The quirk lets a driver
1628          * just disable this command.
1629          */
1630         if (hdev->commands[6] & 0x80 &&
1631             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632                 struct hci_cp_delete_stored_link_key cp;
1633
1634                 bacpy(&cp.bdaddr, BDADDR_ANY);
1635                 cp.delete_all = 0x01;
1636                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1637                             sizeof(cp), &cp);
1638         }
1639
1640         if (hdev->commands[5] & 0x10)
1641                 hci_setup_link_policy(req);
1642
1643         if (lmp_le_capable(hdev)) {
1644                 u8 events[8];
1645
1646                 memset(events, 0, sizeof(events));
1647                 events[0] = 0x0f;
1648
1649                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650                         events[0] |= 0x10;      /* LE Long Term Key Request */
1651
1652                 /* If controller supports the Connection Parameters Request
1653                  * Link Layer Procedure, enable the corresponding event.
1654                  */
1655                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656                         events[0] |= 0x20;      /* LE Remote Connection
1657                                                  * Parameter Request
1658                                                  */
1659
1660                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1661                             events);
1662
1663                 if (hdev->commands[25] & 0x40) {
1664                         /* Read LE Advertising Channel TX Power */
1665                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1666                 }
1667
1668                 hci_set_le_support(req);
1669         }
1670
1671         /* Read features beyond page 1 if available */
1672         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673                 struct hci_cp_read_local_ext_features cp;
1674
1675                 cp.page = p;
1676                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1677                             sizeof(cp), &cp);
1678         }
1679 }
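
/* The hdev->commands[] tests above index the controller's Supported
 * Commands bitmask: hdev->commands[6] & 0x80 is octet 6 bit 7 (Delete
 * Stored Link Key) and hdev->commands[5] & 0x10 is octet 5 bit 4
 * (Write Default Link Policy Settings). A hypothetical helper, shown
 * only to make the octet/bit encoding explicit:
 */
static inline bool hci_cmd_supported(struct hci_dev *hdev,
                                     unsigned int octet, unsigned int bit)
{
        return hdev->commands[octet] & (1 << bit);
}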
1680
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1682 {
1683         struct hci_dev *hdev = req->hdev;
1684
1685         /* Set event mask page 2 if the HCI command for it is supported */
1686         if (hdev->commands[22] & 0x04)
1687                 hci_set_event_mask_page_2(req);
1688
1689         /* Check for Synchronization Train support */
1690         if (lmp_sync_train_capable(hdev))
1691                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1692
1693         /* Enable Secure Connections if supported and configured */
1694         if ((lmp_sc_capable(hdev) ||
1695              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1696             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1697                 u8 support = 0x01;
1698                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1699                             sizeof(support), &support);
1700         }
1701 }
1702
1703 static int __hci_init(struct hci_dev *hdev)
1704 {
1705         int err;
1706
1707         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1708         if (err < 0)
1709                 return err;
1710
1711         /* The Device Under Test (DUT) mode is special and available for
1712          * all controller types. So just create it early on.
1713          */
1714         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1715                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1716                                     &dut_mode_fops);
1717         }
1718
1719         /* The HCI_BREDR device type covers single-mode LE, single-mode
1720          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1721          * only need the first stage of init.
1722          */
1723         if (hdev->dev_type != HCI_BREDR)
1724                 return 0;
1725
1726         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1727         if (err < 0)
1728                 return err;
1729
1730         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1731         if (err < 0)
1732                 return err;
1733
1734         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1735         if (err < 0)
1736                 return err;
1737
1738         /* Only create debugfs entries during the initial setup
1739          * phase and not every time the controller gets powered on.
1740          */
1741         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1742                 return 0;
1743
1744         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1745                             &features_fops);
1746         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1747                            &hdev->manufacturer);
1748         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1749         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1750         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1751                             &blacklist_fops);
1752         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1753                             &whitelist_fops);
1754         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1755
1756         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1757                             &conn_info_min_age_fops);
1758         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1759                             &conn_info_max_age_fops);
1760
1761         if (lmp_bredr_capable(hdev)) {
1762                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1763                                     hdev, &inquiry_cache_fops);
1764                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1765                                     hdev, &link_keys_fops);
1766                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1767                                     hdev, &dev_class_fops);
1768                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1769                                     hdev, &voice_setting_fops);
1770         }
1771
1772         if (lmp_ssp_capable(hdev)) {
1773                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1774                                     hdev, &auto_accept_delay_fops);
1775                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1776                                     hdev, &force_sc_support_fops);
1777                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1778                                     hdev, &sc_only_mode_fops);
1779         }
1780
1781         if (lmp_sniff_capable(hdev)) {
1782                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1783                                     hdev, &idle_timeout_fops);
1784                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1785                                     hdev, &sniff_min_interval_fops);
1786                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1787                                     hdev, &sniff_max_interval_fops);
1788         }
1789
1790         if (lmp_le_capable(hdev)) {
1791                 debugfs_create_file("identity", 0400, hdev->debugfs,
1792                                     hdev, &identity_fops);
1793                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1794                                     hdev, &rpa_timeout_fops);
1795                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1796                                     hdev, &random_address_fops);
1797                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1798                                     hdev, &static_address_fops);
1799
1800                 /* For controllers with a public address, provide a debug
1801                  * option to force the usage of the configured static
1802                  * address. By default the public address is used.
1803                  */
1804                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1805                         debugfs_create_file("force_static_address", 0644,
1806                                             hdev->debugfs, hdev,
1807                                             &force_static_address_fops);
1808
1809                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1810                                   &hdev->le_white_list_size);
1811                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1812                                     &white_list_fops);
1813                 debugfs_create_file("identity_resolving_keys", 0400,
1814                                     hdev->debugfs, hdev,
1815                                     &identity_resolving_keys_fops);
1816                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1817                                     hdev, &long_term_keys_fops);
1818                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1819                                     hdev, &conn_min_interval_fops);
1820                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1821                                     hdev, &conn_max_interval_fops);
1822                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1823                                     hdev, &conn_latency_fops);
1824                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1825                                     hdev, &supervision_timeout_fops);
1826                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1827                                     hdev, &adv_channel_map_fops);
1828                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1829                                     &device_list_fops);
1830                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1831                                    hdev->debugfs,
1832                                    &hdev->discov_interleaved_timeout);
1833         }
1834
1835         return 0;
1836 }
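
/* From userspace the entries created above appear under debugfs,
 * typically mounted at /sys/kernel/debug. A minimal userspace sketch
 * of reading one of them; the device index and mount point are
 * illustrative assumptions:
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      static int read_hci_version(void)
 *      {
 *              char buf[32];
 *              ssize_t n;
 *              int fd;
 *
 *              fd = open("/sys/kernel/debug/bluetooth/hci0/hci_version",
 *                        O_RDONLY);
 *              if (fd < 0)
 *                      return -1;
 *              n = read(fd, buf, sizeof(buf) - 1);
 *              close(fd);
 *              if (n < 0)
 *                      return -1;
 *              buf[n] = '\0';
 *              printf("hci_version: %s\n", buf);
 *              return 0;
 *      }
 */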
1837
1838 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1839 {
1840         struct hci_dev *hdev = req->hdev;
1841
1842         BT_DBG("%s %ld", hdev->name, opt);
1843
1844         /* Reset */
1845         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1846                 hci_reset_req(req, 0);
1847
1848         /* Read Local Version */
1849         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1850
1851         /* Read BD Address */
1852         if (hdev->set_bdaddr)
1853                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1854 }
1855
1856 static int __hci_unconf_init(struct hci_dev *hdev)
1857 {
1858         int err;
1859
1860         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1861                 return 0;
1862
1863         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1864         if (err < 0)
1865                 return err;
1866
1867         return 0;
1868 }
1869
1870 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1871 {
1872         __u8 scan = opt;
1873
1874         BT_DBG("%s %x", req->hdev->name, scan);
1875
1876         /* Inquiry and Page scans */
1877         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1878 }
1879
1880 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1881 {
1882         __u8 auth = opt;
1883
1884         BT_DBG("%s %x", req->hdev->name, auth);
1885
1886         /* Authentication */
1887         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1888 }
1889
1890 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1891 {
1892         __u8 encrypt = opt;
1893
1894         BT_DBG("%s %x", req->hdev->name, encrypt);
1895
1896         /* Encryption */
1897         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1898 }
1899
1900 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1901 {
1902         __le16 policy = cpu_to_le16(opt);
1903
1904         BT_DBG("%s %x", req->hdev->name, policy);
1905
1906         /* Default link policy */
1907         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1908 }
1909
1910 /* Get HCI device by index.
1911  * Device is held on return. */
1912 struct hci_dev *hci_dev_get(int index)
1913 {
1914         struct hci_dev *hdev = NULL, *d;
1915
1916         BT_DBG("%d", index);
1917
1918         if (index < 0)
1919                 return NULL;
1920
1921         read_lock(&hci_dev_list_lock);
1922         list_for_each_entry(d, &hci_dev_list, list) {
1923                 if (d->id == index) {
1924                         hdev = hci_dev_hold(d);
1925                         break;
1926                 }
1927         }
1928         read_unlock(&hci_dev_list_lock);
1929         return hdev;
1930 }
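
/* Every successful hci_dev_get() takes a reference that must later be
 * dropped with hci_dev_put(). A minimal sketch of the pattern (the
 * function name is hypothetical):
 */
static inline int hci_dev_example_use(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        /* ... operate on hdev ... */

        hci_dev_put(hdev);
        return 0;
}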
1931
1932 /* ---- Inquiry support ---- */
1933
1934 bool hci_discovery_active(struct hci_dev *hdev)
1935 {
1936         struct discovery_state *discov = &hdev->discovery;
1937
1938         switch (discov->state) {
1939         case DISCOVERY_FINDING:
1940         case DISCOVERY_RESOLVING:
1941                 return true;
1942
1943         default:
1944                 return false;
1945         }
1946 }
1947
1948 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1949 {
1950         int old_state = hdev->discovery.state;
1951
1952         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1953
1954         if (old_state == state)
1955                 return;
1956
1957         hdev->discovery.state = state;
1958
1959         switch (state) {
1960         case DISCOVERY_STOPPED:
1961                 hci_update_background_scan(hdev);
1962
1963                 if (old_state != DISCOVERY_STARTING)
1964                         mgmt_discovering(hdev, 0);
1965                 break;
1966         case DISCOVERY_STARTING:
1967                 break;
1968         case DISCOVERY_FINDING:
1969                 mgmt_discovering(hdev, 1);
1970                 break;
1971         case DISCOVERY_RESOLVING:
1972                 break;
1973         case DISCOVERY_STOPPING:
1974                 break;
1975         }
1976 }
1977
1978 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1979 {
1980         struct discovery_state *cache = &hdev->discovery;
1981         struct inquiry_entry *p, *n;
1982
1983         list_for_each_entry_safe(p, n, &cache->all, all) {
1984                 list_del(&p->all);
1985                 kfree(p);
1986         }
1987
1988         INIT_LIST_HEAD(&cache->unknown);
1989         INIT_LIST_HEAD(&cache->resolve);
1990 }
1991
1992 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1993                                                bdaddr_t *bdaddr)
1994 {
1995         struct discovery_state *cache = &hdev->discovery;
1996         struct inquiry_entry *e;
1997
1998         BT_DBG("cache %p, %pMR", cache, bdaddr);
1999
2000         list_for_each_entry(e, &cache->all, all) {
2001                 if (!bacmp(&e->data.bdaddr, bdaddr))
2002                         return e;
2003         }
2004
2005         return NULL;
2006 }
2007
2008 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2009                                                        bdaddr_t *bdaddr)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p, %pMR", cache, bdaddr);
2015
2016         list_for_each_entry(e, &cache->unknown, list) {
2017                 if (!bacmp(&e->data.bdaddr, bdaddr))
2018                         return e;
2019         }
2020
2021         return NULL;
2022 }
2023
2024 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2025                                                        bdaddr_t *bdaddr,
2026                                                        int state)
2027 {
2028         struct discovery_state *cache = &hdev->discovery;
2029         struct inquiry_entry *e;
2030
2031         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2032
2033         list_for_each_entry(e, &cache->resolve, list) {
2034                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2035                         return e;
2036                 if (!bacmp(&e->data.bdaddr, bdaddr))
2037                         return e;
2038         }
2039
2040         return NULL;
2041 }
2042
2043 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2044                                       struct inquiry_entry *ie)
2045 {
2046         struct discovery_state *cache = &hdev->discovery;
2047         struct list_head *pos = &cache->resolve;
2048         struct inquiry_entry *p;
2049
2050         list_del(&ie->list);
2051
             /* Keep the resolve list ordered by ascending |RSSI| (typically
              * the strongest signal first); entries already in NAME_PENDING
              * state never stop the walk, so they keep their position.
              */
2052         list_for_each_entry(p, &cache->resolve, list) {
2053                 if (p->name_state != NAME_PENDING &&
2054                     abs(p->data.rssi) >= abs(ie->data.rssi))
2055                         break;
2056                 pos = &p->list;
2057         }
2058
2059         list_add(&ie->list, pos);
2060 }
2061
2062 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2063                              bool name_known)
2064 {
2065         struct discovery_state *cache = &hdev->discovery;
2066         struct inquiry_entry *ie;
2067         u32 flags = 0;
2068
2069         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2070
2071         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2072
2073         if (!data->ssp_mode)
2074                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2075
2076         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2077         if (ie) {
2078                 if (!ie->data.ssp_mode)
2079                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2080
2081                 if (ie->name_state == NAME_NEEDED &&
2082                     data->rssi != ie->data.rssi) {
2083                         ie->data.rssi = data->rssi;
2084                         hci_inquiry_cache_update_resolve(hdev, ie);
2085                 }
2086
2087                 goto update;
2088         }
2089
2090         /* Entry not in the cache. Add new one. */
2091         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2092         if (!ie) {
2093                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2094                 goto done;
2095         }
2096
2097         list_add(&ie->all, &cache->all);
2098
2099         if (name_known) {
2100                 ie->name_state = NAME_KNOWN;
2101         } else {
2102                 ie->name_state = NAME_NOT_KNOWN;
2103                 list_add(&ie->list, &cache->unknown);
2104         }
2105
2106 update:
2107         if (name_known && ie->name_state != NAME_KNOWN &&
2108             ie->name_state != NAME_PENDING) {
2109                 ie->name_state = NAME_KNOWN;
2110                 list_del(&ie->list);
2111         }
2112
2113         memcpy(&ie->data, data, sizeof(*data));
2114         ie->timestamp = jiffies;
2115         cache->timestamp = jiffies;
2116
2117         if (ie->name_state == NAME_NOT_KNOWN)
2118                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2119
2120 done:
2121         return flags;
2122 }
2123
2124 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2125 {
2126         struct discovery_state *cache = &hdev->discovery;
2127         struct inquiry_info *info = (struct inquiry_info *) buf;
2128         struct inquiry_entry *e;
2129         int copied = 0;
2130
2131         list_for_each_entry(e, &cache->all, all) {
2132                 struct inquiry_data *data = &e->data;
2133
2134                 if (copied >= num)
2135                         break;
2136
2137                 bacpy(&info->bdaddr, &data->bdaddr);
2138                 info->pscan_rep_mode    = data->pscan_rep_mode;
2139                 info->pscan_period_mode = data->pscan_period_mode;
2140                 info->pscan_mode        = data->pscan_mode;
2141                 memcpy(info->dev_class, data->dev_class, 3);
2142                 info->clock_offset      = data->clock_offset;
2143
2144                 info++;
2145                 copied++;
2146         }
2147
2148         BT_DBG("cache %p, copied %d", cache, copied);
2149         return copied;
2150 }
2151
2152 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2153 {
2154         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2155         struct hci_dev *hdev = req->hdev;
2156         struct hci_cp_inquiry cp;
2157
2158         BT_DBG("%s", hdev->name);
2159
2160         if (test_bit(HCI_INQUIRY, &hdev->flags))
2161                 return;
2162
2163         /* Start Inquiry */
2164         memcpy(&cp.lap, &ir->lap, 3);
2165         cp.length  = ir->length;
2166         cp.num_rsp = ir->num_rsp;
2167         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2168 }
2169
2170 static int wait_inquiry(void *word)
2171 {
2172         schedule();
2173         return signal_pending(current);
2174 }
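
/* The helper above is the wait_on_bit() action used by hci_inquiry()
 * below: it is invoked while HCI_INQUIRY is still set, sleeps once and
 * returns non-zero when a pending signal should abort the wait, which
 * hci_inquiry() then turns into -EINTR.
 */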
2175
2176 int hci_inquiry(void __user *arg)
2177 {
2178         __u8 __user *ptr = arg;
2179         struct hci_inquiry_req ir;
2180         struct hci_dev *hdev;
2181         int err = 0, do_inquiry = 0, max_rsp;
2182         long timeo;
2183         __u8 *buf;
2184
2185         if (copy_from_user(&ir, ptr, sizeof(ir)))
2186                 return -EFAULT;
2187
2188         hdev = hci_dev_get(ir.dev_id);
2189         if (!hdev)
2190                 return -ENODEV;
2191
2192         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2193                 err = -EBUSY;
2194                 goto done;
2195         }
2196
2197         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2198                 err = -EOPNOTSUPP;
2199                 goto done;
2200         }
2201
2202         if (hdev->dev_type != HCI_BREDR) {
2203                 err = -EOPNOTSUPP;
2204                 goto done;
2205         }
2206
2207         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2208                 err = -EOPNOTSUPP;
2209                 goto done;
2210         }
2211
2212         hci_dev_lock(hdev);
2213         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2214             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2215                 hci_inquiry_cache_flush(hdev);
2216                 do_inquiry = 1;
2217         }
2218         hci_dev_unlock(hdev);
2219
2220         timeo = ir.length * msecs_to_jiffies(2000);
2221
2222         if (do_inquiry) {
2223                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2224                                    timeo);
2225                 if (err < 0)
2226                         goto done;
2227
2228                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2229                  * cleared). If it is interrupted by a signal, return -EINTR.
2230                  */
2231                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2232                                 TASK_INTERRUPTIBLE))
2233                         return -EINTR;
2234         }
2235
2236         /* For an unlimited number of responses, use a buffer with
2237          * 255 entries.
2238          */
2239         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2240
2241         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2242          * and then copy it to user space.
2243          */
2244         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2245         if (!buf) {
2246                 err = -ENOMEM;
2247                 goto done;
2248         }
2249
2250         hci_dev_lock(hdev);
2251         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2252         hci_dev_unlock(hdev);
2253
2254         BT_DBG("num_rsp %d", ir.num_rsp);
2255
2256         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2257                 ptr += sizeof(ir);
2258                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2259                                  ir.num_rsp))
2260                         err = -EFAULT;
2261         } else
2262                 err = -EFAULT;
2263
2264         kfree(buf);
2265
2266 done:
2267         hci_dev_put(hdev);
2268         return err;
2269 }
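
/* The ioctl above is reached from userspace through a raw HCI socket
 * (socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)). A minimal sketch,
 * assuming the BlueZ library headers; the LAP below is the General
 * Inquiry Access Code 0x9e8b33 stored little-endian, and the kernel
 * waits up to ir.length * 2 seconds for the inquiry to finish:
 *
 *      #include <stdlib.h>
 *      #include <sys/ioctl.h>
 *      #include <bluetooth/bluetooth.h>
 *      #include <bluetooth/hci.h>
 *
 *      static int inquiry_example(int raw_sock, int dev_id)
 *      {
 *              struct {
 *                      struct hci_inquiry_req ir;
 *                      inquiry_info info[255];
 *              } *req;
 *              int err;
 *
 *              req = calloc(1, sizeof(*req));
 *              if (!req)
 *                      return -1;
 *
 *              req->ir.dev_id  = dev_id;
 *              req->ir.flags   = IREQ_CACHE_FLUSH;
 *              req->ir.lap[0]  = 0x33;
 *              req->ir.lap[1]  = 0x8b;
 *              req->ir.lap[2]  = 0x9e;
 *              req->ir.length  = 8;
 *              req->ir.num_rsp = 255;
 *
 *              err = ioctl(raw_sock, HCIINQUIRY, (unsigned long) req);
 *              free(req);
 *              return err;
 *      }
 */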
2270
2271 static int hci_dev_do_open(struct hci_dev *hdev)
2272 {
2273         int ret = 0;
2274
2275         BT_DBG("%s %p", hdev->name, hdev);
2276
2277         hci_req_lock(hdev);
2278
2279         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2280                 ret = -ENODEV;
2281                 goto done;
2282         }
2283
2284         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2285             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2286                 /* Check for rfkill but allow the HCI setup stage to
2287                  * proceed (which in itself doesn't cause any RF activity).
2288                  */
2289                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2290                         ret = -ERFKILL;
2291                         goto done;
2292                 }
2293
2294                 /* Check for valid public address or a configured static
2295          * random address, but let the HCI setup proceed to
2296                  * be able to determine if there is a public address
2297                  * or not.
2298                  *
2299                  * In case of user channel usage, it is not important
2300                  * if a public address or static random address is
2301                  * available.
2302                  *
2303                  * This check is only valid for BR/EDR controllers
2304                  * since AMP controllers do not have an address.
2305                  */
2306                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2307                     hdev->dev_type == HCI_BREDR &&
2308                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2309                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2310                         ret = -EADDRNOTAVAIL;
2311                         goto done;
2312                 }
2313         }
2314
2315         if (test_bit(HCI_UP, &hdev->flags)) {
2316                 ret = -EALREADY;
2317                 goto done;
2318         }
2319
2320         if (hdev->open(hdev)) {
2321                 ret = -EIO;
2322                 goto done;
2323         }
2324
2325         atomic_set(&hdev->cmd_cnt, 1);
2326         set_bit(HCI_INIT, &hdev->flags);
2327
2328         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2329                 if (hdev->setup)
2330                         ret = hdev->setup(hdev);
2331
2332                 /* The transport driver can set these quirks before
2333                  * creating the HCI device or in its setup callback.
2334                  *
2335                  * In case any of them is set, the controller has to
2336                  * start up as unconfigured.
2337                  */
2338                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2339                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2340                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2341
2342                 /* For an unconfigured controller, it is required to
2343                  * read at least the version information provided by
2344                  * the Read Local Version Information command.
2345                  *
2346                  * If the set_bdaddr driver callback is provided, then
2347                  * also the original Bluetooth public device address
2348                  * will be read using the Read BD Address command.
2349                  */
2350                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2351                         ret = __hci_unconf_init(hdev);
2352         }
2353
2354         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2355                 /* If public address change is configured, ensure that
2356                  * the address gets programmed. If the driver does not
2357                  * support changing the public address, fail the power
2358                  * on procedure.
2359                  */
2360                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2361                     hdev->set_bdaddr)
2362                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2363                 else
2364                         ret = -EADDRNOTAVAIL;
2365         }
2366
2367         if (!ret) {
2368                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2369                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370                         ret = __hci_init(hdev);
2371         }
2372
2373         clear_bit(HCI_INIT, &hdev->flags);
2374
2375         if (!ret) {
2376                 hci_dev_hold(hdev);
2377                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2378                 set_bit(HCI_UP, &hdev->flags);
2379                 hci_notify(hdev, HCI_DEV_UP);
2380                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2381                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2382                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2383                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2384                     hdev->dev_type == HCI_BREDR) {
2385                         hci_dev_lock(hdev);
2386                         mgmt_powered(hdev, 1);
2387                         hci_dev_unlock(hdev);
2388                 }
2389         } else {
2390                 /* Init failed, cleanup */
2391                 flush_work(&hdev->tx_work);
2392                 flush_work(&hdev->cmd_work);
2393                 flush_work(&hdev->rx_work);
2394
2395                 skb_queue_purge(&hdev->cmd_q);
2396                 skb_queue_purge(&hdev->rx_q);
2397
2398                 if (hdev->flush)
2399                         hdev->flush(hdev);
2400
2401                 if (hdev->sent_cmd) {
2402                         kfree_skb(hdev->sent_cmd);
2403                         hdev->sent_cmd = NULL;
2404                 }
2405
2406                 hdev->close(hdev);
2407                 hdev->flags &= BIT(HCI_RAW);
2408         }
2409
2410 done:
2411         hci_req_unlock(hdev);
2412         return ret;
2413 }
2414
2415 /* ---- HCI ioctl helpers ---- */
2416
2417 int hci_dev_open(__u16 dev)
2418 {
2419         struct hci_dev *hdev;
2420         int err;
2421
2422         hdev = hci_dev_get(dev);
2423         if (!hdev)
2424                 return -ENODEV;
2425
2426         /* Devices that are marked as unconfigured can only be powered
2427          * up as user channel. Trying to bring them up as normal devices
2428          * will result in a failure. Only user channel operation is
2429          * possible.
2430          *
2431          * When this function is called for a user channel, the flag
2432          * HCI_USER_CHANNEL will be set first before attempting to
2433          * open the device.
2434          */
2435         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2436             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2437                 err = -EOPNOTSUPP;
2438                 goto done;
2439         }
2440
2441         /* We need to ensure that no other power on/off work is pending
2442          * before proceeding to call hci_dev_do_open. This is
2443          * particularly important if the setup procedure has not yet
2444          * completed.
2445          */
2446         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447                 cancel_delayed_work(&hdev->power_off);
2448
2449         /* After this call it is guaranteed that the setup procedure
2450          * has finished. This means that error conditions like RFKILL
2451          * or no valid public or static random address apply.
2452          */
2453         flush_workqueue(hdev->req_workqueue);
2454
2455         /* For controllers not using the management interface and that
2456          * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2457          * so that pairing works for them. Once the management interface
2458          * is in use this bit will be cleared again and userspace has
2459          * to explicitly enable it.
2460          */
2461         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2462             !test_bit(HCI_MGMT, &hdev->dev_flags))
2463                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2464
2465         err = hci_dev_do_open(hdev);
2466
2467 done:
2468         hci_dev_put(hdev);
2469         return err;
2470 }
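
/* hci_dev_open() backs the HCIDEVUP ioctl. A minimal userspace sketch,
 * assuming BlueZ headers; ioctl() failing with EALREADY means the
 * device was already up:
 *
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <bluetooth/bluetooth.h>
 *      #include <bluetooth/hci.h>
 *
 *      static int bring_up(int dev_id)
 *      {
 *              int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *              int err;
 *
 *              if (ctl < 0)
 *                      return -1;
 *              err = ioctl(ctl, HCIDEVUP, dev_id);
 *              close(ctl);
 *              return err;
 *      }
 */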
2471
2472 /* This function requires the caller holds hdev->lock */
2473 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2474 {
2475         struct hci_conn_params *p;
2476
2477         list_for_each_entry(p, &hdev->le_conn_params, list)
2478                 list_del_init(&p->action);
2479
2480         BT_DBG("All LE pending actions cleared");
2481 }
2482
2483 static int hci_dev_do_close(struct hci_dev *hdev)
2484 {
2485         BT_DBG("%s %p", hdev->name, hdev);
2486
2487         cancel_delayed_work(&hdev->power_off);
2488
2489         hci_req_cancel(hdev, ENODEV);
2490         hci_req_lock(hdev);
2491
2492         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2493                 cancel_delayed_work_sync(&hdev->cmd_timer);
2494                 hci_req_unlock(hdev);
2495                 return 0;
2496         }
2497
2498         /* Flush RX and TX works */
2499         flush_work(&hdev->tx_work);
2500         flush_work(&hdev->rx_work);
2501
2502         if (hdev->discov_timeout > 0) {
2503                 cancel_delayed_work(&hdev->discov_off);
2504                 hdev->discov_timeout = 0;
2505                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2506                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2507         }
2508
2509         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2510                 cancel_delayed_work(&hdev->service_cache);
2511
2512         cancel_delayed_work_sync(&hdev->le_scan_disable);
2513
2514         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2515                 cancel_delayed_work_sync(&hdev->rpa_expired);
2516
2517         hci_dev_lock(hdev);
2518         hci_inquiry_cache_flush(hdev);
2519         hci_conn_hash_flush(hdev);
2520         hci_pend_le_actions_clear(hdev);
2521         hci_dev_unlock(hdev);
2522
2523         hci_notify(hdev, HCI_DEV_DOWN);
2524
2525         if (hdev->flush)
2526                 hdev->flush(hdev);
2527
2528         /* Reset device */
2529         skb_queue_purge(&hdev->cmd_q);
2530         atomic_set(&hdev->cmd_cnt, 1);
2531         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2532             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2533             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2534                 set_bit(HCI_INIT, &hdev->flags);
2535                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2536                 clear_bit(HCI_INIT, &hdev->flags);
2537         }
2538
2539         /* flush cmd  work */
2540         flush_work(&hdev->cmd_work);
2541
2542         /* Drop queues */
2543         skb_queue_purge(&hdev->rx_q);
2544         skb_queue_purge(&hdev->cmd_q);
2545         skb_queue_purge(&hdev->raw_q);
2546
2547         /* Drop last sent command */
2548         if (hdev->sent_cmd) {
2549                 cancel_delayed_work_sync(&hdev->cmd_timer);
2550                 kfree_skb(hdev->sent_cmd);
2551                 hdev->sent_cmd = NULL;
2552         }
2553
2554         kfree_skb(hdev->recv_evt);
2555         hdev->recv_evt = NULL;
2556
2557         /* After this point our queues are empty
2558          * and no tasks are scheduled. */
2559         hdev->close(hdev);
2560
2561         /* Clear flags */
2562         hdev->flags &= BIT(HCI_RAW);
2563         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2564
2565         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2566                 if (hdev->dev_type == HCI_BREDR) {
2567                         hci_dev_lock(hdev);
2568                         mgmt_powered(hdev, 0);
2569                         hci_dev_unlock(hdev);
2570                 }
2571         }
2572
2573         /* Controller radio is available but is currently powered down */
2574         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2575
2576         memset(hdev->eir, 0, sizeof(hdev->eir));
2577         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2578         bacpy(&hdev->random_addr, BDADDR_ANY);
2579
2580         hci_req_unlock(hdev);
2581
2582         hci_dev_put(hdev);
2583         return 0;
2584 }
2585
2586 int hci_dev_close(__u16 dev)
2587 {
2588         struct hci_dev *hdev;
2589         int err;
2590
2591         hdev = hci_dev_get(dev);
2592         if (!hdev)
2593                 return -ENODEV;
2594
2595         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2596                 err = -EBUSY;
2597                 goto done;
2598         }
2599
2600         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2601                 cancel_delayed_work(&hdev->power_off);
2602
2603         err = hci_dev_do_close(hdev);
2604
2605 done:
2606         hci_dev_put(hdev);
2607         return err;
2608 }
2609
2610 int hci_dev_reset(__u16 dev)
2611 {
2612         struct hci_dev *hdev;
2613         int ret = 0;
2614
2615         hdev = hci_dev_get(dev);
2616         if (!hdev)
2617                 return -ENODEV;
2618
2619         hci_req_lock(hdev);
2620
2621         if (!test_bit(HCI_UP, &hdev->flags)) {
2622                 ret = -ENETDOWN;
2623                 goto done;
2624         }
2625
2626         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2627                 ret = -EBUSY;
2628                 goto done;
2629         }
2630
2631         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2632                 ret = -EOPNOTSUPP;
2633                 goto done;
2634         }
2635
2636         /* Drop queues */
2637         skb_queue_purge(&hdev->rx_q);
2638         skb_queue_purge(&hdev->cmd_q);
2639
2640         hci_dev_lock(hdev);
2641         hci_inquiry_cache_flush(hdev);
2642         hci_conn_hash_flush(hdev);
2643         hci_dev_unlock(hdev);
2644
2645         if (hdev->flush)
2646                 hdev->flush(hdev);
2647
2648         atomic_set(&hdev->cmd_cnt, 1);
2649         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2650
2651         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2652
2653 done:
2654         hci_req_unlock(hdev);
2655         hci_dev_put(hdev);
2656         return ret;
2657 }
2658
2659 int hci_dev_reset_stat(__u16 dev)
2660 {
2661         struct hci_dev *hdev;
2662         int ret = 0;
2663
2664         hdev = hci_dev_get(dev);
2665         if (!hdev)
2666                 return -ENODEV;
2667
2668         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2669                 ret = -EBUSY;
2670                 goto done;
2671         }
2672
2673         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2674                 ret = -EOPNOTSUPP;
2675                 goto done;
2676         }
2677
2678         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2679
2680 done:
2681         hci_dev_put(hdev);
2682         return ret;
2683 }
2684
2685 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2686 {
2687         bool conn_changed, discov_changed;
2688
2689         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2690
2691         if ((scan & SCAN_PAGE))
2692                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2693                                                  &hdev->dev_flags);
2694         else
2695                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2696                                                   &hdev->dev_flags);
2697
2698         if ((scan & SCAN_INQUIRY)) {
2699                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2700                                                    &hdev->dev_flags);
2701         } else {
2702                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2703                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2704                                                     &hdev->dev_flags);
2705         }
2706
2707         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708                 return;
2709
2710         if (conn_changed || discov_changed) {
2711                 /* In case this was disabled through mgmt */
2712                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2713
2714                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2715                         mgmt_update_adv_data(hdev);
2716
2717                 mgmt_new_settings(hdev);
2718         }
2719 }
2720
2721 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2722 {
2723         struct hci_dev *hdev;
2724         struct hci_dev_req dr;
2725         int err = 0;
2726
2727         if (copy_from_user(&dr, arg, sizeof(dr)))
2728                 return -EFAULT;
2729
2730         hdev = hci_dev_get(dr.dev_id);
2731         if (!hdev)
2732                 return -ENODEV;
2733
2734         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2735                 err = -EBUSY;
2736                 goto done;
2737         }
2738
2739         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2740                 err = -EOPNOTSUPP;
2741                 goto done;
2742         }
2743
2744         if (hdev->dev_type != HCI_BREDR) {
2745                 err = -EOPNOTSUPP;
2746                 goto done;
2747         }
2748
2749         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2750                 err = -EOPNOTSUPP;
2751                 goto done;
2752         }
2753
2754         switch (cmd) {
2755         case HCISETAUTH:
2756                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2757                                    HCI_INIT_TIMEOUT);
2758                 break;
2759
2760         case HCISETENCRYPT:
2761                 if (!lmp_encrypt_capable(hdev)) {
2762                         err = -EOPNOTSUPP;
2763                         break;
2764                 }
2765
2766                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2767                         /* Auth must be enabled first */
2768                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2769                                            HCI_INIT_TIMEOUT);
2770                         if (err)
2771                                 break;
2772                 }
2773
2774                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2775                                    HCI_INIT_TIMEOUT);
2776                 break;
2777
2778         case HCISETSCAN:
2779                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2780                                    HCI_INIT_TIMEOUT);
2781
2782                 /* Ensure that the connectable and discoverable states
2783                  * get correctly modified as this was a non-mgmt change.
2784                  */
2785                 if (!err)
2786                         hci_update_scan_state(hdev, dr.dev_opt);
2787                 break;
2788
2789         case HCISETLINKPOL:
2790                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2791                                    HCI_INIT_TIMEOUT);
2792                 break;
2793
2794         case HCISETLINKMODE:
2795                 hdev->link_mode = ((__u16) dr.dev_opt) &
2796                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2797                 break;
2798
2799         case HCISETPTYPE:
2800                 hdev->pkt_type = (__u16) dr.dev_opt;
2801                 break;
2802
2803         case HCISETACLMTU:
2804                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2805                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2806                 break;
2807
2808         case HCISETSCOMTU:
2809                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2810                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2811                 break;
2812
2813         default:
2814                 err = -EINVAL;
2815                 break;
2816         }
2817
2818 done:
2819         hci_dev_put(hdev);
2820         return err;
2821 }
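
/* HCISETACLMTU and HCISETSCOMTU pack two 16-bit values into the 32-bit
 * dev_opt: the pointer arithmetic above reads the packet count from the
 * first __u16 and the MTU from the second. A sketch of how a caller on
 * a little-endian host would fill it in (values are illustrative):
 *
 *      struct hci_dev_req dr = { .dev_id = dev_id };
 *      uint16_t mtu = 1021, pkts = 8;
 *
 *      dr.dev_opt = ((uint32_t) mtu << 16) | pkts;
 *      ioctl(ctl, HCISETACLMTU, (unsigned long) &dr);
 */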
2822
2823 int hci_get_dev_list(void __user *arg)
2824 {
2825         struct hci_dev *hdev;
2826         struct hci_dev_list_req *dl;
2827         struct hci_dev_req *dr;
2828         int n = 0, size, err;
2829         __u16 dev_num;
2830
2831         if (get_user(dev_num, (__u16 __user *) arg))
2832                 return -EFAULT;
2833
2834         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2835                 return -EINVAL;
2836
2837         size = sizeof(*dl) + dev_num * sizeof(*dr);
2838
2839         dl = kzalloc(size, GFP_KERNEL);
2840         if (!dl)
2841                 return -ENOMEM;
2842
2843         dr = dl->dev_req;
2844
2845         read_lock(&hci_dev_list_lock);
2846         list_for_each_entry(hdev, &hci_dev_list, list) {
2847                 unsigned long flags = hdev->flags;
2848
2849                 /* When auto-off is configured, the transport is running,
2850                  * but in that case still indicate that the device is
2851                  * actually down.
2852                  */
2853                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854                         flags &= ~BIT(HCI_UP);
2855
2856                 (dr + n)->dev_id  = hdev->id;
2857                 (dr + n)->dev_opt = flags;
2858
2859                 if (++n >= dev_num)
2860                         break;
2861         }
2862         read_unlock(&hci_dev_list_lock);
2863
2864         dl->dev_num = n;
2865         size = sizeof(*dl) + n * sizeof(*dr);
2866
2867         err = copy_to_user(arg, dl, size);
2868         kfree(dl);
2869
2870         return err ? -EFAULT : 0;
2871 }
2872
2873 int hci_get_dev_info(void __user *arg)
2874 {
2875         struct hci_dev *hdev;
2876         struct hci_dev_info di;
2877         unsigned long flags;
2878         int err = 0;
2879
2880         if (copy_from_user(&di, arg, sizeof(di)))
2881                 return -EFAULT;
2882
2883         hdev = hci_dev_get(di.dev_id);
2884         if (!hdev)
2885                 return -ENODEV;
2886
2887         /* When auto-off is configured, the transport is running, but
2888          * in that case still indicate that the device is actually
2889          * down.
2890          */
2891         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892                 flags = hdev->flags & ~BIT(HCI_UP);
2893         else
2894                 flags = hdev->flags;
2895
2896         strcpy(di.name, hdev->name);
2897         di.bdaddr   = hdev->bdaddr;
2898         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2899         di.flags    = flags;
2900         di.pkt_type = hdev->pkt_type;
2901         if (lmp_bredr_capable(hdev)) {
2902                 di.acl_mtu  = hdev->acl_mtu;
2903                 di.acl_pkts = hdev->acl_pkts;
2904                 di.sco_mtu  = hdev->sco_mtu;
2905                 di.sco_pkts = hdev->sco_pkts;
2906         } else {
2907                 di.acl_mtu  = hdev->le_mtu;
2908                 di.acl_pkts = hdev->le_pkts;
2909                 di.sco_mtu  = 0;
2910                 di.sco_pkts = 0;
2911         }
2912         di.link_policy = hdev->link_policy;
2913         di.link_mode   = hdev->link_mode;
2914
2915         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916         memcpy(&di.features, &hdev->features, sizeof(di.features));
2917
2918         if (copy_to_user(arg, &di, sizeof(di)))
2919                 err = -EFAULT;
2920
2921         hci_dev_put(hdev);
2922
2923         return err;
2924 }
2925
2926 /* ---- Interface to HCI drivers ---- */
2927
2928 static int hci_rfkill_set_block(void *data, bool blocked)
2929 {
2930         struct hci_dev *hdev = data;
2931
2932         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2933
2934         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2935                 return -EBUSY;
2936
2937         if (blocked) {
2938                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2939                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2941                         hci_dev_do_close(hdev);
2942         } else {
2943                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2944         }
2945
2946         return 0;
2947 }
2948
2949 static const struct rfkill_ops hci_rfkill_ops = {
2950         .set_block = hci_rfkill_set_block,
2951 };
2952
2953 static void hci_power_on(struct work_struct *work)
2954 {
2955         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2956         int err;
2957
2958         BT_DBG("%s", hdev->name);
2959
2960         err = hci_dev_do_open(hdev);
2961         if (err < 0) {
2962                 mgmt_set_powered_failed(hdev, err);
2963                 return;
2964         }
2965
2966         /* During the HCI setup phase, a few error conditions are
2967          * ignored and they need to be checked now. If they are still
2968          * valid, it is important to turn the device back off.
2969          */
2970         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2971             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2972             (hdev->dev_type == HCI_BREDR &&
2973              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2975                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976                 hci_dev_do_close(hdev);
2977         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2978                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979                                    HCI_AUTO_OFF_TIMEOUT);
2980         }
2981
2982         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2983                 /* For unconfigured devices, set the HCI_RAW flag
2984                  * so that userspace can easily identify them.
2985                  */
2986                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987                         set_bit(HCI_RAW, &hdev->flags);
2988
2989                 /* For fully configured devices, this will send
2990                  * the Index Added event. For unconfigured devices,
2991                  * it will send the Unconfigured Index Added event.
2992                  *
2993                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994                  * and no event will be sent.
2995                  */
2996                 mgmt_index_added(hdev);
2997         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2998                 /* Now that the controller is configured, it is
2999                  * important to clear the HCI_RAW flag.
3000                  */
3001                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002                         clear_bit(HCI_RAW, &hdev->flags);
3003
3004                 /* Powering on the controller with HCI_CONFIG set only
3005                  * happens with the transition from unconfigured to
3006                  * configured. This will send the Index Added event.
3007                  */
3008                 mgmt_index_added(hdev);
3009         }
3010 }
3011
3012 static void hci_power_off(struct work_struct *work)
3013 {
3014         struct hci_dev *hdev = container_of(work, struct hci_dev,
3015                                             power_off.work);
3016
3017         BT_DBG("%s", hdev->name);
3018
3019         hci_dev_do_close(hdev);
3020 }
3021
3022 static void hci_discov_off(struct work_struct *work)
3023 {
3024         struct hci_dev *hdev;
3025
3026         hdev = container_of(work, struct hci_dev, discov_off.work);
3027
3028         BT_DBG("%s", hdev->name);
3029
3030         mgmt_discoverable_timeout(hdev);
3031 }
3032
3033 void hci_uuids_clear(struct hci_dev *hdev)
3034 {
3035         struct bt_uuid *uuid, *tmp;
3036
3037         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038                 list_del(&uuid->list);
3039                 kfree(uuid);
3040         }
3041 }
3042
3043 void hci_link_keys_clear(struct hci_dev *hdev)
3044 {
3045         struct list_head *p, *n;
3046
3047         list_for_each_safe(p, n, &hdev->link_keys) {
3048                 struct link_key *key;
3049
3050                 key = list_entry(p, struct link_key, list);
3051
3052                 list_del(p);
3053                 kfree(key);
3054         }
3055 }
3056
3057 void hci_smp_ltks_clear(struct hci_dev *hdev)
3058 {
3059         struct smp_ltk *k, *tmp;
3060
3061         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3062                 list_del(&k->list);
3063                 kfree(k);
3064         }
3065 }
3066
3067 void hci_smp_irks_clear(struct hci_dev *hdev)
3068 {
3069         struct smp_irk *k, *tmp;
3070
3071         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3072                 list_del(&k->list);
3073                 kfree(k);
3074         }
3075 }
3076
3077 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3078 {
3079         struct link_key *k;
3080
3081         list_for_each_entry(k, &hdev->link_keys, list)
3082                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3083                         return k;
3084
3085         return NULL;
3086 }
3087
3088 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3089                                u8 key_type, u8 old_key_type)
3090 {
3091         /* Legacy key */
3092         if (key_type < 0x03)
3093                 return true;
3094
3095         /* Debug keys are insecure so don't store them persistently */
3096         if (key_type == HCI_LK_DEBUG_COMBINATION)
3097                 return false;
3098
3099         /* Changed combination key and there's no previous one */
3100         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3101                 return false;
3102
3103         /* Security mode 3 case */
3104         if (!conn)
3105                 return true;
3106
3107         /* Neither the local nor the remote side had no-bonding as a requirement */
3108         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3109                 return true;
3110
3111         /* Local side had dedicated bonding as requirement */
3112         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3113                 return true;
3114
3115         /* Remote side had dedicated bonding as requirement */
3116         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3117                 return true;
3118
3119         /* If none of the above criteria match, then don't store the key
3120          * persistently */
3121         return false;
3122 }
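
/* For reference when reading the checks above: the HCI authentication
 * requirements values are 0x00/0x01 (no bonding, without/with MITM),
 * 0x02/0x03 (dedicated bonding) and 0x04/0x05 (general bonding), so
 * "> 0x01" means some form of bonding was requested.
 */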
3123
3124 static u8 ltk_role(u8 type)
3125 {
3126         if (type == SMP_LTK)
3127                 return HCI_ROLE_MASTER;
3128
3129         return HCI_ROLE_SLAVE;
3130 }
3131
3132 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3133                              u8 role)
3134 {
3135         struct smp_ltk *k;
3136
3137         list_for_each_entry(k, &hdev->long_term_keys, list) {
3138                 if (k->ediv != ediv || k->rand != rand)
3139                         continue;
3140
3141                 if (ltk_role(k->type) != role)
3142                         continue;
3143
3144                 return k;
3145         }
3146
3147         return NULL;
3148 }
3149
3150 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3151                                      u8 addr_type, u8 role)
3152 {
3153         struct smp_ltk *k;
3154
3155         list_for_each_entry(k, &hdev->long_term_keys, list)
3156                 if (addr_type == k->bdaddr_type &&
3157                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3158                     ltk_role(k->type) == role)
3159                         return k;
3160
3161         return NULL;
3162 }
3163
3164 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3165 {
3166         struct smp_irk *irk;
3167
3168         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169                 if (!bacmp(&irk->rpa, rpa))
3170                         return irk;
3171         }
3172
3173         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3174                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3175                         bacpy(&irk->rpa, rpa);
3176                         return irk;
3177                 }
3178         }
3179
3180         return NULL;
3181 }
3182
3183 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3184                                      u8 addr_type)
3185 {
3186         struct smp_irk *irk;
3187
3188         /* Identity Address must be public or static random. A static
              * random address has its two most significant bits set; bdaddr_t
              * is little-endian, so the test below checks bits 7:6 of b[5].
              */
3189         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3190                 return NULL;
3191
3192         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3193                 if (addr_type == irk->addr_type &&
3194                     bacmp(bdaddr, &irk->bdaddr) == 0)
3195                         return irk;
3196         }
3197
3198         return NULL;
3199 }
3200
3201 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3202                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3203                                   u8 pin_len, bool *persistent)
3204 {
3205         struct link_key *key, *old_key;
3206         u8 old_key_type;
3207
3208         old_key = hci_find_link_key(hdev, bdaddr);
3209         if (old_key) {
3210                 old_key_type = old_key->type;
3211                 key = old_key;
3212         } else {
3213                 old_key_type = conn ? conn->key_type : 0xff;
3214                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3215                 if (!key)
3216                         return NULL;
3217                 list_add(&key->list, &hdev->link_keys);
3218         }
3219
3220         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3221
3222         /* Some buggy controller combinations generate a changed
3223          * combination key for legacy pairing even when there's no
3224          * previous key */
3225         if (type == HCI_LK_CHANGED_COMBINATION &&
3226             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3227                 type = HCI_LK_COMBINATION;
3228                 if (conn)
3229                         conn->key_type = type;
3230         }
3231
3232         bacpy(&key->bdaddr, bdaddr);
3233         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3234         key->pin_len = pin_len;
3235
3236         if (type == HCI_LK_CHANGED_COMBINATION)
3237                 key->type = old_key_type;
3238         else
3239                 key->type = type;
3240
3241         if (persistent)
3242                 *persistent = hci_persistent_key(hdev, conn, type,
3243                                                  old_key_type);
3244
3245         return key;
3246 }
3247
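/* Add or update an SMP Long Term Key, keyed by identity address,
 * address type and the role derived from the key type.
 */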
3248 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3249                             u8 addr_type, u8 type, u8 authenticated,
3250                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3251 {
3252         struct smp_ltk *key, *old_key;
3253         u8 role = ltk_role(type);
3254
3255         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3256         if (old_key)
3257                 key = old_key;
3258         else {
3259                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3260                 if (!key)
3261                         return NULL;
3262                 list_add(&key->list, &hdev->long_term_keys);
3263         }
3264
3265         bacpy(&key->bdaddr, bdaddr);
3266         key->bdaddr_type = addr_type;
3267         memcpy(key->val, tk, sizeof(key->val));
3268         key->authenticated = authenticated;
3269         key->ediv = ediv;
3270         key->rand = rand;
3271         key->enc_size = enc_size;
3272         key->type = type;
3273
3274         return key;
3275 }
3276
3277 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3278                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3279 {
3280         struct smp_irk *irk;
3281
3282         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3283         if (!irk) {
3284                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3285                 if (!irk)
3286                         return NULL;
3287
3288                 bacpy(&irk->bdaddr, bdaddr);
3289                 irk->addr_type = addr_type;
3290
3291                 list_add(&irk->list, &hdev->identity_resolving_keys);
3292         }
3293
3294         memcpy(irk->val, val, 16);
3295         bacpy(&irk->rpa, rpa);
3296
3297         return irk;
3298 }
3299
3300 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3301 {
3302         struct link_key *key;
3303
3304         key = hci_find_link_key(hdev, bdaddr);
3305         if (!key)
3306                 return -ENOENT;
3307
3308         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3309
3310         list_del(&key->list);
3311         kfree(key);
3312
3313         return 0;
3314 }
3315
3316 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3317 {
3318         struct smp_ltk *k, *tmp;
3319         int removed = 0;
3320
3321         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3322                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3323                         continue;
3324
3325                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3326
3327                 list_del(&k->list);
3328                 kfree(k);
3329                 removed++;
3330         }
3331
3332         return removed ? 0 : -ENOENT;
3333 }
3334
3335 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3336 {
3337         struct smp_irk *k, *tmp;
3338
3339         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3340                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3341                         continue;
3342
3343                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3344
3345                 list_del(&k->list);
3346                 kfree(k);
3347         }
3348 }
3349
3350 /* HCI command timer function */
3351 static void hci_cmd_timeout(struct work_struct *work)
3352 {
3353         struct hci_dev *hdev = container_of(work, struct hci_dev,
3354                                             cmd_timer.work);
3355
3356         if (hdev->sent_cmd) {
3357                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3358                 u16 opcode = __le16_to_cpu(sent->opcode);
3359
3360                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3361         } else {
3362                 BT_ERR("%s command tx timeout", hdev->name);
3363         }
3364
3365         atomic_set(&hdev->cmd_cnt, 1);
3366         queue_work(hdev->workqueue, &hdev->cmd_work);
3367 }
3368
3369 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3370                                           bdaddr_t *bdaddr)
3371 {
3372         struct oob_data *data;
3373
3374         list_for_each_entry(data, &hdev->remote_oob_data, list)
3375                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3376                         return data;
3377
3378         return NULL;
3379 }
3380
3381 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3382 {
3383         struct oob_data *data;
3384
3385         data = hci_find_remote_oob_data(hdev, bdaddr);
3386         if (!data)
3387                 return -ENOENT;
3388
3389         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3390
3391         list_del(&data->list);
3392         kfree(data);
3393
3394         return 0;
3395 }
3396
3397 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3398 {
3399         struct oob_data *data, *n;
3400
3401         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3402                 list_del(&data->list);
3403                 kfree(data);
3404         }
3405 }
3406
3407 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3408                             u8 *hash, u8 *randomizer)
3409 {
3410         struct oob_data *data;
3411
3412         data = hci_find_remote_oob_data(hdev, bdaddr);
3413         if (!data) {
3414                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3415                 if (!data)
3416                         return -ENOMEM;
3417
3418                 bacpy(&data->bdaddr, bdaddr);
3419                 list_add(&data->list, &hdev->remote_oob_data);
3420         }
3421
3422         memcpy(data->hash192, hash, sizeof(data->hash192));
3423         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3424
3425         memset(data->hash256, 0, sizeof(data->hash256));
3426         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3427
3428         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3429
3430         return 0;
3431 }
3432
3433 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3434                                 u8 *hash192, u8 *randomizer192,
3435                                 u8 *hash256, u8 *randomizer256)
3436 {
3437         struct oob_data *data;
3438
3439         data = hci_find_remote_oob_data(hdev, bdaddr);
3440         if (!data) {
3441                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3442                 if (!data)
3443                         return -ENOMEM;
3444
3445                 bacpy(&data->bdaddr, bdaddr);
3446                 list_add(&data->list, &hdev->remote_oob_data);
3447         }
3448
3449         memcpy(data->hash192, hash192, sizeof(data->hash192));
3450         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3451
3452         memcpy(data->hash256, hash256, sizeof(data->hash256));
3453         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3454
3455         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3456
3457         return 0;
3458 }
3459
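/* Generic helpers for the bdaddr+type lists (blacklist, whitelist
 * and LE white list). Usage sketch with a hypothetical list and
 * address:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *
 * which returns -EEXIST if the entry is already present and -EBADF
 * for BDADDR_ANY.
 */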
3460 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3461                                          bdaddr_t *bdaddr, u8 type)
3462 {
3463         struct bdaddr_list *b;
3464
3465         list_for_each_entry(b, bdaddr_list, list) {
3466                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3467                         return b;
3468         }
3469
3470         return NULL;
3471 }
3472
3473 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3474 {
3475         struct list_head *p, *n;
3476
3477         list_for_each_safe(p, n, bdaddr_list) {
3478                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3479
3480                 list_del(p);
3481                 kfree(b);
3482         }
3483 }
3484
3485 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3486 {
3487         struct bdaddr_list *entry;
3488
3489         if (!bacmp(bdaddr, BDADDR_ANY))
3490                 return -EBADF;
3491
3492         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3493                 return -EEXIST;
3494
3495         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3496         if (!entry)
3497                 return -ENOMEM;
3498
3499         bacpy(&entry->bdaddr, bdaddr);
3500         entry->bdaddr_type = type;
3501
3502         list_add(&entry->list, list);
3503
3504         return 0;
3505 }
3506
3507 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3508 {
3509         struct bdaddr_list *entry;
3510
3511         if (!bacmp(bdaddr, BDADDR_ANY)) {
3512                 hci_bdaddr_list_clear(list);
3513                 return 0;
3514         }
3515
3516         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3517         if (!entry)
3518                 return -ENOENT;
3519
3520         list_del(&entry->list);
3521         kfree(entry);
3522
3523         return 0;
3524 }
3525
3526 /* This function requires the caller holds hdev->lock */
3527 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3528                                                bdaddr_t *addr, u8 addr_type)
3529 {
3530         struct hci_conn_params *params;
3531
3532         /* The conn params list only contains identity addresses */
3533         if (!hci_is_identity_address(addr, addr_type))
3534                 return NULL;
3535
3536         list_for_each_entry(params, &hdev->le_conn_params, list) {
3537                 if (bacmp(&params->addr, addr) == 0 &&
3538                     params->addr_type == addr_type) {
3539                         return params;
3540                 }
3541         }
3542
3543         return NULL;
3544 }
3545
3546 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3547 {
3548         struct hci_conn *conn;
3549
3550         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3551         if (!conn)
3552                 return false;
3553
3554         if (conn->dst_type != type)
3555                 return false;
3556
3557         if (conn->state != BT_CONNECTED)
3558                 return false;
3559
3560         return true;
3561 }
3562
3563 /* This function requires the caller holds hdev->lock */
3564 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3565                                                   bdaddr_t *addr, u8 addr_type)
3566 {
3567         struct hci_conn_params *param;
3568
3569         /* The list only contains identity addresses */
3570         if (!hci_is_identity_address(addr, addr_type))
3571                 return NULL;
3572
3573         list_for_each_entry(param, list, action) {
3574                 if (bacmp(&param->addr, addr) == 0 &&
3575                     param->addr_type == addr_type)
3576                         return param;
3577         }
3578
3579         return NULL;
3580 }
3581
3582 /* This function requires the caller holds hdev->lock */
3583 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3584                                             bdaddr_t *addr, u8 addr_type)
3585 {
3586         struct hci_conn_params *params;
3587
3588         if (!hci_is_identity_address(addr, addr_type))
3589                 return NULL;
3590
3591         params = hci_conn_params_lookup(hdev, addr, addr_type);
3592         if (params)
3593                 return params;
3594
3595         params = kzalloc(sizeof(*params), GFP_KERNEL);
3596         if (!params) {
3597                 BT_ERR("Out of memory");
3598                 return NULL;
3599         }
3600
3601         bacpy(&params->addr, addr);
3602         params->addr_type = addr_type;
3603
3604         list_add(&params->list, &hdev->le_conn_params);
3605         INIT_LIST_HEAD(&params->action);
3606
3607         params->conn_min_interval = hdev->le_conn_min_interval;
3608         params->conn_max_interval = hdev->le_conn_max_interval;
3609         params->conn_latency = hdev->le_conn_latency;
3610         params->supervision_timeout = hdev->le_supv_timeout;
3611         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3612
3613         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3614
3615         return params;
3616 }
3617
3618 /* This function requires the caller holds hdev->lock */
3619 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3620                         u8 auto_connect)
3621 {
3622         struct hci_conn_params *params;
3623
3624         params = hci_conn_params_add(hdev, addr, addr_type);
3625         if (!params)
3626                 return -EIO;
3627
3628         if (params->auto_connect == auto_connect)
3629                 return 0;
3630
3631         list_del_init(&params->action);
3632
3633         switch (auto_connect) {
3634         case HCI_AUTO_CONN_DISABLED:
3635         case HCI_AUTO_CONN_LINK_LOSS:
3636                 hci_update_background_scan(hdev);
3637                 break;
3638         case HCI_AUTO_CONN_REPORT:
3639                 list_add(&params->action, &hdev->pend_le_reports);
3640                 hci_update_background_scan(hdev);
3641                 break;
3642         case HCI_AUTO_CONN_ALWAYS:
3643                 if (!is_connected(hdev, addr, addr_type)) {
3644                         list_add(&params->action, &hdev->pend_le_conns);
3645                         hci_update_background_scan(hdev);
3646                 }
3647                 break;
3648         }
3649
3650         params->auto_connect = auto_connect;
3651
3652         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3653                auto_connect);
3654
3655         return 0;
3656 }
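
/* Usage sketch for the API above (hypothetical caller; hdev->lock
 * must be held):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * With HCI_AUTO_CONN_ALWAYS the device is placed on
 * hdev->pend_le_conns (unless already connected) and the background
 * scan is updated accordingly.
 */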
3657
3658 /* This function requires the caller holds hdev->lock */
3659 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3660 {
3661         struct hci_conn_params *params;
3662
3663         params = hci_conn_params_lookup(hdev, addr, addr_type);
3664         if (!params)
3665                 return;
3666
3667         list_del(&params->action);
3668         list_del(&params->list);
3669         kfree(params);
3670
3671         hci_update_background_scan(hdev);
3672
3673         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3674 }
3675
3676 /* This function requires the caller holds hdev->lock */
3677 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3678 {
3679         struct hci_conn_params *params, *tmp;
3680
3681         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3682                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3683                         continue;
3684                 list_del(&params->list);
3685                 kfree(params);
3686         }
3687
3688         BT_DBG("All disabled LE connection parameters were removed");
3689 }
3690
3691 /* This function requires the caller holds hdev->lock */
3692 void hci_conn_params_clear_all(struct hci_dev *hdev)
3693 {
3694         struct hci_conn_params *params, *tmp;
3695
3696         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3697                 list_del(&params->action);
3698                 list_del(&params->list);
3699                 kfree(params);
3700         }
3701
3702         hci_update_background_scan(hdev);
3703
3704         BT_DBG("All LE connection parameters were removed");
3705 }
3706
3707 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3708 {
3709         if (status) {
3710                 BT_ERR("Failed to start inquiry: status %d", status);
3711
3712                 hci_dev_lock(hdev);
3713                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3714                 hci_dev_unlock(hdev);
3715                 return;
3716         }
3717 }
3718
3719 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3720 {
3721         /* General inquiry access code (GIAC) */
3722         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3723         struct hci_request req;
3724         struct hci_cp_inquiry cp;
3725         int err;
3726
3727         if (status) {
3728                 BT_ERR("Failed to disable LE scanning: status %d", status);
3729                 return;
3730         }
3731
3732         switch (hdev->discovery.type) {
3733         case DISCOV_TYPE_LE:
3734                 hci_dev_lock(hdev);
3735                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3736                 hci_dev_unlock(hdev);
3737                 break;
3738
3739         case DISCOV_TYPE_INTERLEAVED:
3740                 hci_req_init(&req, hdev);
3741
3742                 memset(&cp, 0, sizeof(cp));
3743                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3744                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3745                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3746
3747                 hci_dev_lock(hdev);
3748
3749                 hci_inquiry_cache_flush(hdev);
3750
3751                 err = hci_req_run(&req, inquiry_complete);
3752                 if (err) {
3753                         BT_ERR("Inquiry request failed: err %d", err);
3754                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3755                 }
3756
3757                 hci_dev_unlock(hdev);
3758                 break;
3759         }
3760 }
3761
3762 static void le_scan_disable_work(struct work_struct *work)
3763 {
3764         struct hci_dev *hdev = container_of(work, struct hci_dev,
3765                                             le_scan_disable.work);
3766         struct hci_request req;
3767         int err;
3768
3769         BT_DBG("%s", hdev->name);
3770
3771         hci_req_init(&req, hdev);
3772
3773         hci_req_add_le_scan_disable(&req);
3774
3775         err = hci_req_run(&req, le_scan_disable_work_complete);
3776         if (err)
3777                 BT_ERR("Disable LE scanning request failed: err %d", err);
3778 }
3779
3780 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3781 {
3782         struct hci_dev *hdev = req->hdev;
3783
3784         /* If we're advertising or initiating an LE connection, we can't
3785          * go ahead and change the random address at this time. This is
3786          * because the eventual initiator address used for the
3787          * subsequently created connection will be undefined (some
3788          * controllers use the new address and others the one we had
3789          * when the operation started).
3790          *
3791          * In this kind of scenario, skip the update and let the random
3792          * address be updated at the next cycle.
3793          */
3794         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3795             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3796                 BT_DBG("Deferring random address update");
3797                 return;
3798         }
3799
3800         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3801 }
3802
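/* Select the own-address type for an outgoing request: a resolvable
 * private address when privacy is enabled, an unresolvable private
 * address when privacy is required anyway, the static random address
 * when forced or when no public address exists, and the public
 * address otherwise.
 */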
3803 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3804                               u8 *own_addr_type)
3805 {
3806         struct hci_dev *hdev = req->hdev;
3807         int err;
3808
3809         /* If privacy is enabled, use a resolvable private address. If
3810          * the current RPA has expired or there is something other than
3811          * the current RPA in use, then generate a new one.
3812          */
3813         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3814                 int to;
3815
3816                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3817
3818                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3819                     !bacmp(&hdev->random_addr, &hdev->rpa))
3820                         return 0;
3821
3822                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3823                 if (err < 0) {
3824                         BT_ERR("%s failed to generate new RPA", hdev->name);
3825                         return err;
3826                 }
3827
3828                 set_random_addr(req, &hdev->rpa);
3829
3830                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3831                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3832
3833                 return 0;
3834         }
3835
3836         /* In case of required privacy without a resolvable private address,
3837          * use an unresolvable private address. This is useful for active
3838          * scanning and non-connectable advertising.
3839          */
3840         if (require_privacy) {
3841                 bdaddr_t urpa;
3842
3843                 get_random_bytes(&urpa, 6);
3844                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3845
3846                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3847                 set_random_addr(req, &urpa);
3848                 return 0;
3849         }
3850
3851         /* If forcing static address is in use or there is no public
3852          * address, use the static address as the random address (but skip
3853          * the HCI command if the current random address is already the
3854          * static one).
3855          */
3856         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3857             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3858                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3859                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3860                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3861                                     &hdev->static_addr);
3862                 return 0;
3863         }
3864
3865         /* Neither privacy nor static address is being used so use a
3866          * public address.
3867          */
3868         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3869
3870         return 0;
3871 }
3872
3873 /* Copy the Identity Address of the controller.
3874  *
3875  * If the controller has a public BD_ADDR, then by default use that one.
3876  * If this is an LE-only controller without a public address, default to
3877  * the static random address.
3878  *
3879  * For debugging purposes it is possible to force controllers with a
3880  * public address to use the static random address instead.
3881  */
3882 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3883                                u8 *bdaddr_type)
3884 {
3885         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3886             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3887                 bacpy(bdaddr, &hdev->static_addr);
3888                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3889         } else {
3890                 bacpy(bdaddr, &hdev->bdaddr);
3891                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3892         }
3893 }
3894
3895 /* Alloc HCI device */
3896 struct hci_dev *hci_alloc_dev(void)
3897 {
3898         struct hci_dev *hdev;
3899
3900         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3901         if (!hdev)
3902                 return NULL;
3903
3904         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3905         hdev->esco_type = (ESCO_HV1);
3906         hdev->link_mode = (HCI_LM_ACCEPT);
3907         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3908         hdev->io_capability = 0x03;     /* No Input No Output */
3909         hdev->manufacturer = 0xffff;    /* Default to internal use */
3910         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3911         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3912
3913         hdev->sniff_max_interval = 800;
3914         hdev->sniff_min_interval = 80;
3915
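        /* The LE defaults below use the standard HCI units: scan
         * interval and window in 0.625 ms slots (0x0060 = 60 ms,
         * 0x0030 = 30 ms), connection interval in 1.25 ms units
         * (0x0028 = 50 ms, 0x0038 = 70 ms) and supervision timeout
         * in 10 ms units (0x002a = 420 ms).
         */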
3916         hdev->le_adv_channel_map = 0x07;
3917         hdev->le_scan_interval = 0x0060;
3918         hdev->le_scan_window = 0x0030;
3919         hdev->le_conn_min_interval = 0x0028;
3920         hdev->le_conn_max_interval = 0x0038;
3921         hdev->le_conn_latency = 0x0000;
3922         hdev->le_supv_timeout = 0x002a;
3923
3924         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3925         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3926         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3927         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3928
3929         mutex_init(&hdev->lock);
3930         mutex_init(&hdev->req_lock);
3931
3932         INIT_LIST_HEAD(&hdev->mgmt_pending);
3933         INIT_LIST_HEAD(&hdev->blacklist);
3934         INIT_LIST_HEAD(&hdev->whitelist);
3935         INIT_LIST_HEAD(&hdev->uuids);
3936         INIT_LIST_HEAD(&hdev->link_keys);
3937         INIT_LIST_HEAD(&hdev->long_term_keys);
3938         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3939         INIT_LIST_HEAD(&hdev->remote_oob_data);
3940         INIT_LIST_HEAD(&hdev->le_white_list);
3941         INIT_LIST_HEAD(&hdev->le_conn_params);
3942         INIT_LIST_HEAD(&hdev->pend_le_conns);
3943         INIT_LIST_HEAD(&hdev->pend_le_reports);
3944         INIT_LIST_HEAD(&hdev->conn_hash.list);
3945
3946         INIT_WORK(&hdev->rx_work, hci_rx_work);
3947         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3948         INIT_WORK(&hdev->tx_work, hci_tx_work);
3949         INIT_WORK(&hdev->power_on, hci_power_on);
3950
3951         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3952         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3953         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3954
3955         skb_queue_head_init(&hdev->rx_q);
3956         skb_queue_head_init(&hdev->cmd_q);
3957         skb_queue_head_init(&hdev->raw_q);
3958
3959         init_waitqueue_head(&hdev->req_wait_q);
3960
3961         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3962
3963         hci_init_sysfs(hdev);
3964         discovery_init(hdev);
3965
3966         return hdev;
3967 }
3968 EXPORT_SYMBOL(hci_alloc_dev);
3969
3970 /* Free HCI device */
3971 void hci_free_dev(struct hci_dev *hdev)
3972 {
3973         /* will free via device release */
3974         put_device(&hdev->dev);
3975 }
3976 EXPORT_SYMBOL(hci_free_dev);
3977
3978 /* Register HCI device */
3979 int hci_register_dev(struct hci_dev *hdev)
3980 {
3981         int id, error;
3982
3983         if (!hdev->open || !hdev->close || !hdev->send)
3984                 return -EINVAL;
3985
3986         /* Do not allow HCI_AMP devices to register at index 0,
3987          * so the index can be used as the AMP controller ID.
3988          */
3989         switch (hdev->dev_type) {
3990         case HCI_BREDR:
3991                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3992                 break;
3993         case HCI_AMP:
3994                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3995                 break;
3996         default:
3997                 return -EINVAL;
3998         }
3999
4000         if (id < 0)
4001                 return id;
4002
4003         sprintf(hdev->name, "hci%d", id);
4004         hdev->id = id;
4005
4006         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4007
4008         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4009                                           WQ_MEM_RECLAIM, 1, hdev->name);
4010         if (!hdev->workqueue) {
4011                 error = -ENOMEM;
4012                 goto err;
4013         }
4014
4015         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4016                                               WQ_MEM_RECLAIM, 1, hdev->name);
4017         if (!hdev->req_workqueue) {
4018                 destroy_workqueue(hdev->workqueue);
4019                 error = -ENOMEM;
4020                 goto err;
4021         }
4022
4023         if (!IS_ERR_OR_NULL(bt_debugfs))
4024                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4025
4026         dev_set_name(&hdev->dev, "%s", hdev->name);
4027
4028         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4029                                                CRYPTO_ALG_ASYNC);
4030         if (IS_ERR(hdev->tfm_aes)) {
4031                 BT_ERR("Unable to create crypto context");
4032                 error = PTR_ERR(hdev->tfm_aes);
4033                 hdev->tfm_aes = NULL;
4034                 goto err_wqueue;
4035         }
4036
4037         error = device_add(&hdev->dev);
4038         if (error < 0)
4039                 goto err_tfm;
4040
4041         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4042                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4043                                     hdev);
4044         if (hdev->rfkill) {
4045                 if (rfkill_register(hdev->rfkill) < 0) {
4046                         rfkill_destroy(hdev->rfkill);
4047                         hdev->rfkill = NULL;
4048                 }
4049         }
4050
4051         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4052                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4053
4054         set_bit(HCI_SETUP, &hdev->dev_flags);
4055         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4056
4057         if (hdev->dev_type == HCI_BREDR) {
4058                 /* Assume BR/EDR support until proven otherwise (such as
4059                  * through reading supported features during init).
4060                  */
4061                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4062         }
4063
4064         write_lock(&hci_dev_list_lock);
4065         list_add(&hdev->list, &hci_dev_list);
4066         write_unlock(&hci_dev_list_lock);
4067
4068         /* Devices that are marked for raw-only usage are unconfigured
4069          * and should not be included in normal operation.
4070          */
4071         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4072                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4073
4074         hci_notify(hdev, HCI_DEV_REG);
4075         hci_dev_hold(hdev);
4076
4077         queue_work(hdev->req_workqueue, &hdev->power_on);
4078
4079         return id;
4080
4081 err_tfm:
4082         crypto_free_blkcipher(hdev->tfm_aes);
4083 err_wqueue:
4084         destroy_workqueue(hdev->workqueue);
4085         destroy_workqueue(hdev->req_workqueue);
4086 err:
4087         ida_simple_remove(&hci_index_ida, hdev->id);
4088
4089         return error;
4090 }
4091 EXPORT_SYMBOL(hci_register_dev);
4092
4093 /* Unregister HCI device */
4094 void hci_unregister_dev(struct hci_dev *hdev)
4095 {
4096         int i, id;
4097
4098         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4099
4100         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4101
4102         id = hdev->id;
4103
4104         write_lock(&hci_dev_list_lock);
4105         list_del(&hdev->list);
4106         write_unlock(&hci_dev_list_lock);
4107
4108         hci_dev_do_close(hdev);
4109
4110         for (i = 0; i < NUM_REASSEMBLY; i++)
4111                 kfree_skb(hdev->reassembly[i]);
4112
4113         cancel_work_sync(&hdev->power_on);
4114
4115         if (!test_bit(HCI_INIT, &hdev->flags) &&
4116             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4117             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4118                 hci_dev_lock(hdev);
4119                 mgmt_index_removed(hdev);
4120                 hci_dev_unlock(hdev);
4121         }
4122
4123         /* mgmt_index_removed should take care of emptying the
4124          * pending list */
4125         BUG_ON(!list_empty(&hdev->mgmt_pending));
4126
4127         hci_notify(hdev, HCI_DEV_UNREG);
4128
4129         if (hdev->rfkill) {
4130                 rfkill_unregister(hdev->rfkill);
4131                 rfkill_destroy(hdev->rfkill);
4132         }
4133
4134         if (hdev->tfm_aes)
4135                 crypto_free_blkcipher(hdev->tfm_aes);
4136
4137         device_del(&hdev->dev);
4138
4139         debugfs_remove_recursive(hdev->debugfs);
4140
4141         destroy_workqueue(hdev->workqueue);
4142         destroy_workqueue(hdev->req_workqueue);
4143
4144         hci_dev_lock(hdev);
4145         hci_bdaddr_list_clear(&hdev->blacklist);
4146         hci_bdaddr_list_clear(&hdev->whitelist);
4147         hci_uuids_clear(hdev);
4148         hci_link_keys_clear(hdev);
4149         hci_smp_ltks_clear(hdev);
4150         hci_smp_irks_clear(hdev);
4151         hci_remote_oob_data_clear(hdev);
4152         hci_bdaddr_list_clear(&hdev->le_white_list);
4153         hci_conn_params_clear_all(hdev);
4154         hci_dev_unlock(hdev);
4155
4156         hci_dev_put(hdev);
4157
4158         ida_simple_remove(&hci_index_ida, id);
4159 }
4160 EXPORT_SYMBOL(hci_unregister_dev);
4161
4162 /* Suspend HCI device */
4163 int hci_suspend_dev(struct hci_dev *hdev)
4164 {
4165         hci_notify(hdev, HCI_DEV_SUSPEND);
4166         return 0;
4167 }
4168 EXPORT_SYMBOL(hci_suspend_dev);
4169
4170 /* Resume HCI device */
4171 int hci_resume_dev(struct hci_dev *hdev)
4172 {
4173         hci_notify(hdev, HCI_DEV_RESUME);
4174         return 0;
4175 }
4176 EXPORT_SYMBOL(hci_resume_dev);
4177
4178 /* Receive frame from HCI drivers */
4179 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4180 {
4181         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4182                       !test_bit(HCI_INIT, &hdev->flags))) {
4183                 kfree_skb(skb);
4184                 return -ENXIO;
4185         }
4186
4187         /* Incoming skb */
4188         bt_cb(skb)->incoming = 1;
4189
4190         /* Time stamp */
4191         __net_timestamp(skb);
4192
4193         skb_queue_tail(&hdev->rx_q, skb);
4194         queue_work(hdev->workqueue, &hdev->rx_work);
4195
4196         return 0;
4197 }
4198 EXPORT_SYMBOL(hci_recv_frame);
4199
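/* Reassemble a stream of bytes into an HCI packet of the given type
 * in hdev->reassembly[index]. Returns the number of input bytes left
 * unconsumed, or a negative error. The expected length is read from
 * the packet header once enough of it has been received.
 */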
4200 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4201                           int count, __u8 index)
4202 {
4203         int len = 0;
4204         int hlen = 0;
4205         int remain = count;
4206         struct sk_buff *skb;
4207         struct bt_skb_cb *scb;
4208
4209         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4210             index >= NUM_REASSEMBLY)
4211                 return -EILSEQ;
4212
4213         skb = hdev->reassembly[index];
4214
4215         if (!skb) {
4216                 switch (type) {
4217                 case HCI_ACLDATA_PKT:
4218                         len = HCI_MAX_FRAME_SIZE;
4219                         hlen = HCI_ACL_HDR_SIZE;
4220                         break;
4221                 case HCI_EVENT_PKT:
4222                         len = HCI_MAX_EVENT_SIZE;
4223                         hlen = HCI_EVENT_HDR_SIZE;
4224                         break;
4225                 case HCI_SCODATA_PKT:
4226                         len = HCI_MAX_SCO_SIZE;
4227                         hlen = HCI_SCO_HDR_SIZE;
4228                         break;
4229                 }
4230
4231                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4232                 if (!skb)
4233                         return -ENOMEM;
4234
4235                 scb = (void *) skb->cb;
4236                 scb->expect = hlen;
4237                 scb->pkt_type = type;
4238
4239                 hdev->reassembly[index] = skb;
4240         }
4241
4242         while (count) {
4243                 scb = (void *) skb->cb;
4244                 len = min_t(uint, scb->expect, count);
4245
4246                 memcpy(skb_put(skb, len), data, len);
4247
4248                 count -= len;
4249                 data += len;
4250                 scb->expect -= len;
4251                 remain = count;
4252
4253                 switch (type) {
4254                 case HCI_EVENT_PKT:
4255                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4256                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4257                                 scb->expect = h->plen;
4258
4259                                 if (skb_tailroom(skb) < scb->expect) {
4260                                         kfree_skb(skb);
4261                                         hdev->reassembly[index] = NULL;
4262                                         return -ENOMEM;
4263                                 }
4264                         }
4265                         break;
4266
4267                 case HCI_ACLDATA_PKT:
4268                         if (skb->len == HCI_ACL_HDR_SIZE) {
4269                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4270                                 scb->expect = __le16_to_cpu(h->dlen);
4271
4272                                 if (skb_tailroom(skb) < scb->expect) {
4273                                         kfree_skb(skb);
4274                                         hdev->reassembly[index] = NULL;
4275                                         return -ENOMEM;
4276                                 }
4277                         }
4278                         break;
4279
4280                 case HCI_SCODATA_PKT:
4281                         if (skb->len == HCI_SCO_HDR_SIZE) {
4282                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4283                                 scb->expect = h->dlen;
4284
4285                                 if (skb_tailroom(skb) < scb->expect) {
4286                                         kfree_skb(skb);
4287                                         hdev->reassembly[index] = NULL;
4288                                         return -ENOMEM;
4289                                 }
4290                         }
4291                         break;
4292                 }
4293
4294                 if (scb->expect == 0) {
4295                         /* Complete frame */
4296
4297                         bt_cb(skb)->pkt_type = type;
4298                         hci_recv_frame(hdev, skb);
4299
4300                         hdev->reassembly[index] = NULL;
4301                         return remain;
4302                 }
4303         }
4304
4305         return remain;
4306 }
4307
4308 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4309 {
4310         int rem = 0;
4311
4312         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4313                 return -EILSEQ;
4314
4315         while (count) {
4316                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4317                 if (rem < 0)
4318                         return rem;
4319
4320                 data += (count - rem);
4321                 count = rem;
4322         }
4323
4324         return rem;
4325 }
4326 EXPORT_SYMBOL(hci_recv_fragment);
4327
4328 #define STREAM_REASSEMBLY 0
4329
4330 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4331 {
4332         int type;
4333         int rem = 0;
4334
4335         while (count) {
4336                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4337
4338                 if (!skb) {
4339                         struct { char type; } *pkt;
4340
4341                         /* Start of the frame */
4342                         pkt = data;
4343                         type = pkt->type;
4344
4345                         data++;
4346                         count--;
4347                 } else
4348                         type = bt_cb(skb)->pkt_type;
4349
4350                 rem = hci_reassembly(hdev, type, data, count,
4351                                      STREAM_REASSEMBLY);
4352                 if (rem < 0)
4353                         return rem;
4354
4355                 data += (count - rem);
4356                 count = rem;
4357         }
4358
4359         return rem;
4360 }
4361 EXPORT_SYMBOL(hci_recv_stream_fragment);
4362
4363 /* ---- Interface to upper protocols ---- */
4364
4365 int hci_register_cb(struct hci_cb *cb)
4366 {
4367         BT_DBG("%p name %s", cb, cb->name);
4368
4369         write_lock(&hci_cb_list_lock);
4370         list_add(&cb->list, &hci_cb_list);
4371         write_unlock(&hci_cb_list_lock);
4372
4373         return 0;
4374 }
4375 EXPORT_SYMBOL(hci_register_cb);
4376
4377 int hci_unregister_cb(struct hci_cb *cb)
4378 {
4379         BT_DBG("%p name %s", cb, cb->name);
4380
4381         write_lock(&hci_cb_list_lock);
4382         list_del(&cb->list);
4383         write_unlock(&hci_cb_list_lock);
4384
4385         return 0;
4386 }
4387 EXPORT_SYMBOL(hci_unregister_cb);
4388
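/* Pass a frame to the driver, timestamping it and feeding a copy to
 * the monitor and, in promiscuous mode, to the HCI sockets first.
 */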
4389 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4390 {
4391         int err;
4392
4393         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4394
4395         /* Time stamp */
4396         __net_timestamp(skb);
4397
4398         /* Send copy to monitor */
4399         hci_send_to_monitor(hdev, skb);
4400
4401         if (atomic_read(&hdev->promisc)) {
4402                 /* Send copy to the sockets */
4403                 hci_send_to_sock(hdev, skb);
4404         }
4405
4406         /* Get rid of skb owner, prior to sending to the driver. */
4407         skb_orphan(skb);
4408
4409         err = hdev->send(hdev, skb);
4410         if (err < 0) {
4411                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4412                 kfree_skb(skb);
4413         }
4414 }
4415
4416 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4417 {
4418         skb_queue_head_init(&req->cmd_q);
4419         req->hdev = hdev;
4420         req->err = 0;
4421 }
4422
4423 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4424 {
4425         struct hci_dev *hdev = req->hdev;
4426         struct sk_buff *skb;
4427         unsigned long flags;
4428
4429         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4430
4431         /* If an error occurred during request building, remove all HCI
4432          * commands queued on the HCI request queue.
4433          */
4434         if (req->err) {
4435                 skb_queue_purge(&req->cmd_q);
4436                 return req->err;
4437         }
4438
4439         /* Do not allow empty requests */
4440         if (skb_queue_empty(&req->cmd_q))
4441                 return -ENODATA;
4442
4443         skb = skb_peek_tail(&req->cmd_q);
4444         bt_cb(skb)->req.complete = complete;
4445
4446         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4447         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4448         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4449
4450         queue_work(hdev->workqueue, &hdev->cmd_work);
4451
4452         return 0;
4453 }
4454
4455 bool hci_req_pending(struct hci_dev *hdev)
4456 {
4457         return (hdev->req_status == HCI_REQ_PEND);
4458 }
4459
4460 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4461                                        u32 plen, const void *param)
4462 {
4463         int len = HCI_COMMAND_HDR_SIZE + plen;
4464         struct hci_command_hdr *hdr;
4465         struct sk_buff *skb;
4466
4467         skb = bt_skb_alloc(len, GFP_ATOMIC);
4468         if (!skb)
4469                 return NULL;
4470
4471         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4472         hdr->opcode = cpu_to_le16(opcode);
4473         hdr->plen   = plen;
4474
4475         if (plen)
4476                 memcpy(skb_put(skb, plen), param, plen);
4477
4478         BT_DBG("skb len %d", skb->len);
4479
4480         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4481
4482         return skb;
4483 }
4484
4485 /* Send HCI command */
4486 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4487                  const void *param)
4488 {
4489         struct sk_buff *skb;
4490
4491         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4492
4493         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4494         if (!skb) {
4495                 BT_ERR("%s no memory for command", hdev->name);
4496                 return -ENOMEM;
4497         }
4498
4499         /* Stand-alone HCI commands must be flagged as
4500          * single-command requests.
4501          */
4502         bt_cb(skb)->req.start = true;
4503
4504         skb_queue_tail(&hdev->cmd_q, skb);
4505         queue_work(hdev->workqueue, &hdev->cmd_work);
4506
4507         return 0;
4508 }
4509
4510 /* Queue a command to an asynchronous HCI request */
4511 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4512                     const void *param, u8 event)
4513 {
4514         struct hci_dev *hdev = req->hdev;
4515         struct sk_buff *skb;
4516
4517         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4518
4519         /* If an error occurred during request building, there is no point in
4520          * queueing the HCI command. We can simply return.
4521          */
4522         if (req->err)
4523                 return;
4524
4525         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4526         if (!skb) {
4527                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4528                        hdev->name, opcode);
4529                 req->err = -ENOMEM;
4530                 return;
4531         }
4532
4533         if (skb_queue_empty(&req->cmd_q))
4534                 bt_cb(skb)->req.start = true;
4535
4536         bt_cb(skb)->req.event = event;
4537
4538         skb_queue_tail(&req->cmd_q, skb);
4539 }
4540
4541 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4542                  const void *param)
4543 {
4544         hci_req_add_ev(req, opcode, plen, param, 0);
4545 }
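
/* Usage sketch for the request API (hypothetical command buffer and
 * completion callback; mirrors le_scan_disable_work() above):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * hci_req_run() splices the queued commands onto hdev->cmd_q and
 * returns -ENODATA for an empty request.
 */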
4546
4547 /* Get data from the previously sent command */
4548 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4549 {
4550         struct hci_command_hdr *hdr;
4551
4552         if (!hdev->sent_cmd)
4553                 return NULL;
4554
4555         hdr = (void *) hdev->sent_cmd->data;
4556
4557         if (hdr->opcode != cpu_to_le16(opcode))
4558                 return NULL;
4559
4560         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4561
4562         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4563 }
4564
4565 /* Send ACL data */
4566 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4567 {
4568         struct hci_acl_hdr *hdr;
4569         int len = skb->len;
4570
4571         skb_push(skb, HCI_ACL_HDR_SIZE);
4572         skb_reset_transport_header(skb);
4573         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4574         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4575         hdr->dlen   = cpu_to_le16(len);
4576 }
4577
4578 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4579                           struct sk_buff *skb, __u16 flags)
4580 {
4581         struct hci_conn *conn = chan->conn;
4582         struct hci_dev *hdev = conn->hdev;
4583         struct sk_buff *list;
4584
4585         skb->len = skb_headlen(skb);
4586         skb->data_len = 0;
4587
4588         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4589
4590         switch (hdev->dev_type) {
4591         case HCI_BREDR:
4592                 hci_add_acl_hdr(skb, conn->handle, flags);
4593                 break;
4594         case HCI_AMP:
4595                 hci_add_acl_hdr(skb, chan->handle, flags);
4596                 break;
4597         default:
4598                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4599                 return;
4600         }
4601
4602         list = skb_shinfo(skb)->frag_list;
4603         if (!list) {
4604                 /* Non-fragmented */
4605                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4606
4607                 skb_queue_tail(queue, skb);
4608         } else {
4609                 /* Fragmented */
4610                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4611
4612                 skb_shinfo(skb)->frag_list = NULL;
4613
4614                 /* Queue all fragments atomically */
4615                 spin_lock(&queue->lock);
4616
4617                 __skb_queue_tail(queue, skb);
4618
4619                 flags &= ~ACL_START;
4620                 flags |= ACL_CONT;
4621                 do {
4622                         skb = list; list = list->next;
4623
4624                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4625                         hci_add_acl_hdr(skb, conn->handle, flags);
4626
4627                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4628
4629                         __skb_queue_tail(queue, skb);
4630                 } while (list);
4631
4632                 spin_unlock(&queue->lock);
4633         }
4634 }
4635
4636 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4637 {
4638         struct hci_dev *hdev = chan->conn->hdev;
4639
4640         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4641
4642         hci_queue_acl(chan, &chan->data_q, skb, flags);
4643
4644         queue_work(hdev->workqueue, &hdev->tx_work);
4645 }
4646
4647 /* Send SCO data */
4648 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4649 {
4650         struct hci_dev *hdev = conn->hdev;
4651         struct hci_sco_hdr hdr;
4652
4653         BT_DBG("%s len %d", hdev->name, skb->len);
4654
4655         hdr.handle = cpu_to_le16(conn->handle);
4656         hdr.dlen   = skb->len;
4657
4658         skb_push(skb, HCI_SCO_HDR_SIZE);
4659         skb_reset_transport_header(skb);
4660         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4661
4662         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4663
4664         skb_queue_tail(&conn->data_q, skb);
4665         queue_work(hdev->workqueue, &hdev->tx_work);
4666 }
4667
4668 /* ---- HCI TX task (outgoing data) ---- */
4669
4670 /* HCI Connection scheduler */
4671 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4672                                      int *quote)
4673 {
4674         struct hci_conn_hash *h = &hdev->conn_hash;
4675         struct hci_conn *conn = NULL, *c;
4676         unsigned int num = 0, min = ~0;
4677
4678         /* We don't have to lock the device here. Connections are always
4679          * added and removed with the TX task disabled. */
4680
4681         rcu_read_lock();
4682
4683         list_for_each_entry_rcu(c, &h->list, list) {
4684                 if (c->type != type || skb_queue_empty(&c->data_q))
4685                         continue;
4686
4687                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4688                         continue;
4689
4690                 num++;
4691
4692                 if (c->sent < min) {
4693                         min  = c->sent;
4694                         conn = c;
4695                 }
4696
4697                 if (hci_conn_num(hdev, type) == num)
4698                         break;
4699         }
4700
4701         rcu_read_unlock();
4702
4703         if (conn) {
4704                 int cnt, q;
4705
4706                 switch (conn->type) {
4707                 case ACL_LINK:
4708                         cnt = hdev->acl_cnt;
4709                         break;
4710                 case SCO_LINK:
4711                 case ESCO_LINK:
4712                         cnt = hdev->sco_cnt;
4713                         break;
4714                 case LE_LINK:
4715                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4716                         break;
4717                 default:
4718                         cnt = 0;
4719                         BT_ERR("Unknown link type");
4720                 }
4721
4722                 q = cnt / num;
4723                 *quote = q ? q : 1;
4724         } else
4725                 *quote = 0;
4726
4727         BT_DBG("conn %p quote %d", conn, *quote);
4728         return conn;
4729 }
4730
4731 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4732 {
4733         struct hci_conn_hash *h = &hdev->conn_hash;
4734         struct hci_conn *c;
4735
4736         BT_ERR("%s link tx timeout", hdev->name);
4737
4738         rcu_read_lock();
4739
4740         /* Kill stalled connections */
4741         list_for_each_entry_rcu(c, &h->list, list) {
4742                 if (c->type == type && c->sent) {
4743                         BT_ERR("%s killing stalled connection %pMR",
4744                                hdev->name, &c->dst);
4745                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4746                 }
4747         }
4748
4749         rcu_read_unlock();
4750 }
4751
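/* Pick the channel with the highest-priority queued data whose
 * connection has sent the least so far, and derive a fair quote from
 * the controller buffer count for its link type.
 */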
4752 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4753                                       int *quote)
4754 {
4755         struct hci_conn_hash *h = &hdev->conn_hash;
4756         struct hci_chan *chan = NULL;
4757         unsigned int num = 0, min = ~0, cur_prio = 0;
4758         struct hci_conn *conn;
4759         int cnt, q, conn_num = 0;
4760
4761         BT_DBG("%s", hdev->name);
4762
4763         rcu_read_lock();
4764
4765         list_for_each_entry_rcu(conn, &h->list, list) {
4766                 struct hci_chan *tmp;
4767
4768                 if (conn->type != type)
4769                         continue;
4770
4771                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4772                         continue;
4773
4774                 conn_num++;
4775
4776                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4777                         struct sk_buff *skb;
4778
4779                         if (skb_queue_empty(&tmp->data_q))
4780                                 continue;
4781
4782                         skb = skb_peek(&tmp->data_q);
4783                         if (skb->priority < cur_prio)
4784                                 continue;
4785
4786                         if (skb->priority > cur_prio) {
4787                                 num = 0;
4788                                 min = ~0;
4789                                 cur_prio = skb->priority;
4790                         }
4791
4792                         num++;
4793
4794                         if (conn->sent < min) {
4795                                 min  = conn->sent;
4796                                 chan = tmp;
4797                         }
4798                 }
4799
4800                 if (hci_conn_num(hdev, type) == conn_num)
4801                         break;
4802         }
4803
4804         rcu_read_unlock();
4805
4806         if (!chan)
4807                 return NULL;
4808
4809         switch (chan->conn->type) {
4810         case ACL_LINK:
4811                 cnt = hdev->acl_cnt;
4812                 break;
4813         case AMP_LINK:
4814                 cnt = hdev->block_cnt;
4815                 break;
4816         case SCO_LINK:
4817         case ESCO_LINK:
4818                 cnt = hdev->sco_cnt;
4819                 break;
4820         case LE_LINK:
4821                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4822                 break;
4823         default:
4824                 cnt = 0;
4825                 BT_ERR("Unknown link type");
4826         }
4827
4828         q = cnt / num;
4829         *quote = q ? q : 1;
4830         BT_DBG("chan %p quote %d", chan, *quote);
4831         return chan;
4832 }
4833
4834 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4835 {
4836         struct hci_conn_hash *h = &hdev->conn_hash;
4837         struct hci_conn *conn;
4838         int num = 0;
4839
4840         BT_DBG("%s", hdev->name);
4841
4842         rcu_read_lock();
4843
4844         list_for_each_entry_rcu(conn, &h->list, list) {
4845                 struct hci_chan *chan;
4846
4847                 if (conn->type != type)
4848                         continue;
4849
4850                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4851                         continue;
4852
4853                 num++;
4854
4855                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4856                         struct sk_buff *skb;
4857
4858                         if (chan->sent) {
4859                                 chan->sent = 0;
4860                                 continue;
4861                         }
4862
4863                         if (skb_queue_empty(&chan->data_q))
4864                                 continue;
4865
4866                         skb = skb_peek(&chan->data_q);
4867                         if (skb->priority >= HCI_PRIO_MAX - 1)
4868                                 continue;
4869
4870                         skb->priority = HCI_PRIO_MAX - 1;
4871
4872                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4873                                skb->priority);
4874                 }
4875
4876                 if (hci_conn_num(hdev, type) == num)
4877                         break;
4878         }
4879
4880         rcu_read_unlock();
4882 }
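
/* Note on the promotion above: a channel that transmitted during the
 * last pass only has chan->sent cleared, while a channel that was left
 * queued gets the skb at its head bumped to HCI_PRIO_MAX - 1.  Since
 * hci_chan_sent() always serves the highest head priority first, this
 * guarantees that low-priority traffic is eventually scheduled instead
 * of being starved by a busy high-priority channel.
 */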
4883
4884 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4885 {
4886         /* Calculate count of blocks used by this packet */
4887         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4888 }
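
/* Rough example of the block math, assuming a hypothetical controller
 * that reports a data block length of 256 bytes: an ACL frame with
 * skb->len == 516 carries 516 - HCI_ACL_HDR_SIZE = 512 bytes of
 * payload, so DIV_ROUND_UP(512, 256) charges 2 blocks; a 513-byte
 * payload would round up to 3.
 */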
4889
4890 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4891 {
4892         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4893                 /* ACL tx timeout must be longer than maximum
4894                  * link supervision timeout (40.9 seconds) */
4895                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4896                                        HCI_ACL_TX_TIMEOUT))
4897                         hci_link_tx_to(hdev, ACL_LINK);
4898         }
4899 }
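
/* The 40.9 second figure above is the worst-case link supervision
 * timeout: 0xFFFF slots * 0.625 ms/slot ~= 40.96 s.  HCI_ACL_TX_TIMEOUT
 * (45 seconds) is chosen above that, so a controller that is merely
 * waiting out a dying link is not declared stuck prematurely.
 */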
4900
4901 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4902 {
4903         unsigned int cnt = hdev->acl_cnt;
4904         struct hci_chan *chan;
4905         struct sk_buff *skb;
4906         int quote;
4907
4908         __check_timeout(hdev, cnt);
4909
4910         while (hdev->acl_cnt &&
4911                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4912                 u32 priority = (skb_peek(&chan->data_q))->priority;
4913                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4914                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4915                                skb->len, skb->priority);
4916
4917                         /* Stop if priority has changed */
4918                         if (skb->priority < priority)
4919                                 break;
4920
4921                         skb = skb_dequeue(&chan->data_q);
4922
4923                         hci_conn_enter_active_mode(chan->conn,
4924                                                    bt_cb(skb)->force_active);
4925
4926                         hci_send_frame(hdev, skb);
4927                         hdev->acl_last_tx = jiffies;
4928
4929                         hdev->acl_cnt--;
4930                         chan->sent++;
4931                         chan->conn->sent++;
4932                 }
4933         }
4934
4935         if (cnt != hdev->acl_cnt)
4936                 hci_prio_recalculate(hdev, ACL_LINK);
4937 }
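
/* Scheduling sketch: while ACL credits remain, hci_sched_acl_pkt()
 * asks hci_chan_sent() for the best channel and spends the granted
 * quote frame by frame.  The inner loop breaks as soon as the head
 * priority drops below the priority the quote was granted at, so a
 * stale quote is never spent on lower-priority frames; the next
 * hci_chan_sent() call re-evaluates all channels instead.
 */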
4938
4939 static void hci_sched_acl_blk(struct hci_dev *hdev)
4940 {
4941         unsigned int cnt = hdev->block_cnt;
4942         struct hci_chan *chan;
4943         struct sk_buff *skb;
4944         int quote;
4945         u8 type;
4946
4947         __check_timeout(hdev, cnt);
4948
4949         BT_DBG("%s", hdev->name);
4950
4951         if (hdev->dev_type == HCI_AMP)
4952                 type = AMP_LINK;
4953         else
4954                 type = ACL_LINK;
4955
4956         while (hdev->block_cnt > 0 &&
4957                (chan = hci_chan_sent(hdev, type, &quote))) {
4958                 u32 priority = (skb_peek(&chan->data_q))->priority;
4959                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4960                         int blocks;
4961
4962                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4963                                skb->len, skb->priority);
4964
4965                         /* Stop if priority has changed */
4966                         if (skb->priority < priority)
4967                                 break;
4968
4969                         skb = skb_dequeue(&chan->data_q);
4970
4971                         blocks = __get_blocks(hdev, skb);
4972                         if (blocks > hdev->block_cnt) {
4973                                 /* Requeue rather than leak the skb */
                                     skb_queue_head(&chan->data_q, skb);
                                     return;
                             }
4974
4975                         hci_conn_enter_active_mode(chan->conn,
4976                                                    bt_cb(skb)->force_active);
4977
4978                         hci_send_frame(hdev, skb);
4979                         hdev->acl_last_tx = jiffies;
4980
4981                         hdev->block_cnt -= blocks;
4982                         quote -= blocks;
4983
4984                         chan->sent += blocks;
4985                         chan->conn->sent += blocks;
4986                 }
4987         }
4988
4989         if (cnt != hdev->block_cnt)
4990                 hci_prio_recalculate(hdev, type);
4991 }
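
/* Unlike the packet-based path, the quote and the credits here are
 * counted in controller buffer blocks, so one large frame may consume
 * several units of quote at once (see __get_blocks() above).  The
 * scheduler bails out, putting the frame back, when it needs more
 * blocks than the controller has left, and retries after Number Of
 * Completed Data Blocks events replenish hdev->block_cnt.
 */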
4992
4993 static void hci_sched_acl(struct hci_dev *hdev)
4994 {
4995         BT_DBG("%s", hdev->name);
4996
4997         /* No ACL link over BR/EDR controller */
4998         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4999                 return;
5000
5001         /* No AMP link over AMP controller */
5002         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5003                 return;
5004
5005         switch (hdev->flow_ctl_mode) {
5006         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5007                 hci_sched_acl_pkt(hdev);
5008                 break;
5009
5010         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5011                 hci_sched_acl_blk(hdev);
5012                 break;
5013         }
5014 }
5015
5016 /* Schedule SCO */
5017 static void hci_sched_sco(struct hci_dev *hdev)
5018 {
5019         struct hci_conn *conn;
5020         struct sk_buff *skb;
5021         int quote;
5022
5023         BT_DBG("%s", hdev->name);
5024
5025         if (!hci_conn_num(hdev, SCO_LINK))
5026                 return;
5027
5028         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5029                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5030                         BT_DBG("skb %p len %d", skb, skb->len);
5031                         hci_send_frame(hdev, skb);
5032
5033                         conn->sent++;
5034                         if (conn->sent == ~0)
5035                                 conn->sent = 0;
5036                 }
5037         }
5038 }
5039
5040 static void hci_sched_esco(struct hci_dev *hdev)
5041 {
5042         struct hci_conn *conn;
5043         struct sk_buff *skb;
5044         int quote;
5045
5046         BT_DBG("%s", hdev->name);
5047
5048         if (!hci_conn_num(hdev, ESCO_LINK))
5049                 return;
5050
5051         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5052                                                      &quote))) {
5053                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5054                         BT_DBG("skb %p len %d", skb, skb->len);
5055                         hci_send_frame(hdev, skb);
5056
5057                         conn->sent++;
5058                         if (conn->sent == ~0)
5059                                 conn->sent = 0;
5060                 }
5061         }
5062 }
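
/* SCO and eSCO scheduling is deliberately simple: the links are
 * synchronous, so there is no per-channel priority handling and no
 * recalculation pass.  The sent counter is wrapped back to zero before
 * it can overflow, presumably because completion reports for
 * synchronous packets cannot be relied upon to ever decrement it.
 */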
5063
5064 static void hci_sched_le(struct hci_dev *hdev)
5065 {
5066         struct hci_chan *chan;
5067         struct sk_buff *skb;
5068         int quote, cnt, tmp;
5069
5070         BT_DBG("%s", hdev->name);
5071
5072         if (!hci_conn_num(hdev, LE_LINK))
5073                 return;
5074
5075         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5076                 /* LE tx timeout must be longer than maximum
5077                  * link supervision timeout (40.9 seconds) */
5078                 if (!hdev->le_cnt && hdev->le_pkts &&
5079                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5080                         hci_link_tx_to(hdev, LE_LINK);
5081         }
5082
5083         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5084         tmp = cnt;
5085         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5086                 u32 priority = (skb_peek(&chan->data_q))->priority;
5087                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5088                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5089                                skb->len, skb->priority);
5090
5091                         /* Stop if priority has changed */
5092                         if (skb->priority < priority)
5093                                 break;
5094
5095                         skb = skb_dequeue(&chan->data_q);
5096
5097                         hci_send_frame(hdev, skb);
5098                         hdev->le_last_tx = jiffies;
5099
5100                         cnt--;
5101                         chan->sent++;
5102                         chan->conn->sent++;
5103                 }
5104         }
5105
5106         if (hdev->le_pkts)
5107                 hdev->le_cnt = cnt;
5108         else
5109                 hdev->acl_cnt = cnt;
5110
5111         if (cnt != tmp)
5112                 hci_prio_recalculate(hdev, LE_LINK);
5113 }
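
/* A controller without a dedicated LE buffer pool reports zero LE
 * packet counts, in which case LE traffic shares the ACL pool: that is
 * why the code above draws credits from le_cnt when hdev->le_pkts is
 * set and from acl_cnt otherwise, and writes the remainder back to the
 * pool it borrowed from.
 */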
5114
5115 static void hci_tx_work(struct work_struct *work)
5116 {
5117         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5118         struct sk_buff *skb;
5119
5120         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5121                hdev->sco_cnt, hdev->le_cnt);
5122
5123         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5124                 /* Schedule queues and send frames to the HCI driver */
5125                 hci_sched_acl(hdev);
5126                 hci_sched_sco(hdev);
5127                 hci_sched_esco(hdev);
5128                 hci_sched_le(hdev);
5129         }
5130
5131         /* Send next queued raw (unknown type) packet */
5132         while ((skb = skb_dequeue(&hdev->raw_q)))
5133                 hci_send_frame(hdev, skb);
5134 }
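
/* Note that the raw queue is drained unconditionally: when a user
 * channel owns the device, the schedulers above are bypassed but raw
 * packets queued by the socket layer are still pushed to the driver.
 */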
5135
5136 /* ----- HCI RX task (incoming data processing) ----- */
5137
5138 /* ACL data packet */
5139 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5140 {
5141         struct hci_acl_hdr *hdr = (void *) skb->data;
5142         struct hci_conn *conn;
5143         __u16 handle, flags;
5144
5145         skb_pull(skb, HCI_ACL_HDR_SIZE);
5146
5147         handle = __le16_to_cpu(hdr->handle);
5148         flags  = hci_flags(handle);
5149         handle = hci_handle(handle);
5150
5151         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5152                handle, flags);
5153
5154         hdev->stat.acl_rx++;
5155
5156         hci_dev_lock(hdev);
5157         conn = hci_conn_hash_lookup_handle(hdev, handle);
5158         hci_dev_unlock(hdev);
5159
5160         if (conn) {
5161                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5162
5163                 /* Send to upper protocol */
5164                 l2cap_recv_acldata(conn, skb, flags);
5165                 return;
5166         }
5167
5168         BT_ERR("%s ACL packet for unknown connection handle %d",
5169                hdev->name, handle);
5170
5171         kfree_skb(skb);
5172 }
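
/* The 16-bit handle field above packs a 12-bit connection handle and
 * 4 bits of packet boundary / broadcast flags; hci_handle() and
 * hci_flags() (h & 0x0fff and h >> 12 respectively) split them apart
 * before the connection lookup.
 */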
5173
5174 /* SCO data packet */
5175 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5176 {
5177         struct hci_sco_hdr *hdr = (void *) skb->data;
5178         struct hci_conn *conn;
5179         __u16 handle;
5180
5181         skb_pull(skb, HCI_SCO_HDR_SIZE);
5182
5183         handle = __le16_to_cpu(hdr->handle);
5184
5185         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5186
5187         hdev->stat.sco_rx++;
5188
5189         hci_dev_lock(hdev);
5190         conn = hci_conn_hash_lookup_handle(hdev, handle);
5191         hci_dev_unlock(hdev);
5192
5193         if (conn) {
5194                 /* Send to upper protocol */
5195                 sco_recv_scodata(conn, skb);
5196                 return;
5197         }
5198
5199         BT_ERR("%s SCO packet for unknown connection handle %d",
5200                hdev->name, handle);
5201
5202         kfree_skb(skb);
5203 }
5204
5205 static bool hci_req_is_complete(struct hci_dev *hdev)
5206 {
5207         struct sk_buff *skb;
5208
5209         skb = skb_peek(&hdev->cmd_q);
5210         if (!skb)
5211                 return true;
5212
5213         return bt_cb(skb)->req.start;
5214 }
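
/* Commands are framed into requests via bt_cb(skb)->req.start: the
 * first command of each request carries the flag.  An empty command
 * queue, or a queue whose head starts a new request, therefore means
 * the current request has been fully sent.
 */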
5215
5216 static void hci_resend_last(struct hci_dev *hdev)
5217 {
5218         struct hci_command_hdr *sent;
5219         struct sk_buff *skb;
5220         u16 opcode;
5221
5222         if (!hdev->sent_cmd)
5223                 return;
5224
5225         sent = (void *) hdev->sent_cmd->data;
5226         opcode = __le16_to_cpu(sent->opcode);
5227         if (opcode == HCI_OP_RESET)
5228                 return;
5229
5230         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5231         if (!skb)
5232                 return;
5233
5234         skb_queue_head(&hdev->cmd_q, skb);
5235         queue_work(hdev->workqueue, &hdev->cmd_work);
5236 }
5237
5238 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5239 {
5240         hci_req_complete_t req_complete = NULL;
5241         struct sk_buff *skb;
5242         unsigned long flags;
5243
5244         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5245
5246         /* If the completed command doesn't match the last one that was
5247          * sent, we need to do special handling of it.
5248          */
5249         if (!hci_sent_cmd_data(hdev, opcode)) {
5250                 /* Some CSR-based controllers generate a spontaneous
5251                  * reset complete event during init and any pending
5252                  * command will never be completed. In such a case we
5253                  * need to resend whatever was the last sent
5254                  * command.
5255                  */
5256                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5257                         hci_resend_last(hdev);
5258
5259                 return;
5260         }
5261
5262         /* If the command succeeded and there are still more commands in
5263          * this request, the request is not yet complete.
5264          */
5265         if (!status && !hci_req_is_complete(hdev))
5266                 return;
5267
5268         /* If this was the last command in a request, the complete
5269          * callback would be found in hdev->sent_cmd instead of the
5270          * command queue (hdev->cmd_q).
5271          */
5272         if (hdev->sent_cmd) {
5273                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5274
5275                 if (req_complete) {
5276                         /* We must set the complete callback to NULL to
5277                          * avoid calling the callback more than once if
5278                          * this function gets called again.
5279                          */
5280                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5281
5282                         goto call_complete;
5283                 }
5284         }
5285
5286         /* Remove all pending commands belonging to this request */
5287         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5288         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5289                 if (bt_cb(skb)->req.start) {
5290                         __skb_queue_head(&hdev->cmd_q, skb);
5291                         break;
5292                 }
5293
5294                 req_complete = bt_cb(skb)->req.complete;
5295                 kfree_skb(skb);
5296         }
5297         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5298
5299 call_complete:
5300         if (req_complete)
5301                 req_complete(hdev, status);
5302 }
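
/* In the failure path above, every remaining command of the aborted
 * request is dropped from cmd_q up to (but not including) the first
 * command that starts the next request, and the request's complete
 * callback is invoked exactly once with the failing status.
 */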
5303
5304 static void hci_rx_work(struct work_struct *work)
5305 {
5306         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5307         struct sk_buff *skb;
5308
5309         BT_DBG("%s", hdev->name);
5310
5311         while ((skb = skb_dequeue(&hdev->rx_q))) {
5312                 /* Send copy to monitor */
5313                 hci_send_to_monitor(hdev, skb);
5314
5315                 if (atomic_read(&hdev->promisc)) {
5316                         /* Send copy to the sockets */
5317                         hci_send_to_sock(hdev, skb);
5318                 }
5319
5320                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5321                         kfree_skb(skb);
5322                         continue;
5323                 }
5324
5325                 if (test_bit(HCI_INIT, &hdev->flags)) {
5326                         /* Don't process data packets in this state. */
5327                         switch (bt_cb(skb)->pkt_type) {
5328                         case HCI_ACLDATA_PKT:
5329                         case HCI_SCODATA_PKT:
5330                                 kfree_skb(skb);
5331                                 continue;
5332                         }
5333                 }
5334
5335                 /* Process frame */
5336                 switch (bt_cb(skb)->pkt_type) {
5337                 case HCI_EVENT_PKT:
5338                         BT_DBG("%s Event packet", hdev->name);
5339                         hci_event_packet(hdev, skb);
5340                         break;
5341
5342                 case HCI_ACLDATA_PKT:
5343                         BT_DBG("%s ACL data packet", hdev->name);
5344                         hci_acldata_packet(hdev, skb);
5345                         break;
5346
5347                 case HCI_SCODATA_PKT:
5348                         BT_DBG("%s SCO data packet", hdev->name);
5349                         hci_scodata_packet(hdev, skb);
5350                         break;
5351
5352                 default:
5353                         kfree_skb(skb);
5354                         break;
5355                 }
5356         }
5357 }
5358
5359 static void hci_cmd_work(struct work_struct *work)
5360 {
5361         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5362         struct sk_buff *skb;
5363
5364         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5365                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5366
5367         /* Send queued commands */
5368         if (atomic_read(&hdev->cmd_cnt)) {
5369                 skb = skb_dequeue(&hdev->cmd_q);
5370                 if (!skb)
5371                         return;
5372
5373                 kfree_skb(hdev->sent_cmd);
5374
5375                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5376                 if (hdev->sent_cmd) {
5377                         atomic_dec(&hdev->cmd_cnt);
5378                         hci_send_frame(hdev, skb);
5379                         if (test_bit(HCI_RESET, &hdev->flags))
5380                                 cancel_delayed_work(&hdev->cmd_timer);
5381                         else
5382                                 schedule_delayed_work(&hdev->cmd_timer,
5383                                                       HCI_CMD_TIMEOUT);
5384                 } else {
5385                         skb_queue_head(&hdev->cmd_q, skb);
5386                         queue_work(hdev->workqueue, &hdev->cmd_work);
5387                 }
5388         }
5389 }
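
/* cmd_cnt acts as the command flow-control credit from the controller:
 * it is decremented here for each command shipped and replenished from
 * Command Complete / Command Status events.  A clone of the in-flight
 * command is kept in hdev->sent_cmd so completion events can be matched
 * against its opcode (and so hci_resend_last() has something to
 * retransmit), and HCI_CMD_TIMEOUT arms a watchdog for controllers that
 * never answer.
 */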
5390
5391 void hci_req_add_le_scan_disable(struct hci_request *req)
5392 {
5393         struct hci_cp_le_set_scan_enable cp;
5394
5395         memset(&cp, 0, sizeof(cp));
5396         cp.enable = LE_SCAN_DISABLE;
5397         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5398 }
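
/* A typical caller builds a request and runs it asynchronously; for
 * illustration (the callback name is hypothetical):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, scan_disable_complete);
 *	if (err)
 *		BT_ERR("Failed to run HCI request: err %d", err);
 *
 * hci_update_background_scan() below follows exactly this pattern.
 */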
5399
5400 void hci_req_add_le_passive_scan(struct hci_request *req)
5401 {
5402         struct hci_cp_le_set_scan_param param_cp;
5403         struct hci_cp_le_set_scan_enable enable_cp;
5404         struct hci_dev *hdev = req->hdev;
5405         u8 own_addr_type;
5406
5407         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
5408          * during passive scanning. Not using an unresolvable address
5409          * here is important so that peer devices using direct
5410          * advertising with our address will be correctly reported
5411          * by the controller.
5412          */
5413         if (hci_update_random_address(req, false, &own_addr_type))
5414                 return;
5415
5416         memset(&param_cp, 0, sizeof(param_cp));
5417         param_cp.type = LE_SCAN_PASSIVE;
5418         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5419         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5420         param_cp.own_address_type = own_addr_type;
5421         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5422                     &param_cp);
5423
5424         memset(&enable_cp, 0, sizeof(enable_cp));
5425         enable_cp.enable = LE_SCAN_ENABLE;
5426         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5427         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5428                     &enable_cp);
5429 }
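
/* The interval and window values handed to the controller here are in
 * units of 0.625 ms, per the LE Set Scan Parameters command definition,
 * and the parameters must be issued while scanning is disabled; callers
 * such as hci_update_background_scan() therefore queue a scan-disable
 * command first when a scan is already running.
 */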
5430
5431 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5432 {
5433         if (status)
5434                 BT_DBG("Failed to update background scanning: status 0x%2.2x",
5435                        status);
5436 }
5437
5438 /* This function controls the background scanning based on hdev->pend_le_conns
5439  * list. If there are pending LE connections we start the background scanning,
5440  * otherwise we stop it.
5441  *
5442  * This function requires that the caller holds hdev->lock.
5443  */
5444 void hci_update_background_scan(struct hci_dev *hdev)
5445 {
5446         struct hci_request req;
5447         struct hci_conn *conn;
5448         int err;
5449
5450         if (!test_bit(HCI_UP, &hdev->flags) ||
5451             test_bit(HCI_INIT, &hdev->flags) ||
5452             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5453             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5454             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5455             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5456                 return;
5457
5458         /* No point in doing scanning if LE support hasn't been enabled */
5459         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5460                 return;
5461
5462         /* If discovery is active don't interfere with it */
5463         if (hdev->discovery.state != DISCOVERY_STOPPED)
5464                 return;
5465
5466         hci_req_init(&req, hdev);
5467
5468         if (list_empty(&hdev->pend_le_conns) &&
5469             list_empty(&hdev->pend_le_reports)) {
5470                 /* If there are no pending LE connections or devices
5471                  * to be scanned for, we should stop the background
5472                  * scanning.
5473                  */
5474
5475                 /* If controller is not scanning we are done. */
5476                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5477                         return;
5478
5479                 hci_req_add_le_scan_disable(&req);
5480
5481                 BT_DBG("%s stopping background scanning", hdev->name);
5482         } else {
5483                 /* If there is at least one pending LE connection, we should
5484                  * keep the background scan running.
5485                  */
5486
5487                 /* If controller is connecting, we should not start scanning
5488                  * since some controllers are not able to scan and connect at
5489                  * the same time.
5490                  */
5491                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5492                 if (conn)
5493                         return;
5494
5495                 /* If controller is currently scanning, we stop it to ensure we
5496                  * don't miss any advertising (due to the duplicates filter).
5497                  */
5498                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5499                         hci_req_add_le_scan_disable(&req);
5500
5501                 hci_req_add_le_passive_scan(&req);
5502
5503                 BT_DBG("%s starting background scanning", hdev->name);
5504         }
5505
5506         err = hci_req_run(&req, update_background_scan_complete);
5507         if (err)
5508                 BT_ERR("Failed to run HCI request: err %d", err);
5509 }