Merge tag 'metag-for-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan...
[cascardo/linux.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "smp.h"
41
/* Forward declarations of the deferred work handlers for the receive,
 * command and transmit paths (defined later in this file).
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: allocates the numeric index of each registered hdev */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request (stored in hdev->req_status) */
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

/* Serialize request processing against open/close of the device */
#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
65
66 /* ---- HCI notifications ---- */
67
/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
72
73 /* ---- HCI debugfs entries ---- */
74
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76                              size_t count, loff_t *ppos)
77 {
78         struct hci_dev *hdev = file->private_data;
79         char buf[3];
80
81         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82         buf[1] = '\n';
83         buf[2] = '\0';
84         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85 }
86
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88                               size_t count, loff_t *ppos)
89 {
90         struct hci_dev *hdev = file->private_data;
91         struct sk_buff *skb;
92         char buf[32];
93         size_t buf_size = min(count, (sizeof(buf)-1));
94         bool enable;
95         int err;
96
97         if (!test_bit(HCI_UP, &hdev->flags))
98                 return -ENETDOWN;
99
100         if (copy_from_user(buf, user_buf, buf_size))
101                 return -EFAULT;
102
103         buf[buf_size] = '\0';
104         if (strtobool(buf, &enable))
105                 return -EINVAL;
106
107         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108                 return -EALREADY;
109
110         hci_req_lock(hdev);
111         if (enable)
112                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113                                      HCI_CMD_TIMEOUT);
114         else
115                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116                                      HCI_CMD_TIMEOUT);
117         hci_req_unlock(hdev);
118
119         if (IS_ERR(skb))
120                 return PTR_ERR(skb);
121
122         err = -bt_to_errno(skb->data[0]);
123         kfree_skb(skb);
124
125         if (err < 0)
126                 return err;
127
128         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
129
130         return count;
131 }
132
/* File operations for the read/write "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
139
140 static int features_show(struct seq_file *f, void *ptr)
141 {
142         struct hci_dev *hdev = f->private;
143         u8 p;
144
145         hci_dev_lock(hdev);
146         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149                            hdev->features[p][0], hdev->features[p][1],
150                            hdev->features[p][2], hdev->features[p][3],
151                            hdev->features[p][4], hdev->features[p][5],
152                            hdev->features[p][6], hdev->features[p][7]);
153         }
154         if (lmp_le_capable(hdev))
155                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157                            hdev->le_features[0], hdev->le_features[1],
158                            hdev->le_features[2], hdev->le_features[3],
159                            hdev->le_features[4], hdev->le_features[5],
160                            hdev->le_features[6], hdev->le_features[7]);
161         hci_dev_unlock(hdev);
162
163         return 0;
164 }
165
166 static int features_open(struct inode *inode, struct file *file)
167 {
168         return single_open(file, features_show, inode->i_private);
169 }
170
171 static const struct file_operations features_fops = {
172         .open           = features_open,
173         .read           = seq_read,
174         .llseek         = seq_lseek,
175         .release        = single_release,
176 };
177
178 static int blacklist_show(struct seq_file *f, void *p)
179 {
180         struct hci_dev *hdev = f->private;
181         struct bdaddr_list *b;
182
183         hci_dev_lock(hdev);
184         list_for_each_entry(b, &hdev->blacklist, list)
185                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186         hci_dev_unlock(hdev);
187
188         return 0;
189 }
190
191 static int blacklist_open(struct inode *inode, struct file *file)
192 {
193         return single_open(file, blacklist_show, inode->i_private);
194 }
195
196 static const struct file_operations blacklist_fops = {
197         .open           = blacklist_open,
198         .read           = seq_read,
199         .llseek         = seq_lseek,
200         .release        = single_release,
201 };
202
203 static int whitelist_show(struct seq_file *f, void *p)
204 {
205         struct hci_dev *hdev = f->private;
206         struct bdaddr_list *b;
207
208         hci_dev_lock(hdev);
209         list_for_each_entry(b, &hdev->whitelist, list)
210                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211         hci_dev_unlock(hdev);
212
213         return 0;
214 }
215
216 static int whitelist_open(struct inode *inode, struct file *file)
217 {
218         return single_open(file, whitelist_show, inode->i_private);
219 }
220
221 static const struct file_operations whitelist_fops = {
222         .open           = whitelist_open,
223         .read           = seq_read,
224         .llseek         = seq_lseek,
225         .release        = single_release,
226 };
227
228 static int uuids_show(struct seq_file *f, void *p)
229 {
230         struct hci_dev *hdev = f->private;
231         struct bt_uuid *uuid;
232
233         hci_dev_lock(hdev);
234         list_for_each_entry(uuid, &hdev->uuids, list) {
235                 u8 i, val[16];
236
237                 /* The Bluetooth UUID values are stored in big endian,
238                  * but with reversed byte order. So convert them into
239                  * the right order for the %pUb modifier.
240                  */
241                 for (i = 0; i < 16; i++)
242                         val[i] = uuid->uuid[15 - i];
243
244                 seq_printf(f, "%pUb\n", val);
245         }
246         hci_dev_unlock(hdev);
247
248         return 0;
249 }
250
251 static int uuids_open(struct inode *inode, struct file *file)
252 {
253         return single_open(file, uuids_show, inode->i_private);
254 }
255
256 static const struct file_operations uuids_fops = {
257         .open           = uuids_open,
258         .read           = seq_read,
259         .llseek         = seq_lseek,
260         .release        = single_release,
261 };
262
/* Dump the discovery (inquiry) cache: one line per cached device with
 * its address, page-scan parameters, class of device (printed as
 * dev_class[2..0]), clock offset, RSSI, SSP mode and entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
298
299 static int link_keys_show(struct seq_file *f, void *ptr)
300 {
301         struct hci_dev *hdev = f->private;
302         struct list_head *p, *n;
303
304         hci_dev_lock(hdev);
305         list_for_each_safe(p, n, &hdev->link_keys) {
306                 struct link_key *key = list_entry(p, struct link_key, list);
307                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309         }
310         hci_dev_unlock(hdev);
311
312         return 0;
313 }
314
315 static int link_keys_open(struct inode *inode, struct file *file)
316 {
317         return single_open(file, link_keys_show, inode->i_private);
318 }
319
320 static const struct file_operations link_keys_fops = {
321         .open           = link_keys_open,
322         .read           = seq_read,
323         .llseek         = seq_lseek,
324         .release        = single_release,
325 };
326
/* Show the class of device as a 24-bit hex value; dev_class[2] is
 * printed first as the most significant byte.
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
350
/* Return the controller's current voice_setting value. */
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

/* Read-only attribute (no set handler), formatted as 16-bit hex */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
364
/* Set the auto-accept delay; no range check is applied here. */
static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current auto-accept delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
389
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391                                      size_t count, loff_t *ppos)
392 {
393         struct hci_dev *hdev = file->private_data;
394         char buf[3];
395
396         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
397         buf[1] = '\n';
398         buf[2] = '\0';
399         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400 }
401
402 static ssize_t force_sc_support_write(struct file *file,
403                                       const char __user *user_buf,
404                                       size_t count, loff_t *ppos)
405 {
406         struct hci_dev *hdev = file->private_data;
407         char buf[32];
408         size_t buf_size = min(count, (sizeof(buf)-1));
409         bool enable;
410
411         if (test_bit(HCI_UP, &hdev->flags))
412                 return -EBUSY;
413
414         if (copy_from_user(buf, user_buf, buf_size))
415                 return -EFAULT;
416
417         buf[buf_size] = '\0';
418         if (strtobool(buf, &enable))
419                 return -EINVAL;
420
421         if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
422                 return -EALREADY;
423
424         change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
425
426         return count;
427 }
428
429 static const struct file_operations force_sc_support_fops = {
430         .open           = simple_open,
431         .read           = force_sc_support_read,
432         .write          = force_sc_support_write,
433         .llseek         = default_llseek,
434 };
435
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437                                  size_t count, loff_t *ppos)
438 {
439         struct hci_dev *hdev = file->private_data;
440         char buf[3];
441
442         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443         buf[1] = '\n';
444         buf[2] = '\0';
445         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446 }
447
448 static const struct file_operations sc_only_mode_fops = {
449         .open           = simple_open,
450         .read           = sc_only_mode_read,
451         .llseek         = default_llseek,
452 };
453
/* Set the idle timeout: 0 is accepted as-is, otherwise the value must
 * lie within [500, 3600000] (presumably milliseconds — confirm).
 */
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current idle timeout. */
static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");
481
/* Set the resolvable-private-address rotation timeout (in seconds). */
static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current RPA rotation timeout. */
static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");
512
/* Set the minimum sniff interval: must be non-zero, even, and not
 * larger than the current maximum sniff interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current minimum sniff interval. */
static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");
540
/* Set the maximum sniff interval: must be non-zero, even, and not
 * smaller than the current minimum sniff interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current maximum sniff interval. */
static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
568
/* Set the minimum connection-info age: must be non-zero and not larger
 * than the current maximum age.
 */
static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current minimum connection-info age. */
static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");
596
/* Set the maximum connection-info age: must be non-zero and not
 * smaller than the current minimum age.
 */
static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current maximum connection-info age. */
static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");
624
/* Show the controller identity: the identity address and its type, the
 * local IRK (16 bytes) and the current resolvable private address.
 */
static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
654
/* Show the controller's current random address. */
static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
677
/* Show the controller's configured static address. */
static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
700
701 static ssize_t force_static_address_read(struct file *file,
702                                          char __user *user_buf,
703                                          size_t count, loff_t *ppos)
704 {
705         struct hci_dev *hdev = file->private_data;
706         char buf[3];
707
708         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
709         buf[1] = '\n';
710         buf[2] = '\0';
711         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712 }
713
714 static ssize_t force_static_address_write(struct file *file,
715                                           const char __user *user_buf,
716                                           size_t count, loff_t *ppos)
717 {
718         struct hci_dev *hdev = file->private_data;
719         char buf[32];
720         size_t buf_size = min(count, (sizeof(buf)-1));
721         bool enable;
722
723         if (test_bit(HCI_UP, &hdev->flags))
724                 return -EBUSY;
725
726         if (copy_from_user(buf, user_buf, buf_size))
727                 return -EFAULT;
728
729         buf[buf_size] = '\0';
730         if (strtobool(buf, &enable))
731                 return -EINVAL;
732
733         if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
734                 return -EALREADY;
735
736         change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
737
738         return count;
739 }
740
741 static const struct file_operations force_static_address_fops = {
742         .open           = simple_open,
743         .read           = force_static_address_read,
744         .write          = force_static_address_write,
745         .llseek         = default_llseek,
746 };
747
748 static int white_list_show(struct seq_file *f, void *ptr)
749 {
750         struct hci_dev *hdev = f->private;
751         struct bdaddr_list *b;
752
753         hci_dev_lock(hdev);
754         list_for_each_entry(b, &hdev->le_white_list, list)
755                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756         hci_dev_unlock(hdev);
757
758         return 0;
759 }
760
761 static int white_list_open(struct inode *inode, struct file *file)
762 {
763         return single_open(file, white_list_show, inode->i_private);
764 }
765
766 static const struct file_operations white_list_fops = {
767         .open           = white_list_open,
768         .read           = seq_read,
769         .llseek         = seq_lseek,
770         .release        = single_release,
771 };
772
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774 {
775         struct hci_dev *hdev = f->private;
776         struct list_head *p, *n;
777
778         hci_dev_lock(hdev);
779         list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780                 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781                 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782                            &irk->bdaddr, irk->addr_type,
783                            16, irk->val, &irk->rpa);
784         }
785         hci_dev_unlock(hdev);
786
787         return 0;
788 }
789
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791 {
792         return single_open(file, identity_resolving_keys_show,
793                            inode->i_private);
794 }
795
796 static const struct file_operations identity_resolving_keys_fops = {
797         .open           = identity_resolving_keys_open,
798         .read           = seq_read,
799         .llseek         = seq_lseek,
800         .release        = single_release,
801 };
802
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
804 {
805         struct hci_dev *hdev = f->private;
806         struct list_head *p, *n;
807
808         hci_dev_lock(hdev);
809         list_for_each_safe(p, n, &hdev->long_term_keys) {
810                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814                            __le64_to_cpu(ltk->rand), 16, ltk->val);
815         }
816         hci_dev_unlock(hdev);
817
818         return 0;
819 }
820
821 static int long_term_keys_open(struct inode *inode, struct file *file)
822 {
823         return single_open(file, long_term_keys_show, inode->i_private);
824 }
825
826 static const struct file_operations long_term_keys_fops = {
827         .open           = long_term_keys_open,
828         .read           = seq_read,
829         .llseek         = seq_lseek,
830         .release        = single_release,
831 };
832
/* Set the minimum LE connection interval: valid range 0x0006-0x0c80
 * and must not exceed the current maximum connection interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current minimum LE connection interval. */
static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");
860
/* Set the maximum LE connection interval: valid range 0x0006-0x0c80
 * and must not be below the current minimum connection interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current maximum LE connection interval. */
static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
888
/* Set the LE connection latency: maximum allowed value is 0x01f3. */
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current LE connection latency. */
static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");
916
/* Set the LE supervision timeout: valid range 0x000a-0x0c80. */
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current LE supervision timeout. */
static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");
944
/* Set the LE advertising channel map: a 3-bit mask, 0x01-0x07. */
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current LE advertising channel map. */
static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
972
/* Set the minimum LE advertising interval: valid range 0x0020-0x4000
 * and must not exceed the current maximum advertising interval.
 */
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

/* Return the current minimum LE advertising interval. */
static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");
1000
1001 static int adv_max_interval_set(void *data, u64 val)
1002 {
1003         struct hci_dev *hdev = data;
1004
1005         if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1006                 return -EINVAL;
1007
1008         hci_dev_lock(hdev);
1009         hdev->le_adv_max_interval = val;
1010         hci_dev_unlock(hdev);
1011
1012         return 0;
1013 }
1014
1015 static int adv_max_interval_get(void *data, u64 *val)
1016 {
1017         struct hci_dev *hdev = data;
1018
1019         hci_dev_lock(hdev);
1020         *val = hdev->le_adv_max_interval;
1021         hci_dev_unlock(hdev);
1022
1023         return 0;
1024 }
1025
/* debugfs attribute wiring adv_max_interval_get/_set to a "%llu" file */
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
1028
/* debugfs "device_list" show handler: print one line per entry on the
 * LE connection parameters list as "<bdaddr> <addr_type> <auto_connect>".
 */
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	/* Hold the dev lock while walking le_conn_params */
	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}
1043
/* debugfs open handler: bind device_list_show to the seq_file, passing
 * the hci_dev stashed in i_private.
 */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1048
/* File operations for the read-only "device_list" debugfs entry */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1055
1056 /* ---- HCI requests ---- */
1057
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1059 {
1060         BT_DBG("%s result 0x%2.2x", hdev->name, result);
1061
1062         if (hdev->req_status == HCI_REQ_PEND) {
1063                 hdev->req_result = result;
1064                 hdev->req_status = HCI_REQ_DONE;
1065                 wake_up_interruptible(&hdev->req_wait_q);
1066         }
1067 }
1068
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1070 {
1071         BT_DBG("%s err 0x%2.2x", hdev->name, err);
1072
1073         if (hdev->req_status == HCI_REQ_PEND) {
1074                 hdev->req_result = err;
1075                 hdev->req_status = HCI_REQ_CANCELED;
1076                 wake_up_interruptible(&hdev->req_wait_q);
1077         }
1078 }
1079
/* Consume hdev->recv_evt (the last received event skb) and return it if
 * it matches what the caller waited for.
 *
 * If @event is non-zero the skb is returned when its event code equals
 * @event. Otherwise the skb must be a Command Complete event whose
 * opcode matches @opcode.
 *
 * On success the event header (and, for the opcode path, the
 * hci_ev_cmd_complete header too) has already been pulled off the skb.
 * On mismatch or malformed data the skb is freed and ERR_PTR(-ENODATA)
 * is returned. hdev->recv_evt is set to NULL in all cases.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stored event under the dev lock */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: only the event code matters */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1134
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136                                   const void *param, u8 event, u32 timeout)
1137 {
1138         DECLARE_WAITQUEUE(wait, current);
1139         struct hci_request req;
1140         int err = 0;
1141
1142         BT_DBG("%s", hdev->name);
1143
1144         hci_req_init(&req, hdev);
1145
1146         hci_req_add_ev(&req, opcode, plen, param, event);
1147
1148         hdev->req_status = HCI_REQ_PEND;
1149
1150         err = hci_req_run(&req, hci_req_sync_complete);
1151         if (err < 0)
1152                 return ERR_PTR(err);
1153
1154         add_wait_queue(&hdev->req_wait_q, &wait);
1155         set_current_state(TASK_INTERRUPTIBLE);
1156
1157         schedule_timeout(timeout);
1158
1159         remove_wait_queue(&hdev->req_wait_q, &wait);
1160
1161         if (signal_pending(current))
1162                 return ERR_PTR(-EINTR);
1163
1164         switch (hdev->req_status) {
1165         case HCI_REQ_DONE:
1166                 err = -bt_to_errno(hdev->req_result);
1167                 break;
1168
1169         case HCI_REQ_CANCELED:
1170                 err = -hdev->req_result;
1171                 break;
1172
1173         default:
1174                 err = -ETIMEDOUT;
1175                 break;
1176         }
1177
1178         hdev->req_status = hdev->req_result = 0;
1179
1180         BT_DBG("%s end: err %d", hdev->name, err);
1181
1182         if (err < 0)
1183                 return ERR_PTR(err);
1184
1185         return hci_get_cmd_complete(hdev, opcode, event);
1186 }
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1188
/* Send a single HCI command synchronously and return the resulting
 * Command Complete skb. Convenience wrapper around __hci_cmd_sync_ev()
 * with event == 0.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1195
/* Execute request and wait for completion.
 *
 * Runs @func to build an HCI request, submits it and sleeps
 * (interruptibly, up to @timeout jiffies) until hci_req_sync_complete()
 * signals completion. Callers serialize via hci_req_lock (see
 * hci_req_sync()).
 *
 * Returns 0 on success or when the request produced no commands,
 * -EINTR if interrupted by a signal, -ETIMEDOUT on timeout, or a
 * negative error translated from the controller status.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate the request outcome into an errno */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1259
1260 static int hci_req_sync(struct hci_dev *hdev,
1261                         void (*req)(struct hci_request *req,
1262                                     unsigned long opt),
1263                         unsigned long opt, __u32 timeout)
1264 {
1265         int ret;
1266
1267         if (!test_bit(HCI_UP, &hdev->flags))
1268                 return -ENETDOWN;
1269
1270         /* Serialize all requests */
1271         hci_req_lock(hdev);
1272         ret = __hci_req_sync(hdev, req, opt, timeout);
1273         hci_req_unlock(hdev);
1274
1275         return ret;
1276 }
1277
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1279 {
1280         BT_DBG("%s %ld", req->hdev->name, opt);
1281
1282         /* Reset device */
1283         set_bit(HCI_RESET, &req->hdev->flags);
1284         hci_req_add(req, HCI_OP_RESET, 0, NULL);
1285 }
1286
/* First-stage init for BR/EDR controllers: select packet-based flow
 * control and queue the basic identity/capability reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1300
/* First-stage init for AMP controllers: select block-based flow control
 * and queue the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1326
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1328 {
1329         struct hci_dev *hdev = req->hdev;
1330
1331         BT_DBG("%s %ld", hdev->name, opt);
1332
1333         /* Reset */
1334         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335                 hci_reset_req(req, 0);
1336
1337         switch (hdev->dev_type) {
1338         case HCI_BREDR:
1339                 bredr_init(req);
1340                 break;
1341
1342         case HCI_AMP:
1343                 amp_init(req);
1344                 break;
1345
1346         default:
1347                 BT_ERR("Unknown device type %d", hdev->dev_type);
1348                 break;
1349         }
1350 }
1351
/* Stage-2 setup for BR/EDR capable controllers: read basic settings,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1393
/* Stage-2 setup for LE capable controllers: read LE capabilities and
 * reset the white list.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1417
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419 {
1420         if (lmp_ext_inq_capable(hdev))
1421                 return 0x02;
1422
1423         if (lmp_inq_rssi_capable(hdev))
1424                 return 0x01;
1425
1426         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427             hdev->lmp_subver == 0x0757)
1428                 return 0x01;
1429
1430         if (hdev->manufacturer == 15) {
1431                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432                         return 0x01;
1433                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434                         return 0x01;
1435                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436                         return 0x01;
1437         }
1438
1439         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440             hdev->lmp_subver == 0x1805)
1441                 return 0x01;
1442
1443         return 0x00;
1444 }
1445
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1447 {
1448         u8 mode;
1449
1450         mode = hci_get_inquiry_mode(req->hdev);
1451
1452         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1453 }
1454
/* Build and queue a Set Event Mask command reflecting the controller's
 * capabilities (BR/EDR vs LE-only, SSP, sniff subrating, etc.).
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1531
/* Stage-2 init request: BR/EDR and LE transport setup, local command
 * discovery, SSP/EIR configuration and link-level authentication.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any cached EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1593
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct hci_cp_write_def_link_policy cp;
1598         u16 link_policy = 0;
1599
1600         if (lmp_rswitch_capable(hdev))
1601                 link_policy |= HCI_LP_RSWITCH;
1602         if (lmp_hold_capable(hdev))
1603                 link_policy |= HCI_LP_HOLD;
1604         if (lmp_sniff_capable(hdev))
1605                 link_policy |= HCI_LP_SNIFF;
1606         if (lmp_park_capable(hdev))
1607                 link_policy |= HCI_LP_PARK;
1608
1609         cp.policy = cpu_to_le16(link_policy);
1610         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         struct hci_cp_write_le_host_supported cp;
1617
1618         /* LE-only devices do not support explicit enablement */
1619         if (!lmp_bredr_capable(hdev))
1620                 return;
1621
1622         memset(&cp, 0, sizeof(cp));
1623
1624         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625                 cp.le = 0x01;
1626                 cp.simul = 0x00;
1627         }
1628
1629         if (cp.le != lmp_host_le_capable(hdev))
1630                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631                             &cp);
1632 }
1633
/* Build and queue a Set Event Mask Page 2 command based on CSB and
 * secure-ping related capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1665
/* Stage-3 init request: event masks, stored link key cleanup, link
 * policy, LE event configuration and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1736
/* Stage-4 init request: optional features gated on the supported
 * commands bitmask and capability bits (event mask page 2, codecs,
 * MWS transport, sync train, secure connections).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1766
1767 static int __hci_init(struct hci_dev *hdev)
1768 {
1769         int err;
1770
1771         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772         if (err < 0)
1773                 return err;
1774
1775         /* The Device Under Test (DUT) mode is special and available for
1776          * all controller types. So just create it early on.
1777          */
1778         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780                                     &dut_mode_fops);
1781         }
1782
1783         /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784          * BR/EDR/LE type controllers. AMP controllers only need the
1785          * first stage init.
1786          */
1787         if (hdev->dev_type != HCI_BREDR)
1788                 return 0;
1789
1790         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799         if (err < 0)
1800                 return err;
1801
1802         /* Only create debugfs entries during the initial setup
1803          * phase and not every time the controller gets powered on.
1804          */
1805         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806                 return 0;
1807
1808         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809                             &features_fops);
1810         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811                            &hdev->manufacturer);
1812         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815                             &blacklist_fops);
1816         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817                             &whitelist_fops);
1818         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
1820         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821                             &conn_info_min_age_fops);
1822         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823                             &conn_info_max_age_fops);
1824
1825         if (lmp_bredr_capable(hdev)) {
1826                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827                                     hdev, &inquiry_cache_fops);
1828                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829                                     hdev, &link_keys_fops);
1830                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831                                     hdev, &dev_class_fops);
1832                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833                                     hdev, &voice_setting_fops);
1834         }
1835
1836         if (lmp_ssp_capable(hdev)) {
1837                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838                                     hdev, &auto_accept_delay_fops);
1839                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840                                     hdev, &force_sc_support_fops);
1841                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842                                     hdev, &sc_only_mode_fops);
1843         }
1844
1845         if (lmp_sniff_capable(hdev)) {
1846                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847                                     hdev, &idle_timeout_fops);
1848                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849                                     hdev, &sniff_min_interval_fops);
1850                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851                                     hdev, &sniff_max_interval_fops);
1852         }
1853
1854         if (lmp_le_capable(hdev)) {
1855                 debugfs_create_file("identity", 0400, hdev->debugfs,
1856                                     hdev, &identity_fops);
1857                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858                                     hdev, &rpa_timeout_fops);
1859                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860                                     hdev, &random_address_fops);
1861                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862                                     hdev, &static_address_fops);
1863
1864                 /* For controllers with a public address, provide a debug
1865                  * option to force the usage of the configured static
1866                  * address. By default the public address is used.
1867                  */
1868                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869                         debugfs_create_file("force_static_address", 0644,
1870                                             hdev->debugfs, hdev,
1871                                             &force_static_address_fops);
1872
1873                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874                                   &hdev->le_white_list_size);
1875                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876                                     &white_list_fops);
1877                 debugfs_create_file("identity_resolving_keys", 0400,
1878                                     hdev->debugfs, hdev,
1879                                     &identity_resolving_keys_fops);
1880                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881                                     hdev, &long_term_keys_fops);
1882                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883                                     hdev, &conn_min_interval_fops);
1884                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885                                     hdev, &conn_max_interval_fops);
1886                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887                                     hdev, &conn_latency_fops);
1888                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889                                     hdev, &supervision_timeout_fops);
1890                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891                                     hdev, &adv_channel_map_fops);
1892                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893                                     hdev, &adv_min_interval_fops);
1894                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895                                     hdev, &adv_max_interval_fops);
1896                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897                                     &device_list_fops);
1898                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899                                    hdev->debugfs,
1900                                    &hdev->discov_interleaved_timeout);
1901         }
1902
1903         return 0;
1904 }
1905
/* Minimal init request used for unconfigured controllers: optionally
 * reset the controller, then read the small amount of information
 * (local version, and the BD address when it can be changed) that is
 * needed even before full configuration.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address - only useful when the driver provides a way
         * to program a new public address via set_bdaddr.
         */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1923
1924 static int __hci_unconf_init(struct hci_dev *hdev)
1925 {
1926         int err;
1927
1928         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929                 return 0;
1930
1931         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932         if (err < 0)
1933                 return err;
1934
1935         return 0;
1936 }
1937
1938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1939 {
1940         __u8 scan = opt;
1941
1942         BT_DBG("%s %x", req->hdev->name, scan);
1943
1944         /* Inquiry and Page scans */
1945         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1946 }
1947
1948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1949 {
1950         __u8 auth = opt;
1951
1952         BT_DBG("%s %x", req->hdev->name, auth);
1953
1954         /* Authentication */
1955         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1956 }
1957
1958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1959 {
1960         __u8 encrypt = opt;
1961
1962         BT_DBG("%s %x", req->hdev->name, encrypt);
1963
1964         /* Encryption */
1965         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1966 }
1967
1968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1969 {
1970         __le16 policy = cpu_to_le16(opt);
1971
1972         BT_DBG("%s %x", req->hdev->name, policy);
1973
1974         /* Default link policy */
1975         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1976 }
1977
1978 /* Get HCI device by index.
1979  * Device is held on return. */
1980 struct hci_dev *hci_dev_get(int index)
1981 {
1982         struct hci_dev *hdev = NULL, *d;
1983
1984         BT_DBG("%d", index);
1985
1986         if (index < 0)
1987                 return NULL;
1988
1989         read_lock(&hci_dev_list_lock);
1990         list_for_each_entry(d, &hci_dev_list, list) {
1991                 if (d->id == index) {
1992                         hdev = hci_dev_hold(d);
1993                         break;
1994                 }
1995         }
1996         read_unlock(&hci_dev_list_lock);
1997         return hdev;
1998 }
1999
2000 /* ---- Inquiry support ---- */
2001
2002 bool hci_discovery_active(struct hci_dev *hdev)
2003 {
2004         struct discovery_state *discov = &hdev->discovery;
2005
2006         switch (discov->state) {
2007         case DISCOVERY_FINDING:
2008         case DISCOVERY_RESOLVING:
2009                 return true;
2010
2011         default:
2012                 return false;
2013         }
2014 }
2015
2016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2017 {
2018         int old_state = hdev->discovery.state;
2019
2020         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2021
2022         if (old_state == state)
2023                 return;
2024
2025         hdev->discovery.state = state;
2026
2027         switch (state) {
2028         case DISCOVERY_STOPPED:
2029                 hci_update_background_scan(hdev);
2030
2031                 if (old_state != DISCOVERY_STARTING)
2032                         mgmt_discovering(hdev, 0);
2033                 break;
2034         case DISCOVERY_STARTING:
2035                 break;
2036         case DISCOVERY_FINDING:
2037                 mgmt_discovering(hdev, 1);
2038                 break;
2039         case DISCOVERY_RESOLVING:
2040                 break;
2041         case DISCOVERY_STOPPING:
2042                 break;
2043         }
2044 }
2045
2046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *p, *n;
2050
2051         list_for_each_entry_safe(p, n, &cache->all, all) {
2052                 list_del(&p->all);
2053                 kfree(p);
2054         }
2055
2056         INIT_LIST_HEAD(&cache->unknown);
2057         INIT_LIST_HEAD(&cache->resolve);
2058 }
2059
2060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2061                                                bdaddr_t *bdaddr)
2062 {
2063         struct discovery_state *cache = &hdev->discovery;
2064         struct inquiry_entry *e;
2065
2066         BT_DBG("cache %p, %pMR", cache, bdaddr);
2067
2068         list_for_each_entry(e, &cache->all, all) {
2069                 if (!bacmp(&e->data.bdaddr, bdaddr))
2070                         return e;
2071         }
2072
2073         return NULL;
2074 }
2075
2076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2077                                                        bdaddr_t *bdaddr)
2078 {
2079         struct discovery_state *cache = &hdev->discovery;
2080         struct inquiry_entry *e;
2081
2082         BT_DBG("cache %p, %pMR", cache, bdaddr);
2083
2084         list_for_each_entry(e, &cache->unknown, list) {
2085                 if (!bacmp(&e->data.bdaddr, bdaddr))
2086                         return e;
2087         }
2088
2089         return NULL;
2090 }
2091
2092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2093                                                        bdaddr_t *bdaddr,
2094                                                        int state)
2095 {
2096         struct discovery_state *cache = &hdev->discovery;
2097         struct inquiry_entry *e;
2098
2099         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2100
2101         list_for_each_entry(e, &cache->resolve, list) {
2102                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2103                         return e;
2104                 if (!bacmp(&e->data.bdaddr, bdaddr))
2105                         return e;
2106         }
2107
2108         return NULL;
2109 }
2110
/* Re-position @ie within the name-resolve list (e.g. after its RSSI
 * changed) so the list stays ordered by ascending |RSSI|, i.e. the
 * strongest signals come first.  Entries whose name resolution is
 * already in flight (NAME_PENDING) are never displaced.
 * NOTE(review): presumably called with the discovery/hdev lock held,
 * like the other cache updaters - confirm at the call sites.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        /* Unlink first; the entry is re-inserted at its sorted spot. */
        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                /* Stop before the first settled entry with a weaker or
                 * equal signal; @ie gets inserted right after @pos.
                 */
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
2129
/* Insert or refresh an inquiry result in the discovery cache.
 * @name_known tells whether the remote name arrived with this result.
 * Returns MGMT_DEV_FOUND_* flags describing how the result should be
 * reported (whether name confirmation is still needed and whether the
 * device only supports legacy pairing).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        /* A fresh inquiry response invalidates stored OOB data. */
        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        /* No SSP support means only legacy (PIN) pairing is possible. */
        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                /* RSSI changed while the name is still wanted: re-sort
                 * the resolve list so stronger devices come first.
                 */
                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                /* Allocation failed: we cannot track name state, so ask
                 * user space to confirm the name instead.
                 */
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        /* Name just became known for an entry still queued on the
         * unknown/resolve lists: settle its state and dequeue it.
         */
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}
2191
2192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2193 {
2194         struct discovery_state *cache = &hdev->discovery;
2195         struct inquiry_info *info = (struct inquiry_info *) buf;
2196         struct inquiry_entry *e;
2197         int copied = 0;
2198
2199         list_for_each_entry(e, &cache->all, all) {
2200                 struct inquiry_data *data = &e->data;
2201
2202                 if (copied >= num)
2203                         break;
2204
2205                 bacpy(&info->bdaddr, &data->bdaddr);
2206                 info->pscan_rep_mode    = data->pscan_rep_mode;
2207                 info->pscan_period_mode = data->pscan_period_mode;
2208                 info->pscan_mode        = data->pscan_mode;
2209                 memcpy(info->dev_class, data->dev_class, 3);
2210                 info->clock_offset      = data->clock_offset;
2211
2212                 info++;
2213                 copied++;
2214         }
2215
2216         BT_DBG("cache %p, copied %d", cache, copied);
2217         return copied;
2218 }
2219
/* Request builder: start an HCI Inquiry using the parameters (LAP,
 * length, max responses) supplied through @opt, which carries a
 * pointer to the caller's struct hci_inquiry_req.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        /* An inquiry is already running; don't queue another one. */
        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
2237
2238 int hci_inquiry(void __user *arg)
2239 {
2240         __u8 __user *ptr = arg;
2241         struct hci_inquiry_req ir;
2242         struct hci_dev *hdev;
2243         int err = 0, do_inquiry = 0, max_rsp;
2244         long timeo;
2245         __u8 *buf;
2246
2247         if (copy_from_user(&ir, ptr, sizeof(ir)))
2248                 return -EFAULT;
2249
2250         hdev = hci_dev_get(ir.dev_id);
2251         if (!hdev)
2252                 return -ENODEV;
2253
2254         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2255                 err = -EBUSY;
2256                 goto done;
2257         }
2258
2259         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2260                 err = -EOPNOTSUPP;
2261                 goto done;
2262         }
2263
2264         if (hdev->dev_type != HCI_BREDR) {
2265                 err = -EOPNOTSUPP;
2266                 goto done;
2267         }
2268
2269         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2270                 err = -EOPNOTSUPP;
2271                 goto done;
2272         }
2273
2274         hci_dev_lock(hdev);
2275         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2276             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2277                 hci_inquiry_cache_flush(hdev);
2278                 do_inquiry = 1;
2279         }
2280         hci_dev_unlock(hdev);
2281
2282         timeo = ir.length * msecs_to_jiffies(2000);
2283
2284         if (do_inquiry) {
2285                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2286                                    timeo);
2287                 if (err < 0)
2288                         goto done;
2289
2290                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2291                  * cleared). If it is interrupted by a signal, return -EINTR.
2292                  */
2293                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2294                                 TASK_INTERRUPTIBLE))
2295                         return -EINTR;
2296         }
2297
2298         /* for unlimited number of responses we will use buffer with
2299          * 255 entries
2300          */
2301         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2302
2303         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2304          * copy it to the user space.
2305          */
2306         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2307         if (!buf) {
2308                 err = -ENOMEM;
2309                 goto done;
2310         }
2311
2312         hci_dev_lock(hdev);
2313         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2314         hci_dev_unlock(hdev);
2315
2316         BT_DBG("num_rsp %d", ir.num_rsp);
2317
2318         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2319                 ptr += sizeof(ir);
2320                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2321                                  ir.num_rsp))
2322                         err = -EFAULT;
2323         } else
2324                 err = -EFAULT;
2325
2326         kfree(buf);
2327
2328 done:
2329         hci_dev_put(hdev);
2330         return err;
2331 }
2332
/* Bring an HCI device up: open the transport, run the vendor setup
 * and the HCI init sequence and, on success, mark the device HCI_UP
 * and notify the stack.  On any init failure every queue and work
 * item is flushed and the transport is closed again.  Called with the
 * request lock taken inside; returns 0 or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Device is being unregistered; refuse to power it up. */
        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        /* Open the underlying transport via the driver callback. */
        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        /* One command credit is available until the controller reports
         * its real command buffer count during init.
         */
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                        ret = __hci_unconf_init(hdev);
        }

        if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        /* Full HCI init only for configured, kernel-managed devices. */
        if (!ret) {
                if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
                    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                /* Keep only the raw flag across the failed power-on. */
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
2476
2477 /* ---- HCI ioctl helpers ---- */
2478
/* Power on an HCI device on behalf of the HCIDEVUP ioctl or internal
 * callers.  Takes and drops its own reference on the device.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result into a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
            !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
            !test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_BONDABLE, &hdev->dev_flags);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
2533
2534 /* This function requires the caller holds hdev->lock */
2535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2536 {
2537         struct hci_conn_params *p;
2538
2539         list_for_each_entry(p, &hdev->le_conn_params, list)
2540                 list_del_init(&p->action);
2541
2542         BT_DBG("All LE pending actions cleared");
2543 }
2544
/* Power an HCI device down: cancel and flush all pending work, drain
 * every queue, flush discovery/connection state, optionally reset the
 * controller and close the transport.  Drops the reference taken when
 * the device was brought up.  Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        /* Already down: just stop the command timer and bail out. */
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->rpa_expired);

        /* Drop all discovery, connection and pending-LE-action state. */
        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd  work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* kfree_skb() handles NULL, so no guard is needed here. */
        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags - only HCI_RAW survives across a power cycle. */
        hdev->flags &= BIT(HCI_RAW);
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 0);
                        hci_dev_unlock(hdev);
                }
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
        bacpy(&hdev->random_addr, BDADDR_ANY);

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
2647
2648 int hci_dev_close(__u16 dev)
2649 {
2650         struct hci_dev *hdev;
2651         int err;
2652
2653         hdev = hci_dev_get(dev);
2654         if (!hdev)
2655                 return -ENODEV;
2656
2657         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2658                 err = -EBUSY;
2659                 goto done;
2660         }
2661
2662         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2663                 cancel_delayed_work(&hdev->power_off);
2664
2665         err = hci_dev_do_close(hdev);
2666
2667 done:
2668         hci_dev_put(hdev);
2669         return err;
2670 }
2671
/* Handle the HCIDEVRESET ioctl: drop all queued traffic, flush the
 * inquiry cache and connection table, then issue an HCI Reset to an
 * already-up device.  Returns 0 or a negative errno (-ENETDOWN when
 * the device is not up, -EBUSY for user-channel devices,
 * -EOPNOTSUPP for unconfigured ones).
 */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Restore the single init-time command credit and clear the
         * outstanding packet counters before resetting.
         */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
2720
2721 int hci_dev_reset_stat(__u16 dev)
2722 {
2723         struct hci_dev *hdev;
2724         int ret = 0;
2725
2726         hdev = hci_dev_get(dev);
2727         if (!hdev)
2728                 return -ENODEV;
2729
2730         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2731                 ret = -EBUSY;
2732                 goto done;
2733         }
2734
2735         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2736                 ret = -EOPNOTSUPP;
2737                 goto done;
2738         }
2739
2740         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2741
2742 done:
2743         hci_dev_put(hdev);
2744         return ret;
2745 }
2746
/* Sync the HCI_CONNECTABLE and HCI_DISCOVERABLE flags with a Scan
 * Enable value that was written outside of mgmt (the HCISETSCAN
 * ioctl path) and notify mgmt listeners when a setting changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan maps to the connectable setting; the
	 * test_and_* helpers report whether the flag flipped.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	/* Inquiry scan maps to the discoverable setting. */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot remain set once general
		 * discoverability is gone.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	/* Without a mgmt client there is nobody to notify. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
2782
/* Dispatch the legacy HCISET* device ioctls (auth, encrypt, scan,
 * link policy/mode, packet type, ACL/SCO MTU).
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETSCAN, ...)
 * @arg: user pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno. Only configured BR/EDR
 * controllers that are not claimed by a user channel are accepted.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These ioctls only make sense for BR/EDR transports. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* NOTE(review): the MTU ioctls unpack two __u16 values from the
	 * 32-bit dev_opt via pointer casts; the layout is host-endian
	 * and part of the legacy ioctl ABI — confirm against hci_sock
	 * userspace before touching.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2884
/* Handle the HCIGETDEVLIST ioctl: copy the list of registered
 * controllers (id + flags) back to userspace.
 *
 * @arg: user pointer to a struct hci_dev_list_req whose dev_num
 *       field caps how many entries the caller's buffer can hold.
 *
 * Returns 0 on success, -EFAULT on copy failures, -EINVAL for a zero
 * or oversized request, -ENOMEM when the bounce buffer allocation
 * fails.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	/* copy_to_user() returns the number of uncopied bytes. */
	return err ? -EFAULT : 0;
}
2934
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * controller index requested by userspace and copy it back.
 *
 * Returns 0 on success, -EFAULT on copy failures or -ENODEV for an
 * unknown index.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): assumes hdev->name always fits (and is
	 * NUL-terminated) in di.name — both are fixed-size hciN name
	 * buffers; confirm sizes match in the uapi header.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and have no SCO transport.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2987
2988 /* ---- Interface to HCI drivers ---- */
2989
2990 static int hci_rfkill_set_block(void *data, bool blocked)
2991 {
2992         struct hci_dev *hdev = data;
2993
2994         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2995
2996         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2997                 return -EBUSY;
2998
2999         if (blocked) {
3000                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3001                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3002                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3003                         hci_dev_do_close(hdev);
3004         } else {
3005                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3006         }
3007
3008         return 0;
3009 }
3010
/* rfkill operations for HCI controllers; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3014
/* Deferred power-on work: open the device and then resolve the
 * setup/config state transitions that could not be handled while the
 * init sequence was still running.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 *
	 * A BR/EDR controller without any usable address (neither a
	 * public bdaddr nor a configured static address) also cannot
	 * stay powered.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless userspace claims
		 * the device before the timeout expires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigued Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3073
3074 static void hci_power_off(struct work_struct *work)
3075 {
3076         struct hci_dev *hdev = container_of(work, struct hci_dev,
3077                                             power_off.work);
3078
3079         BT_DBG("%s", hdev->name);
3080
3081         hci_dev_do_close(hdev);
3082 }
3083
3084 static void hci_discov_off(struct work_struct *work)
3085 {
3086         struct hci_dev *hdev;
3087
3088         hdev = container_of(work, struct hci_dev, discov_off.work);
3089
3090         BT_DBG("%s", hdev->name);
3091
3092         mgmt_discoverable_timeout(hdev);
3093 }
3094
3095 void hci_uuids_clear(struct hci_dev *hdev)
3096 {
3097         struct bt_uuid *uuid, *tmp;
3098
3099         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3100                 list_del(&uuid->list);
3101                 kfree(uuid);
3102         }
3103 }
3104
3105 void hci_link_keys_clear(struct hci_dev *hdev)
3106 {
3107         struct list_head *p, *n;
3108
3109         list_for_each_safe(p, n, &hdev->link_keys) {
3110                 struct link_key *key;
3111
3112                 key = list_entry(p, struct link_key, list);
3113
3114                 list_del(p);
3115                 kfree(key);
3116         }
3117 }
3118
3119 void hci_smp_ltks_clear(struct hci_dev *hdev)
3120 {
3121         struct smp_ltk *k, *tmp;
3122
3123         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3124                 list_del(&k->list);
3125                 kfree(k);
3126         }
3127 }
3128
3129 void hci_smp_irks_clear(struct hci_dev *hdev)
3130 {
3131         struct smp_irk *k, *tmp;
3132
3133         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3134                 list_del(&k->list);
3135                 kfree(k);
3136         }
3137 }
3138
3139 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3140 {
3141         struct link_key *k;
3142
3143         list_for_each_entry(k, &hdev->link_keys, list)
3144                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3145                         return k;
3146
3147         return NULL;
3148 }
3149
3150 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3151                                u8 key_type, u8 old_key_type)
3152 {
3153         /* Legacy key */
3154         if (key_type < 0x03)
3155                 return true;
3156
3157         /* Debug keys are insecure so don't store them persistently */
3158         if (key_type == HCI_LK_DEBUG_COMBINATION)
3159                 return false;
3160
3161         /* Changed combination key and there's no previous one */
3162         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3163                 return false;
3164
3165         /* Security mode 3 case */
3166         if (!conn)
3167                 return true;
3168
3169         /* Neither local nor remote side had no-bonding as requirement */
3170         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3171                 return true;
3172
3173         /* Local side had dedicated bonding as requirement */
3174         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3175                 return true;
3176
3177         /* Remote side had dedicated bonding as requirement */
3178         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3179                 return true;
3180
3181         /* If none of the above criteria match, then don't store the key
3182          * persistently */
3183         return false;
3184 }
3185
3186 static u8 ltk_role(u8 type)
3187 {
3188         if (type == SMP_LTK)
3189                 return HCI_ROLE_MASTER;
3190
3191         return HCI_ROLE_SLAVE;
3192 }
3193
3194 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3195                              u8 role)
3196 {
3197         struct smp_ltk *k;
3198
3199         list_for_each_entry(k, &hdev->long_term_keys, list) {
3200                 if (k->ediv != ediv || k->rand != rand)
3201                         continue;
3202
3203                 if (ltk_role(k->type) != role)
3204                         continue;
3205
3206                 return k;
3207         }
3208
3209         return NULL;
3210 }
3211
3212 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3213                                      u8 addr_type, u8 role)
3214 {
3215         struct smp_ltk *k;
3216
3217         list_for_each_entry(k, &hdev->long_term_keys, list)
3218                 if (addr_type == k->bdaddr_type &&
3219                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3220                     ltk_role(k->type) == role)
3221                         return k;
3222
3223         return NULL;
3224 }
3225
/* Find the IRK that resolves the given Resolvable Private Address.
 *
 * First checks the cached ->rpa of every stored IRK; if none matches,
 * tries to cryptographically resolve the RPA against each key with
 * smp_irk_matches() and caches the address on the matching entry.
 * Returns NULL when no stored IRK resolves the address.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: the RPA was resolved before. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: run the resolution function over every key. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
3244
3245 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3246                                      u8 addr_type)
3247 {
3248         struct smp_irk *irk;
3249
3250         /* Identity Address must be public or static random */
3251         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3252                 return NULL;
3253
3254         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3255                 if (addr_type == irk->addr_type &&
3256                     bacmp(bdaddr, &irk->bdaddr) == 0)
3257                         return irk;
3258         }
3259
3260         return NULL;
3261 }
3262
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:       the connection the key was created on, or NULL for
 *              security mode 3 style key notifications
 * @val:        HCI_LINK_KEY_SIZE bytes of key material
 * @type:       link key type as reported by the controller
 * @pin_len:    PIN length used during legacy pairing
 * @persistent: out-parameter, set to whether the key should be kept
 *              across reboots (may be NULL)
 *
 * Returns the stored key entry or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the type of the key it
	 * replaced; otherwise record the reported type.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3309
3310 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3311                             u8 addr_type, u8 type, u8 authenticated,
3312                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3313 {
3314         struct smp_ltk *key, *old_key;
3315         u8 role = ltk_role(type);
3316
3317         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3318         if (old_key)
3319                 key = old_key;
3320         else {
3321                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3322                 if (!key)
3323                         return NULL;
3324                 list_add(&key->list, &hdev->long_term_keys);
3325         }
3326
3327         bacpy(&key->bdaddr, bdaddr);
3328         key->bdaddr_type = addr_type;
3329         memcpy(key->val, tk, sizeof(key->val));
3330         key->authenticated = authenticated;
3331         key->ediv = ediv;
3332         key->rand = rand;
3333         key->enc_size = enc_size;
3334         key->type = type;
3335
3336         return key;
3337 }
3338
3339 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3340                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3341 {
3342         struct smp_irk *irk;
3343
3344         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3345         if (!irk) {
3346                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3347                 if (!irk)
3348                         return NULL;
3349
3350                 bacpy(&irk->bdaddr, bdaddr);
3351                 irk->addr_type = addr_type;
3352
3353                 list_add(&irk->list, &hdev->identity_resolving_keys);
3354         }
3355
3356         memcpy(irk->val, val, 16);
3357         bacpy(&irk->rpa, rpa);
3358
3359         return irk;
3360 }
3361
3362 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3363 {
3364         struct link_key *key;
3365
3366         key = hci_find_link_key(hdev, bdaddr);
3367         if (!key)
3368                 return -ENOENT;
3369
3370         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3371
3372         list_del(&key->list);
3373         kfree(key);
3374
3375         return 0;
3376 }
3377
3378 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3379 {
3380         struct smp_ltk *k, *tmp;
3381         int removed = 0;
3382
3383         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3384                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3385                         continue;
3386
3387                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3388
3389                 list_del(&k->list);
3390                 kfree(k);
3391                 removed++;
3392         }
3393
3394         return removed ? 0 : -ENOENT;
3395 }
3396
3397 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3398 {
3399         struct smp_irk *k, *tmp;
3400
3401         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3402                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3403                         continue;
3404
3405                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3406
3407                 list_del(&k->list);
3408                 kfree(k);
3409         }
3410 }
3411
/* HCI command timer function: fires when the controller failed to
 * answer the last sent command within the command timeout.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	/* Include the opcode of the unanswered command when known. */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one command credit again and kick the command work so
	 * the queue does not stall behind the lost command.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3430
3431 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3432                                           bdaddr_t *bdaddr)
3433 {
3434         struct oob_data *data;
3435
3436         list_for_each_entry(data, &hdev->remote_oob_data, list)
3437                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3438                         return data;
3439
3440         return NULL;
3441 }
3442
3443 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3444 {
3445         struct oob_data *data;
3446
3447         data = hci_find_remote_oob_data(hdev, bdaddr);
3448         if (!data)
3449                 return -ENOENT;
3450
3451         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3452
3453         list_del(&data->list);
3454         kfree(data);
3455
3456         return 0;
3457 }
3458
3459 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3460 {
3461         struct oob_data *data, *n;
3462
3463         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3464                 list_del(&data->list);
3465                 kfree(data);
3466         }
3467 }
3468
3469 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3470                             u8 *hash, u8 *randomizer)
3471 {
3472         struct oob_data *data;
3473
3474         data = hci_find_remote_oob_data(hdev, bdaddr);
3475         if (!data) {
3476                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3477                 if (!data)
3478                         return -ENOMEM;
3479
3480                 bacpy(&data->bdaddr, bdaddr);
3481                 list_add(&data->list, &hdev->remote_oob_data);
3482         }
3483
3484         memcpy(data->hash192, hash, sizeof(data->hash192));
3485         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3486
3487         memset(data->hash256, 0, sizeof(data->hash256));
3488         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3489
3490         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3491
3492         return 0;
3493 }
3494
3495 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3496                                 u8 *hash192, u8 *randomizer192,
3497                                 u8 *hash256, u8 *randomizer256)
3498 {
3499         struct oob_data *data;
3500
3501         data = hci_find_remote_oob_data(hdev, bdaddr);
3502         if (!data) {
3503                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3504                 if (!data)
3505                         return -ENOMEM;
3506
3507                 bacpy(&data->bdaddr, bdaddr);
3508                 list_add(&data->list, &hdev->remote_oob_data);
3509         }
3510
3511         memcpy(data->hash192, hash192, sizeof(data->hash192));
3512         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3513
3514         memcpy(data->hash256, hash256, sizeof(data->hash256));
3515         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3516
3517         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3518
3519         return 0;
3520 }
3521
3522 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3523                                          bdaddr_t *bdaddr, u8 type)
3524 {
3525         struct bdaddr_list *b;
3526
3527         list_for_each_entry(b, bdaddr_list, list) {
3528                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3529                         return b;
3530         }
3531
3532         return NULL;
3533 }
3534
3535 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3536 {
3537         struct list_head *p, *n;
3538
3539         list_for_each_safe(p, n, bdaddr_list) {
3540                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3541
3542                 list_del(p);
3543                 kfree(b);
3544         }
3545 }
3546
3547 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3548 {
3549         struct bdaddr_list *entry;
3550
3551         if (!bacmp(bdaddr, BDADDR_ANY))
3552                 return -EBADF;
3553
3554         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3555                 return -EEXIST;
3556
3557         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3558         if (!entry)
3559                 return -ENOMEM;
3560
3561         bacpy(&entry->bdaddr, bdaddr);
3562         entry->bdaddr_type = type;
3563
3564         list_add(&entry->list, list);
3565
3566         return 0;
3567 }
3568
3569 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3570 {
3571         struct bdaddr_list *entry;
3572
3573         if (!bacmp(bdaddr, BDADDR_ANY)) {
3574                 hci_bdaddr_list_clear(list);
3575                 return 0;
3576         }
3577
3578         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3579         if (!entry)
3580                 return -ENOENT;
3581
3582         list_del(&entry->list);
3583         kfree(entry);
3584
3585         return 0;
3586 }
3587
3588 /* This function requires the caller holds hdev->lock */
3589 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3590                                                bdaddr_t *addr, u8 addr_type)
3591 {
3592         struct hci_conn_params *params;
3593
3594         /* The conn params list only contains identity addresses */
3595         if (!hci_is_identity_address(addr, addr_type))
3596                 return NULL;
3597
3598         list_for_each_entry(params, &hdev->le_conn_params, list) {
3599                 if (bacmp(&params->addr, addr) == 0 &&
3600                     params->addr_type == addr_type) {
3601                         return params;
3602                 }
3603         }
3604
3605         return NULL;
3606 }
3607
3608 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3609 {
3610         struct hci_conn *conn;
3611
3612         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3613         if (!conn)
3614                 return false;
3615
3616         if (conn->dst_type != type)
3617                 return false;
3618
3619         if (conn->state != BT_CONNECTED)
3620                 return false;
3621
3622         return true;
3623 }
3624
3625 /* This function requires the caller holds hdev->lock */
3626 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3627                                                   bdaddr_t *addr, u8 addr_type)
3628 {
3629         struct hci_conn_params *param;
3630
3631         /* The list only contains identity addresses */
3632         if (!hci_is_identity_address(addr, addr_type))
3633                 return NULL;
3634
3635         list_for_each_entry(param, list, action) {
3636                 if (bacmp(&param->addr, addr) == 0 &&
3637                     param->addr_type == addr_type)
3638                         return param;
3639         }
3640
3641         return NULL;
3642 }
3643
3644 /* This function requires the caller holds hdev->lock */
3645 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3646                                             bdaddr_t *addr, u8 addr_type)
3647 {
3648         struct hci_conn_params *params;
3649
3650         if (!hci_is_identity_address(addr, addr_type))
3651                 return NULL;
3652
3653         params = hci_conn_params_lookup(hdev, addr, addr_type);
3654         if (params)
3655                 return params;
3656
3657         params = kzalloc(sizeof(*params), GFP_KERNEL);
3658         if (!params) {
3659                 BT_ERR("Out of memory");
3660                 return NULL;
3661         }
3662
3663         bacpy(&params->addr, addr);
3664         params->addr_type = addr_type;
3665
3666         list_add(&params->list, &hdev->le_conn_params);
3667         INIT_LIST_HEAD(&params->action);
3668
3669         params->conn_min_interval = hdev->le_conn_min_interval;
3670         params->conn_max_interval = hdev->le_conn_max_interval;
3671         params->conn_latency = hdev->le_conn_latency;
3672         params->supervision_timeout = hdev->le_supv_timeout;
3673         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3674
3675         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3676
3677         return params;
3678 }
3679
/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
                        u8 auto_connect)
{
        struct hci_conn_params *params;

        /* Look up or create the connection parameter entry first */
        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -EIO;

        if (params->auto_connect == auto_connect)
                return 0;

        /* Detach from whichever pending-action list the entry is
         * currently on before re-adding it below based on the new
         * auto_connect value.
         */
        list_del_init(&params->action);

        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
                /* No pending action; just refresh background scanning */
                hci_update_background_scan(hdev);
                break;
        case HCI_AUTO_CONN_REPORT:
                list_add(&params->action, &hdev->pend_le_reports);
                hci_update_background_scan(hdev);
                break;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                /* Only queue a pending connection if there is not
                 * already an established LE link to this address.
                 */
                if (!is_connected(hdev, addr, addr_type)) {
                        list_add(&params->action, &hdev->pend_le_conns);
                        hci_update_background_scan(hdev);
                }
                break;
        }

        params->auto_connect = auto_connect;

        BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
               auto_connect);

        return 0;
}
3720
3721 /* This function requires the caller holds hdev->lock */
3722 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3723 {
3724         struct hci_conn_params *params;
3725
3726         params = hci_conn_params_lookup(hdev, addr, addr_type);
3727         if (!params)
3728                 return;
3729
3730         list_del(&params->action);
3731         list_del(&params->list);
3732         kfree(params);
3733
3734         hci_update_background_scan(hdev);
3735
3736         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3737 }
3738
3739 /* This function requires the caller holds hdev->lock */
3740 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3741 {
3742         struct hci_conn_params *params, *tmp;
3743
3744         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3745                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3746                         continue;
3747                 list_del(&params->list);
3748                 kfree(params);
3749         }
3750
3751         BT_DBG("All LE disabled connection parameters were removed");
3752 }
3753
3754 /* This function requires the caller holds hdev->lock */
3755 void hci_conn_params_clear_all(struct hci_dev *hdev)
3756 {
3757         struct hci_conn_params *params, *tmp;
3758
3759         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3760                 list_del(&params->action);
3761                 list_del(&params->list);
3762                 kfree(params);
3763         }
3764
3765         hci_update_background_scan(hdev);
3766
3767         BT_DBG("All LE connection parameters were removed");
3768 }
3769
3770 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3771 {
3772         if (status) {
3773                 BT_ERR("Failed to start inquiry: status %d", status);
3774
3775                 hci_dev_lock(hdev);
3776                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3777                 hci_dev_unlock(hdev);
3778                 return;
3779         }
3780 }
3781
/* Runs after the LE scan has been disabled. Depending on the active
 * discovery type it either marks discovery as stopped or continues
 * with a BR/EDR inquiry (interleaved discovery).
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                /* LE-only discovery is finished once scanning stops */
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                /* Interleaved discovery continues with an inquiry now
                 * that the LE phase is done.
                 */
                hci_req_init(&req, hdev);

                memset(&cp, 0, sizeof(cp));
                memcpy(&cp.lap, lap, sizeof(cp.lap));
                cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                hci_dev_lock(hdev);

                hci_inquiry_cache_flush(hdev);

                err = hci_req_run(&req, inquiry_complete);
                if (err) {
                        BT_ERR("Inquiry request failed: err %d", err);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                }

                hci_dev_unlock(hdev);
                break;
        }
}
3824
3825 static void le_scan_disable_work(struct work_struct *work)
3826 {
3827         struct hci_dev *hdev = container_of(work, struct hci_dev,
3828                                             le_scan_disable.work);
3829         struct hci_request req;
3830         int err;
3831
3832         BT_DBG("%s", hdev->name);
3833
3834         hci_req_init(&req, hdev);
3835
3836         hci_req_add_le_scan_disable(&req);
3837
3838         err = hci_req_run(&req, le_scan_disable_work_complete);
3839         if (err)
3840                 BT_ERR("Disable LE scanning request failed: err %d", err);
3841 }
3842
/* Queue an HCI command on @req to program @rpa as the controller's
 * random address, unless doing so right now would be unsafe.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
            hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
                BT_DBG("Deferring random address update");
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3865
/* Select the own-address type for an LE operation and, when needed,
 * queue an HCI command on @req to update the controller's random
 * address. Priority order: privacy (RPA), required privacy without an
 * IRK (non-resolvable address), forced/implicit static address, and
 * finally the public address. Returns 0 on success or a negative error
 * if a new RPA could not be generated.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                /* Nothing to do if the RPA is still valid and already
                 * programmed into the controller.
                 */
                if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                /* Re-arm the expiry timer for the freshly generated RPA */
                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use an unresolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t urpa;

                get_random_bytes(&urpa, 6);
                urpa.b[5] &= 0x3f;      /* Clear two most significant bits */

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &urpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one.
         */
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
3935
3936 /* Copy the Identity Address of the controller.
3937  *
3938  * If the controller has a public BD_ADDR, then by default use that one.
3939  * If this is a LE only controller without a public address, default to
3940  * the static random address.
3941  *
3942  * For debugging purposes it is possible to force controllers with a
3943  * public address to use the static random address instead.
3944  */
3945 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3946                                u8 *bdaddr_type)
3947 {
3948         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3949             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3950                 bacpy(bdaddr, &hdev->static_addr);
3951                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3952         } else {
3953                 bacpy(bdaddr, &hdev->bdaddr);
3954                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3955         }
3956 }
3957
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and initializes all defaults, locks,
 * lists, work items and queues. Returns NULL on allocation failure.
 * The returned device is freed through hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        /* Baseline capabilities and identification defaults */
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        /* Default sniff interval limits */
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        /* Default LE advertising, scanning and connection parameters */
        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        /* All list heads must be initialized before the device can be
         * registered and used.
         */
        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->whitelist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_white_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        /* Deferred work for RX/TX/command processing and power handling */
        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4034
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* The memory itself is released by the device core's release
         * callback once the last reference is dropped.
         */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4042
/* Register HCI device.
 *
 * Allocates an index, creates the workqueues, crypto context, sysfs
 * device and rfkill switch, then makes the device visible and queues
 * the initial power-on. Returns the assigned index or a negative error
 * (all partially acquired resources are released on failure).
 */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        /* A usable driver must provide at least open, close and send */
        if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->req_workqueue) {
                /* err_wqueue would also destroy req_workqueue, which is
                 * NULL here, so unwind the first workqueue manually.
                 */
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        /* AES cipher context used by the SMP code (RPA generation) */
        hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
                                               CRYPTO_ALG_ASYNC);
        if (IS_ERR(hdev->tfm_aes)) {
                BT_ERR("Unable to create crypto context");
                error = PTR_ERR(hdev->tfm_aes);
                hdev->tfm_aes = NULL;
                goto err_wqueue;
        }

        error = device_add(&hdev->dev);
        if (error < 0)
                goto err_tfm;

        /* rfkill registration is best-effort; the device still works
         * without an rfkill switch.
         */
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                set_bit(HCI_RFKILLED, &hdev->dev_flags);

        set_bit(HCI_SETUP, &hdev->dev_flags);
        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init.
                 */
                set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
        }

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_tfm:
        crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
4157
/* Unregister HCI device.
 *
 * Tears down everything set up by hci_register_dev() in reverse order:
 * removes the device from the global list, closes it, notifies mgmt,
 * releases rfkill/crypto/sysfs/debugfs/workqueues, clears all stored
 * state and finally returns the index to the IDA.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        /* Mark as unregistering first so concurrent paths can bail out */
        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        /* Drop any partially reassembled frames */
        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        if (hdev->tfm_aes)
                crypto_free_blkcipher(hdev->tfm_aes);

        device_del(&hdev->dev);

        debugfs_remove_recursive(hdev->debugfs);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        /* Purge all stored keys, lists and connection parameters */
        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->blacklist);
        hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_white_list);
        hci_conn_params_clear_all(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4226
4227 /* Suspend HCI device */
4228 int hci_suspend_dev(struct hci_dev *hdev)
4229 {
4230         hci_notify(hdev, HCI_DEV_SUSPEND);
4231         return 0;
4232 }
4233 EXPORT_SYMBOL(hci_suspend_dev);
4234
4235 /* Resume HCI device */
4236 int hci_resume_dev(struct hci_dev *hdev)
4237 {
4238         hci_notify(hdev, HCI_DEV_RESUME);
4239         return 0;
4240 }
4241 EXPORT_SYMBOL(hci_resume_dev);
4242
4243 /* Receive frame from HCI drivers */
4244 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4245 {
4246         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4247                       && !test_bit(HCI_INIT, &hdev->flags))) {
4248                 kfree_skb(skb);
4249                 return -ENXIO;
4250         }
4251
4252         /* Incoming skb */
4253         bt_cb(skb)->incoming = 1;
4254
4255         /* Time stamp */
4256         __net_timestamp(skb);
4257
4258         skb_queue_tail(&hdev->rx_q, skb);
4259         queue_work(hdev->workqueue, &hdev->rx_work);
4260
4261         return 0;
4262 }
4263 EXPORT_SYMBOL(hci_recv_frame);
4264
/* Feed @count bytes of raw transport data into the reassembly buffer
 * at slot @index and deliver any frame that becomes complete to
 * hci_recv_frame(). Returns the number of not-yet-consumed bytes
 * (i.e. bytes belonging to the next frame) or a negative error code.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        /* Only ACL, SCO and event packets are reassembled, and the
         * index must address a valid reassembly slot.
         */
        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                /* Start of a new frame: allocate a buffer sized for the
                 * largest packet of this type and record how many
                 * header bytes must arrive before the payload length
                 * is known (tracked in scb->expect).
                 */
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                hdev->reassembly[index] = skb;
        }

        while (count) {
                /* Copy at most the number of bytes still expected */
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                /* Once a complete header has been gathered, derive the
                 * payload length from it and verify the buffer can
                 * actually hold that many more bytes.
                 */
                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len  == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame: hand it off and reset the
                         * slot for the next frame.
                         */
                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(hdev, skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
4372
4373 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4374 {
4375         int rem = 0;
4376
4377         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4378                 return -EILSEQ;
4379
4380         while (count) {
4381                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4382                 if (rem < 0)
4383                         return rem;
4384
4385                 data += (count - rem);
4386                 count = rem;
4387         }
4388
4389         return rem;
4390 }
4391 EXPORT_SYMBOL(hci_recv_fragment);
4392
#define STREAM_REASSEMBLY 0

/* Reassemble frames from a byte stream where each frame is prefixed by
 * a single packet-type indicator byte. Returns the number of bytes not
 * yet consumed or a negative error code.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame: the first byte carries
                         * the HCI packet type.
                         */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        /* Continue the frame already in progress */
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4427
4428 /* ---- Interface to upper protocols ---- */
4429
4430 int hci_register_cb(struct hci_cb *cb)
4431 {
4432         BT_DBG("%p name %s", cb, cb->name);
4433
4434         write_lock(&hci_cb_list_lock);
4435         list_add(&cb->list, &hci_cb_list);
4436         write_unlock(&hci_cb_list_lock);
4437
4438         return 0;
4439 }
4440 EXPORT_SYMBOL(hci_register_cb);
4441
4442 int hci_unregister_cb(struct hci_cb *cb)
4443 {
4444         BT_DBG("%p name %s", cb, cb->name);
4445
4446         write_lock(&hci_cb_list_lock);
4447         list_del(&cb->list);
4448         write_unlock(&hci_cb_list_lock);
4449
4450         return 0;
4451 }
4452 EXPORT_SYMBOL(hci_unregister_cb);
4453
/* Hand one outgoing frame to the driver, after duplicating it to the
 * monitor and (in promiscuous mode) the raw sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        int err;

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        /* On failure free the skb here; assumes the driver did not
         * consume it when returning an error.
         */
        err = hdev->send(hdev, skb);
        if (err < 0) {
                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
                kfree_skb(skb);
        }
}
4480
4481 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4482 {
4483         skb_queue_head_init(&req->cmd_q);
4484         req->hdev = hdev;
4485         req->err = 0;
4486 }
4487
/* Submit a built request: tag its last command with the completion
 * callback and splice all queued commands onto the device command
 * queue. Returns 0 on success, the accumulated build error, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occured during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        /* The completion callback is attached to the last command of
         * the request.
         */
        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;

        /* Splice atomically; the command queue lock is also taken from
         * interrupt context, hence irqsave.
         */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}
4519
4520 bool hci_req_pending(struct hci_dev *hdev)
4521 {
4522         return (hdev->req_status == HCI_REQ_PEND);
4523 }
4524
4525 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4526                                        u32 plen, const void *param)
4527 {
4528         int len = HCI_COMMAND_HDR_SIZE + plen;
4529         struct hci_command_hdr *hdr;
4530         struct sk_buff *skb;
4531
4532         skb = bt_skb_alloc(len, GFP_ATOMIC);
4533         if (!skb)
4534                 return NULL;
4535
4536         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4537         hdr->opcode = cpu_to_le16(opcode);
4538         hdr->plen   = plen;
4539
4540         if (plen)
4541                 memcpy(skb_put(skb, plen), param, plen);
4542
4543         BT_DBG("skb len %d", skb->len);
4544
4545         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4546
4547         return skb;
4548 }
4549
/* Send a single HCI command: build the packet, flag it as a stand-alone
 * request and hand it to the command work queue.  Returns 0 on success
 * or -ENOMEM if the skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4574
/* Queue a command to an asynchronous HCI request, optionally recording
 * @event as the specific completion event to wait for (0 means the
 * default Command Status/Command Complete handling).
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4605
/* Queue a command to an asynchronous HCI request using the default
 * completion event (see hci_req_add_ev for the event-specific variant).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4611
4612 /* Get data from the previously sent command */
4613 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4614 {
4615         struct hci_command_hdr *hdr;
4616
4617         if (!hdev->sent_cmd)
4618                 return NULL;
4619
4620         hdr = (void *) hdev->sent_cmd->data;
4621
4622         if (hdr->opcode != cpu_to_le16(opcode))
4623                 return NULL;
4624
4625         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4626
4627         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4628 }
4629
4630 /* Send ACL data */
4631 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4632 {
4633         struct hci_acl_hdr *hdr;
4634         int len = skb->len;
4635
4636         skb_push(skb, HCI_ACL_HDR_SIZE);
4637         skb_reset_transport_header(skb);
4638         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4639         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4640         hdr->dlen   = cpu_to_le16(len);
4641 }
4642
/* Attach ACL headers to @skb (and every fragment hanging off its
 * frag_list) and queue the lot on @queue for the TX work to send.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Collapse the skb to its linear head; fragments are handled
	 * individually via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address by channel handle, BR/EDR by
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of
		 * ACL_START.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, while the first fragment used
			 * chan->handle on HCI_AMP devices above -- confirm
			 * this asymmetry is intentional.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4700
/* Queue an ACL packet on the channel's data queue and schedule the TX
 * work to push it towards the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4711
/* Send SCO data: prepend the SCO header to @skb, queue it on the
 * connection and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): this assignment truncates if skb->len exceeds the
	 * width of the dlen field -- presumably upper layers cap SCO
	 * payloads at the SCO MTU; confirm.
	 */
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
4732
4733 /* ---- HCI TX task (outgoing data) ---- */
4734
4735 /* HCI Connection scheduler */
/* Connection scheduler: pick the connection of @type that has queued
 * data and the fewest packets in flight (round-robin fairness), and
 * compute its send quota in *@quote from the matching free buffer
 * count.  Returns NULL (and *@quote = 0) when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers evenly; minimum quota of one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4795
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets outstanding, on the assumption that the
 * link has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4816
/* Channel scheduler: among all connections of @type, find the channel
 * whose head packet has the highest priority, breaking ties in favor
 * of the connection with the fewest packets in flight.  *@quote gets a
 * per-round send quota derived from the matching free buffer count.
 * Returns NULL when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only channels at the highest priority seen so
			 * far compete for the quota.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the free buffers evenly; minimum quota of one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4898
/* Anti-starvation pass run after a scheduling round: channels that sent
 * something get their round counter reset, while the head packet of
 * every channel that sent nothing is promoted to just below the
 * maximum priority so it competes better next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset its
			 * counter, no boost needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4948
/* Number of controller buffer blocks consumed by one ACL packet,
 * excluding the ACL header (block-based flow control only).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4954
4955 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4956 {
4957         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4958                 /* ACL tx timeout must be longer than maximum
4959                  * link supervision timeout (40.9 seconds) */
4960                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4961                                        HCI_ACL_TX_TIMEOUT))
4962                         hci_link_tx_to(hdev, ACL_LINK);
4963         }
4964 }
4965
/* Schedule ACL data under packet-based flow control: repeatedly pick
 * the best channel and drain it within its quota while controller ACL
 * buffers remain.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, boost channels that were starved */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5003
5004 static void hci_sched_acl_blk(struct hci_dev *hdev)
5005 {
5006         unsigned int cnt = hdev->block_cnt;
5007         struct hci_chan *chan;
5008         struct sk_buff *skb;
5009         int quote;
5010         u8 type;
5011
5012         __check_timeout(hdev, cnt);
5013
5014         BT_DBG("%s", hdev->name);
5015
5016         if (hdev->dev_type == HCI_AMP)
5017                 type = AMP_LINK;
5018         else
5019                 type = ACL_LINK;
5020
5021         while (hdev->block_cnt > 0 &&
5022                (chan = hci_chan_sent(hdev, type, &quote))) {
5023                 u32 priority = (skb_peek(&chan->data_q))->priority;
5024                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5025                         int blocks;
5026
5027                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5028                                skb->len, skb->priority);
5029
5030                         /* Stop if priority has changed */
5031                         if (skb->priority < priority)
5032                                 break;
5033
5034                         skb = skb_dequeue(&chan->data_q);
5035
5036                         blocks = __get_blocks(hdev, skb);
5037                         if (blocks > hdev->block_cnt)
5038                                 return;
5039
5040                         hci_conn_enter_active_mode(chan->conn,
5041                                                    bt_cb(skb)->force_active);
5042
5043                         hci_send_frame(hdev, skb);
5044                         hdev->acl_last_tx = jiffies;
5045
5046                         hdev->block_cnt -= blocks;
5047                         quote -= blocks;
5048
5049                         chan->sent += blocks;
5050                         chan->conn->sent += blocks;
5051                 }
5052         }
5053
5054         if (cnt != hdev->block_cnt)
5055                 hci_prio_recalculate(hdev, type);
5056 }
5057
5058 static void hci_sched_acl(struct hci_dev *hdev)
5059 {
5060         BT_DBG("%s", hdev->name);
5061
5062         /* No ACL link over BR/EDR controller */
5063         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5064                 return;
5065
5066         /* No AMP link over AMP controller */
5067         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5068                 return;
5069
5070         switch (hdev->flow_ctl_mode) {
5071         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5072                 hci_sched_acl_pkt(hdev);
5073                 break;
5074
5075         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5076                 hci_sched_acl_blk(hdev);
5077                 break;
5078         }
5079 }
5080
5081 /* Schedule SCO */
5082 static void hci_sched_sco(struct hci_dev *hdev)
5083 {
5084         struct hci_conn *conn;
5085         struct sk_buff *skb;
5086         int quote;
5087
5088         BT_DBG("%s", hdev->name);
5089
5090         if (!hci_conn_num(hdev, SCO_LINK))
5091                 return;
5092
5093         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5094                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5095                         BT_DBG("skb %p len %d", skb, skb->len);
5096                         hci_send_frame(hdev, skb);
5097
5098                         conn->sent++;
5099                         if (conn->sent == ~0)
5100                                 conn->sent = 0;
5101                 }
5102         }
5103 }
5104
5105 static void hci_sched_esco(struct hci_dev *hdev)
5106 {
5107         struct hci_conn *conn;
5108         struct sk_buff *skb;
5109         int quote;
5110
5111         BT_DBG("%s", hdev->name);
5112
5113         if (!hci_conn_num(hdev, ESCO_LINK))
5114                 return;
5115
5116         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5117                                                      &quote))) {
5118                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5119                         BT_DBG("skb %p len %d", skb, skb->len);
5120                         hci_send_frame(hdev, skb);
5121
5122                         conn->sent++;
5123                         if (conn->sent == ~0)
5124                                 conn->sent = 0;
5125                 }
5126         }
5127 }
5128
/* Schedule LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow from the ACL buffer count instead.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	/* NOTE(review): this open-codes the same stall detection that
	 * __check_timeout() performs for ACL links.
	 */
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5179
/* TX work: run the per-link-type schedulers (skipped while a user
 * channel owns the device) and then flush any raw packets.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5200
5201 /* ----- HCI RX task (incoming data processing) ----- */
5202
5203 /* ACL data packet */
5204 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5205 {
5206         struct hci_acl_hdr *hdr = (void *) skb->data;
5207         struct hci_conn *conn;
5208         __u16 handle, flags;
5209
5210         skb_pull(skb, HCI_ACL_HDR_SIZE);
5211
5212         handle = __le16_to_cpu(hdr->handle);
5213         flags  = hci_flags(handle);
5214         handle = hci_handle(handle);
5215
5216         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5217                handle, flags);
5218
5219         hdev->stat.acl_rx++;
5220
5221         hci_dev_lock(hdev);
5222         conn = hci_conn_hash_lookup_handle(hdev, handle);
5223         hci_dev_unlock(hdev);
5224
5225         if (conn) {
5226                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5227
5228                 /* Send to upper protocol */
5229                 l2cap_recv_acldata(conn, skb, flags);
5230                 return;
5231         } else {
5232                 BT_ERR("%s ACL packet for unknown connection handle %d",
5233                        hdev->name, handle);
5234         }
5235
5236         kfree_skb(skb);
5237 }
5238
5239 /* SCO data packet */
5240 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5241 {
5242         struct hci_sco_hdr *hdr = (void *) skb->data;
5243         struct hci_conn *conn;
5244         __u16 handle;
5245
5246         skb_pull(skb, HCI_SCO_HDR_SIZE);
5247
5248         handle = __le16_to_cpu(hdr->handle);
5249
5250         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5251
5252         hdev->stat.sco_rx++;
5253
5254         hci_dev_lock(hdev);
5255         conn = hci_conn_hash_lookup_handle(hdev, handle);
5256         hci_dev_unlock(hdev);
5257
5258         if (conn) {
5259                 /* Send to upper protocol */
5260                 sco_recv_scodata(conn, skb);
5261                 return;
5262         } else {
5263                 BT_ERR("%s SCO packet for unknown connection handle %d",
5264                        hdev->name, handle);
5265         }
5266
5267         kfree_skb(skb);
5268 }
5269
5270 static bool hci_req_is_complete(struct hci_dev *hdev)
5271 {
5272         struct sk_buff *skb;
5273
5274         skb = skb_peek(&hdev->cmd_q);
5275         if (!skb)
5276                 return true;
5277
5278         return bt_cb(skb)->req.start;
5279 }
5280
/* Re-queue a clone of the last sent command.  Used when a controller
 * generates a spontaneous reset-complete during init and the pending
 * command would otherwise never complete.  HCI_OP_RESET itself is
 * never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone so hdev->sent_cmd keeps its own reference */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything queued later */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
5302
/* Handle completion of @opcode with @status in the context of the
 * request framework: decide whether the enclosing request has finished
 * and, if so, invoke its completion callback exactly once.  On failure
 * the remaining commands of the request are purged from cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and put
		 * it back.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5368
/* RX work: drain hdev->rx_q, mirroring each packet to the monitor (and
 * to raw sockets in promiscuous mode) before dispatching it to the
 * matching packet handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* A user channel owns the device: the kernel stack does
		 * not process packets itself.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5423
/* Command work: while the controller accepts commands (cmd_cnt > 0),
 * take the next queued command, remember it in hdev->sent_cmd for
 * completion matching, send it and arm the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		/* Keep a copy for hci_sent_cmd_data()/resend handling */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No timeout while a reset is in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
5455
5456 void hci_req_add_le_scan_disable(struct hci_request *req)
5457 {
5458         struct hci_cp_le_set_scan_enable cp;
5459
5460         memset(&cp, 0, sizeof(cp));
5461         cp.enable = LE_SCAN_DISABLE;
5462         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5463 }
5464
5465 static void add_to_white_list(struct hci_request *req,
5466                               struct hci_conn_params *params)
5467 {
5468         struct hci_cp_le_add_to_white_list cp;
5469
5470         cp.bdaddr_type = params->addr_type;
5471         bacpy(&cp.bdaddr, &params->addr);
5472
5473         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5474 }
5475
5476 static u8 update_white_list(struct hci_request *req)
5477 {
5478         struct hci_dev *hdev = req->hdev;
5479         struct hci_conn_params *params;
5480         struct bdaddr_list *b;
5481         uint8_t white_list_entries = 0;
5482
5483         /* Go through the current white list programmed into the
5484          * controller one by one and check if that address is still
5485          * in the list of pending connections or list of devices to
5486          * report. If not present in either list, then queue the
5487          * command to remove it from the controller.
5488          */
5489         list_for_each_entry(b, &hdev->le_white_list, list) {
5490                 struct hci_cp_le_del_from_white_list cp;
5491
5492                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5493                                               &b->bdaddr, b->bdaddr_type) ||
5494                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5495                                               &b->bdaddr, b->bdaddr_type)) {
5496                         white_list_entries++;
5497                         continue;
5498                 }
5499
5500                 cp.bdaddr_type = b->bdaddr_type;
5501                 bacpy(&cp.bdaddr, &b->bdaddr);
5502
5503                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5504                             sizeof(cp), &cp);
5505         }
5506
5507         /* Since all no longer valid white list entries have been
5508          * removed, walk through the list of pending connections
5509          * and ensure that any new device gets programmed into
5510          * the controller.
5511          *
5512          * If the list of the devices is larger than the list of
5513          * available white list entries in the controller, then
5514          * just abort and return filer policy value to not use the
5515          * white list.
5516          */
5517         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5518                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5519                                            &params->addr, params->addr_type))
5520                         continue;
5521
5522                 if (white_list_entries >= hdev->le_white_list_size) {
5523                         /* Select filter policy to accept all advertising */
5524                         return 0x00;
5525                 }
5526
5527                 if (hci_find_irk_by_addr(hdev, &params->addr,
5528                                          params->addr_type)) {
5529                         /* White list can not be used with RPAs */
5530                         return 0x00;
5531                 }
5532
5533                 white_list_entries++;
5534                 add_to_white_list(req, params);
5535         }
5536
5537         /* After adding all new pending connections, walk through
5538          * the list of pending reports and also add these to the
5539          * white list if there is still space.
5540          */
5541         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5542                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5543                                            &params->addr, params->addr_type))
5544                         continue;
5545
5546                 if (white_list_entries >= hdev->le_white_list_size) {
5547                         /* Select filter policy to accept all advertising */
5548                         return 0x00;
5549                 }
5550
5551                 if (hci_find_irk_by_addr(hdev, &params->addr,
5552                                          params->addr_type)) {
5553                         /* White list can not be used with RPAs */
5554                         return 0x00;
5555                 }
5556
5557                 white_list_entries++;
5558                 add_to_white_list(req, params);
5559         }
5560
5561         /* Select filter policy to use white list */
5562         return 0x01;
5563 }
5564
5565 void hci_req_add_le_passive_scan(struct hci_request *req)
5566 {
5567         struct hci_cp_le_set_scan_param param_cp;
5568         struct hci_cp_le_set_scan_enable enable_cp;
5569         struct hci_dev *hdev = req->hdev;
5570         u8 own_addr_type;
5571         u8 filter_policy;
5572
5573         /* Set require_privacy to false since no SCAN_REQ are send
5574          * during passive scanning. Not using an unresolvable address
5575          * here is important so that peer devices using direct
5576          * advertising with our address will be correctly reported
5577          * by the controller.
5578          */
5579         if (hci_update_random_address(req, false, &own_addr_type))
5580                 return;
5581
5582         /* Adding or removing entries from the white list must
5583          * happen before enabling scanning. The controller does
5584          * not allow white list modification while scanning.
5585          */
5586         filter_policy = update_white_list(req);
5587
5588         memset(&param_cp, 0, sizeof(param_cp));
5589         param_cp.type = LE_SCAN_PASSIVE;
5590         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5591         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5592         param_cp.own_address_type = own_addr_type;
5593         param_cp.filter_policy = filter_policy;
5594         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5595                     &param_cp);
5596
5597         memset(&enable_cp, 0, sizeof(enable_cp));
5598         enable_cp.enable = LE_SCAN_ENABLE;
5599         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5600         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5601                     &enable_cp);
5602 }
5603
5604 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5605 {
5606         if (status)
5607                 BT_DBG("HCI request failed to update background scanning: "
5608                        "status 0x%2.2x", status);
5609 }
5610
5611 /* This function controls the background scanning based on hdev->pend_le_conns
5612  * list. If there are pending LE connection we start the background scanning,
5613  * otherwise we stop it.
5614  *
5615  * This function requires the caller holds hdev->lock.
5616  */
5617 void hci_update_background_scan(struct hci_dev *hdev)
5618 {
5619         struct hci_request req;
5620         struct hci_conn *conn;
5621         int err;
5622
5623         if (!test_bit(HCI_UP, &hdev->flags) ||
5624             test_bit(HCI_INIT, &hdev->flags) ||
5625             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5626             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5627             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5628             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5629                 return;
5630
5631         /* No point in doing scanning if LE support hasn't been enabled */
5632         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5633                 return;
5634
5635         /* If discovery is active don't interfere with it */
5636         if (hdev->discovery.state != DISCOVERY_STOPPED)
5637                 return;
5638
5639         hci_req_init(&req, hdev);
5640
5641         if (list_empty(&hdev->pend_le_conns) &&
5642             list_empty(&hdev->pend_le_reports)) {
5643                 /* If there is no pending LE connections or devices
5644                  * to be scanned for, we should stop the background
5645                  * scanning.
5646                  */
5647
5648                 /* If controller is not scanning we are done. */
5649                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5650                         return;
5651
5652                 hci_req_add_le_scan_disable(&req);
5653
5654                 BT_DBG("%s stopping background scanning", hdev->name);
5655         } else {
5656                 /* If there is at least one pending LE connection, we should
5657                  * keep the background scan running.
5658                  */
5659
5660                 /* If controller is connecting, we should not start scanning
5661                  * since some controllers are not able to scan and connect at
5662                  * the same time.
5663                  */
5664                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5665                 if (conn)
5666                         return;
5667
5668                 /* If controller is currently scanning, we stop it to ensure we
5669                  * don't miss any advertising (due to duplicates filter).
5670                  */
5671                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5672                         hci_req_add_le_scan_disable(&req);
5673
5674                 hci_req_add_le_passive_scan(&req);
5675
5676                 BT_DBG("%s starting background scanning", hdev->name);
5677         }
5678
5679         err = hci_req_run(&req, update_background_scan_complete);
5680         if (err)
5681                 BT_ERR("Failed to run HCI request: err %d", err);
5682 }