/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2
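
/* Lifecycle of a synchronous request: req_status is set to HCI_REQ_PEND
 * when a request is issued, moved to HCI_REQ_DONE by
 * hci_req_sync_complete() when the controller answers (waking up the
 * waiter), or to HCI_REQ_CANCELED by hci_req_cancel() on error.
 */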

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
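
/* Illustrative usage of the attribute above (assuming debugfs is
 * mounted at the conventional /sys/kernel/debug path; the mount point
 * and the device name "hci0" are examples):
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" while the device is up sends HCI_OP_ENABLE_DUT_MODE;
 * writing "N" issues HCI_OP_RESET to leave Device Under Test mode.
 */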

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

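/* Sniff intervals are expressed in baseband slots of 0.625 ms and must
 * be an even number of slots, hence the "val % 2" rejection in the two
 * setters below; min and max are additionally kept consistent with
 * each other.
 */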
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

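/* LE connection intervals are in units of 1.25 ms; the accepted range
 * 0x0006-0x0c80 corresponds to 7.5 ms through 4 s.
 */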
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

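/* LE connection latency is a plain count of connection events the
 * slave may skip; 0x01f3 (499) is the maximum value the specification
 * allows.
 */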
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

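/* LE supervision timeout is in units of 10 ms; the accepted range
 * 0x000a-0x0c80 corresponds to 100 ms through 32 s.
 */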
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

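/* LE advertising intervals are in units of 0.625 ms; the accepted
 * range 0x0020-0x4000 corresponds to 20 ms through 10.24 s.
 */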
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
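
/* Illustrative sketch (not part of this file) of how a caller drives a
 * synchronous command with the helper above; everything except the
 * functions defined in this file is an assumption for the example:
 *
 *   struct sk_buff *skb;
 *   u8 status;
 *
 *   hci_req_lock(hdev);
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *   hci_req_unlock(hdev);
 *
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *
 *   status = skb->data[0];
 *   kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters, with the
 * status byte first; dut_mode_write() above follows this pattern.
 */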

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
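
/* hci_reset_req() only queues HCI_OP_RESET; it runs when handed to
 * __hci_req_sync() or hci_req_sync() as the request builder, and
 * hci_init1_req() below also calls it directly.
 */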

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
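
/* The value computed above becomes the parameter of
 * HCI_OP_WRITE_INQUIRY_MODE: 0x00 standard inquiry results, 0x01
 * results with RSSI, 0x02 results with RSSI or extended inquiry
 * result. The manufacturer/revision checks whitelist controllers that
 * support RSSI results without advertising the LMP feature bit.
 */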

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
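
/* The events[] array built above is the 8-octet parameter of the HCI
 * Set Event Mask command: octet 0 carries event mask bits 0-7, octet 1
 * bits 8-15, and so on, matching the bit assignments in the Core
 * specification.
 */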
1531
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1533 {
1534         struct hci_dev *hdev = req->hdev;
1535
1536         if (lmp_bredr_capable(hdev))
1537                 bredr_setup(req);
1538         else
1539                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1540
1541         if (lmp_le_capable(hdev))
1542                 le_setup(req);
1543
1544         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545          * local supported commands HCI command.
1546          */
1547         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1549
1550         if (lmp_ssp_capable(hdev)) {
1551                 /* When SSP is available, then the host features page
1552                  * should also be available as well. However some
1553                  * controllers list the max_page as 0 as long as SSP
1554                  * has not been enabled. To achieve proper debugging
1555                  * output, force the minimum max_page to 1 at least.
1556                  */
1557                 hdev->max_page = 0x01;
1558
1559                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560                         u8 mode = 0x01;
1561                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562                                     sizeof(mode), &mode);
1563                 } else {
1564                         struct hci_cp_write_eir cp;
1565
1566                         memset(hdev->eir, 0, sizeof(hdev->eir));
1567                         memset(&cp, 0, sizeof(cp));
1568
1569                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1570                 }
1571         }
1572
1573         if (lmp_inq_rssi_capable(hdev))
1574                 hci_setup_inquiry_mode(req);
1575
1576         if (lmp_inq_tx_pwr_capable(hdev))
1577                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1578
1579         if (lmp_ext_feat_capable(hdev)) {
1580                 struct hci_cp_read_local_ext_features cp;
1581
1582                 cp.page = 0x01;
1583                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584                             sizeof(cp), &cp);
1585         }
1586
1587         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588                 u8 enable = 1;
1589                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590                             &enable);
1591         }
1592 }
1593
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct hci_cp_write_def_link_policy cp;
1598         u16 link_policy = 0;
1599
1600         if (lmp_rswitch_capable(hdev))
1601                 link_policy |= HCI_LP_RSWITCH;
1602         if (lmp_hold_capable(hdev))
1603                 link_policy |= HCI_LP_HOLD;
1604         if (lmp_sniff_capable(hdev))
1605                 link_policy |= HCI_LP_SNIFF;
1606         if (lmp_park_capable(hdev))
1607                 link_policy |= HCI_LP_PARK;
1608
1609         cp.policy = cpu_to_le16(link_policy);
1610         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         struct hci_cp_write_le_host_supported cp;
1617
1618         /* LE-only devices do not support explicit enablement */
1619         if (!lmp_bredr_capable(hdev))
1620                 return;
1621
1622         memset(&cp, 0, sizeof(cp));
1623
1624         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625                 cp.le = 0x01;
1626                 cp.simul = 0x00;
1627         }
1628
1629         if (cp.le != lmp_host_le_capable(hdev))
1630                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631                             &cp);
1632 }
1633
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1635 {
1636         struct hci_dev *hdev = req->hdev;
1637         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638
1639         /* If Connectionless Slave Broadcast master role is supported
1640          * enable all necessary events for it.
1641          */
1642         if (lmp_csb_master_capable(hdev)) {
1643                 events[1] |= 0x40;      /* Triggered Clock Capture */
1644                 events[1] |= 0x80;      /* Synchronization Train Complete */
1645                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1646                 events[2] |= 0x20;      /* CSB Channel Map Change */
1647         }
1648
1649         /* If Connectionless Slave Broadcast slave role is supported
1650          * enable all necessary events for it.
1651          */
1652         if (lmp_csb_slave_capable(hdev)) {
1653                 events[2] |= 0x01;      /* Synchronization Train Received */
1654                 events[2] |= 0x02;      /* CSB Receive */
1655                 events[2] |= 0x04;      /* CSB Timeout */
1656                 events[2] |= 0x08;      /* Truncated Page Complete */
1657         }
1658
1659         /* Enable Authenticated Payload Timeout Expired event if supported */
1660         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1661                 events[2] |= 0x80;
1662
1663         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664 }
1665
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1667 {
1668         struct hci_dev *hdev = req->hdev;
1669         u8 p;
1670
1671         hci_setup_event_mask(req);
1672
1673         /* Some Broadcom based Bluetooth controllers do not support the
1674          * Delete Stored Link Key command. They are clearly indicating its
1675          * absence in the bit mask of supported commands.
1676          *
1677          * Check the supported commands and only if the the command is marked
1678          * as supported send it. If not supported assume that the controller
1679          * does not have actual support for stored link keys which makes this
1680          * command redundant anyway.
1681          *
1682          * Some controllers indicate that they support handling deleting
1683          * stored link keys, but they don't. The quirk lets a driver
1684          * just disable this command.
1685          */
1686         if (hdev->commands[6] & 0x80 &&
1687             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688                 struct hci_cp_delete_stored_link_key cp;
1689
1690                 bacpy(&cp.bdaddr, BDADDR_ANY);
1691                 cp.delete_all = 0x01;
1692                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693                             sizeof(cp), &cp);
1694         }
1695
1696         if (hdev->commands[5] & 0x10)
1697                 hci_setup_link_policy(req);
1698
1699         if (lmp_le_capable(hdev)) {
1700                 u8 events[8];
1701
1702                 memset(events, 0, sizeof(events));
1703                 events[0] = 0x0f;
1704
1705                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706                         events[0] |= 0x10;      /* LE Long Term Key Request */
1707
1708                 /* If controller supports the Connection Parameters Request
1709                  * Link Layer Procedure, enable the corresponding event.
1710                  */
1711                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712                         events[0] |= 0x20;      /* LE Remote Connection
1713                                                  * Parameter Request
1714                                                  */
1715
1716                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717                             events);
1718
1719                 if (hdev->commands[25] & 0x40) {
1720                         /* Read LE Advertising Channel TX Power */
1721                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722                 }
1723
1724                 hci_set_le_support(req);
1725         }
1726
1727         /* Read features beyond page 1 if available */
1728         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729                 struct hci_cp_read_local_ext_features cp;
1730
1731                 cp.page = p;
1732                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733                             sizeof(cp), &cp);
1734         }
1735 }
1736
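/* Fourth and final stage of initialization: issue the commands that
 * depend on the supported-commands and feature information gathered
 * in the earlier stages.
 */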
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738 {
1739         struct hci_dev *hdev = req->hdev;
1740
1741         /* Set event mask page 2 if the HCI command for it is supported */
1742         if (hdev->commands[22] & 0x04)
1743                 hci_set_event_mask_page_2(req);
1744
1745         /* Read local codec list if the HCI command is supported */
1746         if (hdev->commands[29] & 0x20)
1747                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
1749         /* Get MWS transport configuration if the HCI command is supported */
1750         if (hdev->commands[30] & 0x08)
1751                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
1753         /* Check for Synchronization Train support */
1754         if (lmp_sync_train_capable(hdev))
1755                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1756
1757         /* Enable Secure Connections if supported and configured */
1758         if ((lmp_sc_capable(hdev) ||
1759              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761                 u8 support = 0x01;
1762                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763                             sizeof(support), &support);
1764         }
1765 }
1766
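/* Full initialization sequence for a controller. The first stage is
 * common to all controller types; AMP controllers stop after it,
 * while BR/EDR/LE controllers continue with stages two to four. The
 * debugfs entries are created only once, during the initial setup
 * phase.
 */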
1767 static int __hci_init(struct hci_dev *hdev)
1768 {
1769         int err;
1770
1771         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772         if (err < 0)
1773                 return err;
1774
1775         /* The Device Under Test (DUT) mode is special and available for
1776          * all controller types. So just create it early on.
1777          */
1778         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780                                     &dut_mode_fops);
1781         }
1782
        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
1787         if (hdev->dev_type != HCI_BREDR)
1788                 return 0;
1789
1790         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799         if (err < 0)
1800                 return err;
1801
1802         /* Only create debugfs entries during the initial setup
1803          * phase and not every time the controller gets powered on.
1804          */
1805         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806                 return 0;
1807
1808         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809                             &features_fops);
1810         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811                            &hdev->manufacturer);
1812         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815                             &blacklist_fops);
1816         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817                             &whitelist_fops);
1818         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
1820         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821                             &conn_info_min_age_fops);
1822         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823                             &conn_info_max_age_fops);
1824
1825         if (lmp_bredr_capable(hdev)) {
1826                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827                                     hdev, &inquiry_cache_fops);
1828                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829                                     hdev, &link_keys_fops);
1830                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831                                     hdev, &dev_class_fops);
1832                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833                                     hdev, &voice_setting_fops);
1834         }
1835
1836         if (lmp_ssp_capable(hdev)) {
1837                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838                                     hdev, &auto_accept_delay_fops);
1839                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840                                     hdev, &force_sc_support_fops);
1841                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842                                     hdev, &sc_only_mode_fops);
1843         }
1844
1845         if (lmp_sniff_capable(hdev)) {
1846                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847                                     hdev, &idle_timeout_fops);
1848                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849                                     hdev, &sniff_min_interval_fops);
1850                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851                                     hdev, &sniff_max_interval_fops);
1852         }
1853
1854         if (lmp_le_capable(hdev)) {
1855                 debugfs_create_file("identity", 0400, hdev->debugfs,
1856                                     hdev, &identity_fops);
1857                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858                                     hdev, &rpa_timeout_fops);
1859                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860                                     hdev, &random_address_fops);
1861                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862                                     hdev, &static_address_fops);
1863
1864                 /* For controllers with a public address, provide a debug
1865                  * option to force the usage of the configured static
1866                  * address. By default the public address is used.
1867                  */
1868                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869                         debugfs_create_file("force_static_address", 0644,
1870                                             hdev->debugfs, hdev,
1871                                             &force_static_address_fops);
1872
1873                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874                                   &hdev->le_white_list_size);
1875                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876                                     &white_list_fops);
1877                 debugfs_create_file("identity_resolving_keys", 0400,
1878                                     hdev->debugfs, hdev,
1879                                     &identity_resolving_keys_fops);
1880                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881                                     hdev, &long_term_keys_fops);
1882                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883                                     hdev, &conn_min_interval_fops);
1884                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885                                     hdev, &conn_max_interval_fops);
1886                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887                                     hdev, &conn_latency_fops);
1888                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889                                     hdev, &supervision_timeout_fops);
1890                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891                                     hdev, &adv_channel_map_fops);
1892                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893                                     hdev, &adv_min_interval_fops);
1894                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895                                     hdev, &adv_max_interval_fops);
1896                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897                                     &device_list_fops);
1898                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899                                    hdev->debugfs,
1900                                    &hdev->discov_interleaved_timeout);
1901         }
1902
1903         return 0;
1904 }
1905
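/* Minimal initialization for unconfigured controllers: an optional
 * reset, Read Local Version and, when the driver is able to change
 * the public address, Read BD Address.
 */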
1906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1907 {
1908         struct hci_dev *hdev = req->hdev;
1909
1910         BT_DBG("%s %ld", hdev->name, opt);
1911
1912         /* Reset */
1913         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914                 hci_reset_req(req, 0);
1915
1916         /* Read Local Version */
1917         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1918
1919         /* Read BD Address */
1920         if (hdev->set_bdaddr)
1921                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1922 }
1923
1924 static int __hci_unconf_init(struct hci_dev *hdev)
1925 {
1926         int err;
1927
1928         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929                 return 0;
1930
1931         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932         if (err < 0)
1933                 return err;
1934
1935         return 0;
1936 }
1937
1938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1939 {
1940         __u8 scan = opt;
1941
1942         BT_DBG("%s %x", req->hdev->name, scan);
1943
1944         /* Inquiry and Page scans */
1945         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1946 }
1947
1948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1949 {
1950         __u8 auth = opt;
1951
1952         BT_DBG("%s %x", req->hdev->name, auth);
1953
1954         /* Authentication */
1955         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1956 }
1957
1958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1959 {
1960         __u8 encrypt = opt;
1961
1962         BT_DBG("%s %x", req->hdev->name, encrypt);
1963
1964         /* Encryption */
1965         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1966 }
1967
1968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1969 {
1970         __le16 policy = cpu_to_le16(opt);
1971
1972         BT_DBG("%s %x", req->hdev->name, policy);
1973
1974         /* Default link policy */
1975         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1976 }
1977
/* Get HCI device by index.
 * Device is held on return.
 */
1980 struct hci_dev *hci_dev_get(int index)
1981 {
1982         struct hci_dev *hdev = NULL, *d;
1983
1984         BT_DBG("%d", index);
1985
1986         if (index < 0)
1987                 return NULL;
1988
1989         read_lock(&hci_dev_list_lock);
1990         list_for_each_entry(d, &hci_dev_list, list) {
1991                 if (d->id == index) {
1992                         hdev = hci_dev_hold(d);
1993                         break;
1994                 }
1995         }
1996         read_unlock(&hci_dev_list_lock);
1997         return hdev;
1998 }
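
/* Illustrative usage sketch: every successful hci_dev_get() must be
 * balanced with hci_dev_put() once the reference is no longer needed.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */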
1999
2000 /* ---- Inquiry support ---- */
2001
2002 bool hci_discovery_active(struct hci_dev *hdev)
2003 {
2004         struct discovery_state *discov = &hdev->discovery;
2005
2006         switch (discov->state) {
2007         case DISCOVERY_FINDING:
2008         case DISCOVERY_RESOLVING:
2009                 return true;
2010
2011         default:
2012                 return false;
2013         }
2014 }
2015
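/* Drive the discovery state machine and let the management interface
 * know when discovery effectively starts or stops. Entering
 * DISCOVERY_STOPPED also re-evaluates the background scan.
 */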
2016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2017 {
2018         int old_state = hdev->discovery.state;
2019
2020         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2021
2022         if (old_state == state)
2023                 return;
2024
2025         hdev->discovery.state = state;
2026
2027         switch (state) {
2028         case DISCOVERY_STOPPED:
2029                 hci_update_background_scan(hdev);
2030
2031                 if (old_state != DISCOVERY_STARTING)
2032                         mgmt_discovering(hdev, 0);
2033                 break;
2034         case DISCOVERY_STARTING:
2035                 break;
2036         case DISCOVERY_FINDING:
2037                 mgmt_discovering(hdev, 1);
2038                 break;
2039         case DISCOVERY_RESOLVING:
2040                 break;
2041         case DISCOVERY_STOPPING:
2042                 break;
2043         }
2044 }
2045
2046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *p, *n;
2050
2051         list_for_each_entry_safe(p, n, &cache->all, all) {
2052                 list_del(&p->all);
2053                 kfree(p);
2054         }
2055
2056         INIT_LIST_HEAD(&cache->unknown);
2057         INIT_LIST_HEAD(&cache->resolve);
2058 }
2059
2060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2061                                                bdaddr_t *bdaddr)
2062 {
2063         struct discovery_state *cache = &hdev->discovery;
2064         struct inquiry_entry *e;
2065
2066         BT_DBG("cache %p, %pMR", cache, bdaddr);
2067
2068         list_for_each_entry(e, &cache->all, all) {
2069                 if (!bacmp(&e->data.bdaddr, bdaddr))
2070                         return e;
2071         }
2072
2073         return NULL;
2074 }
2075
2076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2077                                                        bdaddr_t *bdaddr)
2078 {
2079         struct discovery_state *cache = &hdev->discovery;
2080         struct inquiry_entry *e;
2081
2082         BT_DBG("cache %p, %pMR", cache, bdaddr);
2083
2084         list_for_each_entry(e, &cache->unknown, list) {
2085                 if (!bacmp(&e->data.bdaddr, bdaddr))
2086                         return e;
2087         }
2088
2089         return NULL;
2090 }
2091
2092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2093                                                        bdaddr_t *bdaddr,
2094                                                        int state)
2095 {
2096         struct discovery_state *cache = &hdev->discovery;
2097         struct inquiry_entry *e;
2098
2099         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2100
2101         list_for_each_entry(e, &cache->resolve, list) {
2102                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2103                         return e;
2104                 if (!bacmp(&e->data.bdaddr, bdaddr))
2105                         return e;
2106         }
2107
2108         return NULL;
2109 }
2110
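/* Re-insert an entry into the resolve list so that the list stays
 * ordered by signal strength (strongest first) and names get resolved
 * for the closest devices first. Entries with a name request already
 * pending keep their position at the front.
 */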
2111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2112                                       struct inquiry_entry *ie)
2113 {
2114         struct discovery_state *cache = &hdev->discovery;
2115         struct list_head *pos = &cache->resolve;
2116         struct inquiry_entry *p;
2117
2118         list_del(&ie->list);
2119
2120         list_for_each_entry(p, &cache->resolve, list) {
2121                 if (p->name_state != NAME_PENDING &&
2122                     abs(p->data.rssi) >= abs(ie->data.rssi))
2123                         break;
2124                 pos = &p->list;
2125         }
2126
2127         list_add(&ie->list, pos);
2128 }
2129
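/* Add a discovered device to the inquiry cache, or refresh its entry
 * if it is already present, and return the MGMT_DEV_FOUND_* flags
 * that apply to it.
 */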
2130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2131                              bool name_known)
2132 {
2133         struct discovery_state *cache = &hdev->discovery;
2134         struct inquiry_entry *ie;
2135         u32 flags = 0;
2136
2137         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2138
2139         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2140
2141         if (!data->ssp_mode)
2142                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2143
2144         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2145         if (ie) {
2146                 if (!ie->data.ssp_mode)
2147                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2148
2149                 if (ie->name_state == NAME_NEEDED &&
2150                     data->rssi != ie->data.rssi) {
2151                         ie->data.rssi = data->rssi;
2152                         hci_inquiry_cache_update_resolve(hdev, ie);
2153                 }
2154
2155                 goto update;
2156         }
2157
2158         /* Entry not in the cache. Add new one. */
2159         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2160         if (!ie) {
2161                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2162                 goto done;
2163         }
2164
2165         list_add(&ie->all, &cache->all);
2166
2167         if (name_known) {
2168                 ie->name_state = NAME_KNOWN;
2169         } else {
2170                 ie->name_state = NAME_NOT_KNOWN;
2171                 list_add(&ie->list, &cache->unknown);
2172         }
2173
2174 update:
2175         if (name_known && ie->name_state != NAME_KNOWN &&
2176             ie->name_state != NAME_PENDING) {
2177                 ie->name_state = NAME_KNOWN;
2178                 list_del(&ie->list);
2179         }
2180
2181         memcpy(&ie->data, data, sizeof(*data));
2182         ie->timestamp = jiffies;
2183         cache->timestamp = jiffies;
2184
2185         if (ie->name_state == NAME_NOT_KNOWN)
2186                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2187
2188 done:
2189         return flags;
2190 }
2191
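/* Copy up to num cached inquiry results into buf as an array of
 * struct inquiry_info and return the number of copied entries. Runs
 * under hdev->lock and therefore must not sleep.
 */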
2192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2193 {
2194         struct discovery_state *cache = &hdev->discovery;
2195         struct inquiry_info *info = (struct inquiry_info *) buf;
2196         struct inquiry_entry *e;
2197         int copied = 0;
2198
2199         list_for_each_entry(e, &cache->all, all) {
2200                 struct inquiry_data *data = &e->data;
2201
2202                 if (copied >= num)
2203                         break;
2204
2205                 bacpy(&info->bdaddr, &data->bdaddr);
2206                 info->pscan_rep_mode    = data->pscan_rep_mode;
2207                 info->pscan_period_mode = data->pscan_period_mode;
2208                 info->pscan_mode        = data->pscan_mode;
2209                 memcpy(info->dev_class, data->dev_class, 3);
2210                 info->clock_offset      = data->clock_offset;
2211
2212                 info++;
2213                 copied++;
2214         }
2215
2216         BT_DBG("cache %p, copied %d", cache, copied);
2217         return copied;
2218 }
2219
2220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2221 {
2222         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2223         struct hci_dev *hdev = req->hdev;
2224         struct hci_cp_inquiry cp;
2225
2226         BT_DBG("%s", hdev->name);
2227
2228         if (test_bit(HCI_INQUIRY, &hdev->flags))
2229                 return;
2230
2231         /* Start Inquiry */
2232         memcpy(&cp.lap, &ir->lap, 3);
2233         cp.length  = ir->length;
2234         cp.num_rsp = ir->num_rsp;
2235         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2236 }
2237
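/* Handler for the HCIINQUIRY ioctl: flush a stale inquiry cache,
 * trigger a new inquiry when needed, wait for it to finish and copy
 * the cached results back to user space.
 */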
2238 int hci_inquiry(void __user *arg)
2239 {
2240         __u8 __user *ptr = arg;
2241         struct hci_inquiry_req ir;
2242         struct hci_dev *hdev;
2243         int err = 0, do_inquiry = 0, max_rsp;
2244         long timeo;
2245         __u8 *buf;
2246
2247         if (copy_from_user(&ir, ptr, sizeof(ir)))
2248                 return -EFAULT;
2249
2250         hdev = hci_dev_get(ir.dev_id);
2251         if (!hdev)
2252                 return -ENODEV;
2253
2254         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2255                 err = -EBUSY;
2256                 goto done;
2257         }
2258
2259         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2260                 err = -EOPNOTSUPP;
2261                 goto done;
2262         }
2263
2264         if (hdev->dev_type != HCI_BREDR) {
2265                 err = -EOPNOTSUPP;
2266                 goto done;
2267         }
2268
2269         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2270                 err = -EOPNOTSUPP;
2271                 goto done;
2272         }
2273
2274         hci_dev_lock(hdev);
2275         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2276             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2277                 hci_inquiry_cache_flush(hdev);
2278                 do_inquiry = 1;
2279         }
2280         hci_dev_unlock(hdev);
2281
2282         timeo = ir.length * msecs_to_jiffies(2000);
2283
2284         if (do_inquiry) {
2285                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2286                                    timeo);
2287                 if (err < 0)
2288                         goto done;
2289
                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, fail with
                 * -EINTR via done so that the reference acquired by
                 * hci_dev_get() above is released.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
2296         }
2297
        /* For an unlimited number of responses we use a buffer with
         * 255 entries.
         */
2301         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2302
        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and then copy it to user space.
         */
2306         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2307         if (!buf) {
2308                 err = -ENOMEM;
2309                 goto done;
2310         }
2311
2312         hci_dev_lock(hdev);
2313         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2314         hci_dev_unlock(hdev);
2315
2316         BT_DBG("num_rsp %d", ir.num_rsp);
2317
2318         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2319                 ptr += sizeof(ir);
2320                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2321                                  ir.num_rsp))
2322                         err = -EFAULT;
2323         } else
2324                 err = -EFAULT;
2325
2326         kfree(buf);
2327
2328 done:
2329         hci_dev_put(hdev);
2330         return err;
2331 }
2332
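/* Power on a controller: call the driver's setup callback while still
 * in the setup phase, program a configured public address when
 * necessary and run the HCI initialization sequence. On failure, all
 * pending work is flushed and the transport is closed again.
 */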
2333 static int hci_dev_do_open(struct hci_dev *hdev)
2334 {
2335         int ret = 0;
2336
2337         BT_DBG("%s %p", hdev->name, hdev);
2338
2339         hci_req_lock(hdev);
2340
2341         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2342                 ret = -ENODEV;
2343                 goto done;
2344         }
2345
2346         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2347             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2348                 /* Check for rfkill but allow the HCI setup stage to
2349                  * proceed (which in itself doesn't cause any RF activity).
2350                  */
2351                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2352                         ret = -ERFKILL;
2353                         goto done;
2354                 }
2355
2356                 /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
2358                  * be able to determine if there is a public address
2359                  * or not.
2360                  *
2361                  * In case of user channel usage, it is not important
2362                  * if a public address or static random address is
2363                  * available.
2364                  *
2365                  * This check is only valid for BR/EDR controllers
2366                  * since AMP controllers do not have an address.
2367                  */
2368                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2369                     hdev->dev_type == HCI_BREDR &&
2370                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2371                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2372                         ret = -EADDRNOTAVAIL;
2373                         goto done;
2374                 }
2375         }
2376
2377         if (test_bit(HCI_UP, &hdev->flags)) {
2378                 ret = -EALREADY;
2379                 goto done;
2380         }
2381
2382         if (hdev->open(hdev)) {
2383                 ret = -EIO;
2384                 goto done;
2385         }
2386
2387         atomic_set(&hdev->cmd_cnt, 1);
2388         set_bit(HCI_INIT, &hdev->flags);
2389
2390         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2391                 if (hdev->setup)
2392                         ret = hdev->setup(hdev);
2393
2394                 /* The transport driver can set these quirks before
2395                  * creating the HCI device or in its setup callback.
2396                  *
2397                  * In case any of them is set, the controller has to
2398                  * start up as unconfigured.
2399                  */
2400                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2401                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2402                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2403
2404                 /* For an unconfigured controller it is required to
2405                  * read at least the version information provided by
2406                  * the Read Local Version Information command.
2407                  *
2408                  * If the set_bdaddr driver callback is provided, then
2409                  * also the original Bluetooth public device address
2410                  * will be read using the Read BD Address command.
2411                  */
2412                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2413                         ret = __hci_unconf_init(hdev);
2414         }
2415
2416         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2417                 /* If public address change is configured, ensure that
2418                  * the address gets programmed. If the driver does not
2419                  * support changing the public address, fail the power
2420                  * on procedure.
2421                  */
2422                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2423                     hdev->set_bdaddr)
2424                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2425                 else
2426                         ret = -EADDRNOTAVAIL;
2427         }
2428
2429         if (!ret) {
2430                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2431                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2432                         ret = __hci_init(hdev);
2433         }
2434
2435         clear_bit(HCI_INIT, &hdev->flags);
2436
2437         if (!ret) {
2438                 hci_dev_hold(hdev);
2439                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2440                 set_bit(HCI_UP, &hdev->flags);
2441                 hci_notify(hdev, HCI_DEV_UP);
2442                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2443                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2444                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2445                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2446                     hdev->dev_type == HCI_BREDR) {
2447                         hci_dev_lock(hdev);
2448                         mgmt_powered(hdev, 1);
2449                         hci_dev_unlock(hdev);
2450                 }
2451         } else {
2452                 /* Init failed, cleanup */
2453                 flush_work(&hdev->tx_work);
2454                 flush_work(&hdev->cmd_work);
2455                 flush_work(&hdev->rx_work);
2456
2457                 skb_queue_purge(&hdev->cmd_q);
2458                 skb_queue_purge(&hdev->rx_q);
2459
2460                 if (hdev->flush)
2461                         hdev->flush(hdev);
2462
2463                 if (hdev->sent_cmd) {
2464                         kfree_skb(hdev->sent_cmd);
2465                         hdev->sent_cmd = NULL;
2466                 }
2467
2468                 hdev->close(hdev);
2469                 hdev->flags &= BIT(HCI_RAW);
2470         }
2471
2472 done:
2473         hci_req_unlock(hdev);
2474         return ret;
2475 }
2476
2477 /* ---- HCI ioctl helpers ---- */
2478
2479 int hci_dev_open(__u16 dev)
2480 {
2481         struct hci_dev *hdev;
2482         int err;
2483
2484         hdev = hci_dev_get(dev);
2485         if (!hdev)
2486                 return -ENODEV;
2487
2488         /* Devices that are marked as unconfigured can only be powered
2489          * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
2491          * possible.
2492          *
2493          * When this function is called for a user channel, the flag
2494          * HCI_USER_CHANNEL will be set first before attempting to
2495          * open the device.
2496          */
2497         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2498             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2499                 err = -EOPNOTSUPP;
2500                 goto done;
2501         }
2502
2503         /* We need to ensure that no other power on/off work is pending
2504          * before proceeding to call hci_dev_do_open. This is
2505          * particularly important if the setup procedure has not yet
2506          * completed.
2507          */
2508         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2509                 cancel_delayed_work(&hdev->power_off);
2510
2511         /* After this call it is guaranteed that the setup procedure
2512          * has finished. This means that error conditions like RFKILL
2513          * or no valid public or static random address apply.
2514          */
2515         flush_workqueue(hdev->req_workqueue);
2516
2517         /* For controllers not using the management interface and that
2518          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2519          * so that pairing works for them. Once the management interface
2520          * is in use this bit will be cleared again and userspace has
2521          * to explicitly enable it.
2522          */
2523         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2524             !test_bit(HCI_MGMT, &hdev->dev_flags))
2525                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2526
2527         err = hci_dev_do_open(hdev);
2528
2529 done:
2530         hci_dev_put(hdev);
2531         return err;
2532 }
2533
/* This function requires the caller to hold hdev->lock */
2535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2536 {
2537         struct hci_conn_params *p;
2538
2539         list_for_each_entry(p, &hdev->le_conn_params, list) {
2540                 if (p->conn) {
2541                         hci_conn_drop(p->conn);
2542                         p->conn = NULL;
2543                 }
2544                 list_del_init(&p->action);
2545         }
2546
2547         BT_DBG("All LE pending actions cleared");
2548 }
2549
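/* Power off a controller: cancel pending work, flush the inquiry
 * cache and all connections, optionally issue a final HCI Reset, drop
 * every queued packet and call the driver's close callback.
 */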
2550 static int hci_dev_do_close(struct hci_dev *hdev)
2551 {
2552         BT_DBG("%s %p", hdev->name, hdev);
2553
2554         cancel_delayed_work(&hdev->power_off);
2555
2556         hci_req_cancel(hdev, ENODEV);
2557         hci_req_lock(hdev);
2558
2559         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2560                 cancel_delayed_work_sync(&hdev->cmd_timer);
2561                 hci_req_unlock(hdev);
2562                 return 0;
2563         }
2564
2565         /* Flush RX and TX works */
2566         flush_work(&hdev->tx_work);
2567         flush_work(&hdev->rx_work);
2568
2569         if (hdev->discov_timeout > 0) {
2570                 cancel_delayed_work(&hdev->discov_off);
2571                 hdev->discov_timeout = 0;
2572                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2573                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2574         }
2575
2576         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2577                 cancel_delayed_work(&hdev->service_cache);
2578
2579         cancel_delayed_work_sync(&hdev->le_scan_disable);
2580
2581         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2582                 cancel_delayed_work_sync(&hdev->rpa_expired);
2583
2584         hci_dev_lock(hdev);
2585         hci_inquiry_cache_flush(hdev);
2586         hci_pend_le_actions_clear(hdev);
2587         hci_conn_hash_flush(hdev);
2588         hci_dev_unlock(hdev);
2589
2590         hci_notify(hdev, HCI_DEV_DOWN);
2591
2592         if (hdev->flush)
2593                 hdev->flush(hdev);
2594
2595         /* Reset device */
2596         skb_queue_purge(&hdev->cmd_q);
2597         atomic_set(&hdev->cmd_cnt, 1);
2598         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2599             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2600             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2601                 set_bit(HCI_INIT, &hdev->flags);
2602                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2603                 clear_bit(HCI_INIT, &hdev->flags);
2604         }
2605
        /* Flush command work */
2607         flush_work(&hdev->cmd_work);
2608
2609         /* Drop queues */
2610         skb_queue_purge(&hdev->rx_q);
2611         skb_queue_purge(&hdev->cmd_q);
2612         skb_queue_purge(&hdev->raw_q);
2613
2614         /* Drop last sent command */
2615         if (hdev->sent_cmd) {
2616                 cancel_delayed_work_sync(&hdev->cmd_timer);
2617                 kfree_skb(hdev->sent_cmd);
2618                 hdev->sent_cmd = NULL;
2619         }
2620
2621         kfree_skb(hdev->recv_evt);
2622         hdev->recv_evt = NULL;
2623
2624         /* After this point our queues are empty
2625          * and no tasks are scheduled. */
2626         hdev->close(hdev);
2627
2628         /* Clear flags */
2629         hdev->flags &= BIT(HCI_RAW);
2630         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2631
2632         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2633                 if (hdev->dev_type == HCI_BREDR) {
2634                         hci_dev_lock(hdev);
2635                         mgmt_powered(hdev, 0);
2636                         hci_dev_unlock(hdev);
2637                 }
2638         }
2639
2640         /* Controller radio is available but is currently powered down */
2641         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2642
2643         memset(hdev->eir, 0, sizeof(hdev->eir));
2644         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2645         bacpy(&hdev->random_addr, BDADDR_ANY);
2646
2647         hci_req_unlock(hdev);
2648
2649         hci_dev_put(hdev);
2650         return 0;
2651 }
2652
2653 int hci_dev_close(__u16 dev)
2654 {
2655         struct hci_dev *hdev;
2656         int err;
2657
2658         hdev = hci_dev_get(dev);
2659         if (!hdev)
2660                 return -ENODEV;
2661
2662         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2663                 err = -EBUSY;
2664                 goto done;
2665         }
2666
2667         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2668                 cancel_delayed_work(&hdev->power_off);
2669
2670         err = hci_dev_do_close(hdev);
2671
2672 done:
2673         hci_dev_put(hdev);
2674         return err;
2675 }
2676
2677 int hci_dev_reset(__u16 dev)
2678 {
2679         struct hci_dev *hdev;
2680         int ret = 0;
2681
2682         hdev = hci_dev_get(dev);
2683         if (!hdev)
2684                 return -ENODEV;
2685
2686         hci_req_lock(hdev);
2687
2688         if (!test_bit(HCI_UP, &hdev->flags)) {
2689                 ret = -ENETDOWN;
2690                 goto done;
2691         }
2692
2693         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2694                 ret = -EBUSY;
2695                 goto done;
2696         }
2697
2698         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2699                 ret = -EOPNOTSUPP;
2700                 goto done;
2701         }
2702
2703         /* Drop queues */
2704         skb_queue_purge(&hdev->rx_q);
2705         skb_queue_purge(&hdev->cmd_q);
2706
2707         hci_dev_lock(hdev);
2708         hci_inquiry_cache_flush(hdev);
2709         hci_conn_hash_flush(hdev);
2710         hci_dev_unlock(hdev);
2711
2712         if (hdev->flush)
2713                 hdev->flush(hdev);
2714
2715         atomic_set(&hdev->cmd_cnt, 1);
2716         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2717
2718         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2719
2720 done:
2721         hci_req_unlock(hdev);
2722         hci_dev_put(hdev);
2723         return ret;
2724 }
2725
2726 int hci_dev_reset_stat(__u16 dev)
2727 {
2728         struct hci_dev *hdev;
2729         int ret = 0;
2730
2731         hdev = hci_dev_get(dev);
2732         if (!hdev)
2733                 return -ENODEV;
2734
2735         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2736                 ret = -EBUSY;
2737                 goto done;
2738         }
2739
2740         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2741                 ret = -EOPNOTSUPP;
2742                 goto done;
2743         }
2744
2745         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2746
2747 done:
2748         hci_dev_put(hdev);
2749         return ret;
2750 }
2751
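/* Bring the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync after
 * a scan mode change made through the legacy HCISETSCAN ioctl rather
 * than through the management interface, and emit a New Settings
 * event when something actually changed.
 */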
2752 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2753 {
2754         bool conn_changed, discov_changed;
2755
2756         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2757
2758         if ((scan & SCAN_PAGE))
2759                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2760                                                  &hdev->dev_flags);
2761         else
2762                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2763                                                   &hdev->dev_flags);
2764
2765         if ((scan & SCAN_INQUIRY)) {
2766                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2767                                                    &hdev->dev_flags);
2768         } else {
2769                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2770                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2771                                                     &hdev->dev_flags);
2772         }
2773
2774         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2775                 return;
2776
2777         if (conn_changed || discov_changed) {
2778                 /* In case this was disabled through mgmt */
2779                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2780
2781                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2782                         mgmt_update_adv_data(hdev);
2783
2784                 mgmt_new_settings(hdev);
2785         }
2786 }
2787
2788 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2789 {
2790         struct hci_dev *hdev;
2791         struct hci_dev_req dr;
2792         int err = 0;
2793
2794         if (copy_from_user(&dr, arg, sizeof(dr)))
2795                 return -EFAULT;
2796
2797         hdev = hci_dev_get(dr.dev_id);
2798         if (!hdev)
2799                 return -ENODEV;
2800
2801         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2802                 err = -EBUSY;
2803                 goto done;
2804         }
2805
2806         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2807                 err = -EOPNOTSUPP;
2808                 goto done;
2809         }
2810
2811         if (hdev->dev_type != HCI_BREDR) {
2812                 err = -EOPNOTSUPP;
2813                 goto done;
2814         }
2815
2816         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2817                 err = -EOPNOTSUPP;
2818                 goto done;
2819         }
2820
2821         switch (cmd) {
2822         case HCISETAUTH:
2823                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2824                                    HCI_INIT_TIMEOUT);
2825                 break;
2826
2827         case HCISETENCRYPT:
2828                 if (!lmp_encrypt_capable(hdev)) {
2829                         err = -EOPNOTSUPP;
2830                         break;
2831                 }
2832
2833                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2834                         /* Auth must be enabled first */
2835                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2836                                            HCI_INIT_TIMEOUT);
2837                         if (err)
2838                                 break;
2839                 }
2840
2841                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2842                                    HCI_INIT_TIMEOUT);
2843                 break;
2844
2845         case HCISETSCAN:
2846                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2847                                    HCI_INIT_TIMEOUT);
2848
2849                 /* Ensure that the connectable and discoverable states
2850                  * get correctly modified as this was a non-mgmt change.
2851                  */
2852                 if (!err)
2853                         hci_update_scan_state(hdev, dr.dev_opt);
2854                 break;
2855
2856         case HCISETLINKPOL:
2857                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2858                                    HCI_INIT_TIMEOUT);
2859                 break;
2860
2861         case HCISETLINKMODE:
2862                 hdev->link_mode = ((__u16) dr.dev_opt) &
2863                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2864                 break;
2865
2866         case HCISETPTYPE:
2867                 hdev->pkt_type = (__u16) dr.dev_opt;
2868                 break;
2869
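        /* For the MTU ioctls, dev_opt carries two 16-bit values:
         * the packet count in the first word and the MTU in the
         * second one.
         */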
2870         case HCISETACLMTU:
2871                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2872                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2873                 break;
2874
2875         case HCISETSCOMTU:
2876                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2877                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2878                 break;
2879
2880         default:
2881                 err = -EINVAL;
2882                 break;
2883         }
2884
2885 done:
2886         hci_dev_put(hdev);
2887         return err;
2888 }
2889
2890 int hci_get_dev_list(void __user *arg)
2891 {
2892         struct hci_dev *hdev;
2893         struct hci_dev_list_req *dl;
2894         struct hci_dev_req *dr;
2895         int n = 0, size, err;
2896         __u16 dev_num;
2897
2898         if (get_user(dev_num, (__u16 __user *) arg))
2899                 return -EFAULT;
2900
2901         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2902                 return -EINVAL;
2903
2904         size = sizeof(*dl) + dev_num * sizeof(*dr);
2905
2906         dl = kzalloc(size, GFP_KERNEL);
2907         if (!dl)
2908                 return -ENOMEM;
2909
2910         dr = dl->dev_req;
2911
2912         read_lock(&hci_dev_list_lock);
2913         list_for_each_entry(hdev, &hci_dev_list, list) {
2914                 unsigned long flags = hdev->flags;
2915
                /* When the auto-off is configured, the transport is
                 * running, but the device should still be reported as
                 * being down.
                 */
2920                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2921                         flags &= ~BIT(HCI_UP);
2922
2923                 (dr + n)->dev_id  = hdev->id;
2924                 (dr + n)->dev_opt = flags;
2925
2926                 if (++n >= dev_num)
2927                         break;
2928         }
2929         read_unlock(&hci_dev_list_lock);
2930
2931         dl->dev_num = n;
2932         size = sizeof(*dl) + n * sizeof(*dr);
2933
2934         err = copy_to_user(arg, dl, size);
2935         kfree(dl);
2936
2937         return err ? -EFAULT : 0;
2938 }
2939
2940 int hci_get_dev_info(void __user *arg)
2941 {
2942         struct hci_dev *hdev;
2943         struct hci_dev_info di;
2944         unsigned long flags;
2945         int err = 0;
2946
2947         if (copy_from_user(&di, arg, sizeof(di)))
2948                 return -EFAULT;
2949
2950         hdev = hci_dev_get(di.dev_id);
2951         if (!hdev)
2952                 return -ENODEV;
2953
        /* When the auto-off is configured, the transport is
         * running, but the device should still be reported as
         * being down.
         */
2958         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2959                 flags = hdev->flags & ~BIT(HCI_UP);
2960         else
2961                 flags = hdev->flags;
2962
2963         strcpy(di.name, hdev->name);
2964         di.bdaddr   = hdev->bdaddr;
2965         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2966         di.flags    = flags;
2967         di.pkt_type = hdev->pkt_type;
2968         if (lmp_bredr_capable(hdev)) {
2969                 di.acl_mtu  = hdev->acl_mtu;
2970                 di.acl_pkts = hdev->acl_pkts;
2971                 di.sco_mtu  = hdev->sco_mtu;
2972                 di.sco_pkts = hdev->sco_pkts;
2973         } else {
2974                 di.acl_mtu  = hdev->le_mtu;
2975                 di.acl_pkts = hdev->le_pkts;
2976                 di.sco_mtu  = 0;
2977                 di.sco_pkts = 0;
2978         }
2979         di.link_policy = hdev->link_policy;
2980         di.link_mode   = hdev->link_mode;
2981
2982         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2983         memcpy(&di.features, &hdev->features, sizeof(di.features));
2984
2985         if (copy_to_user(arg, &di, sizeof(di)))
2986                 err = -EFAULT;
2987
2988         hci_dev_put(hdev);
2989
2990         return err;
2991 }
2992
2993 /* ---- Interface to HCI drivers ---- */
2994
2995 static int hci_rfkill_set_block(void *data, bool blocked)
2996 {
2997         struct hci_dev *hdev = data;
2998
2999         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3000
3001         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3002                 return -EBUSY;
3003
3004         if (blocked) {
3005                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3006                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3007                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3008                         hci_dev_do_close(hdev);
3009         } else {
3010                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3011         }
3012
3013         return 0;
3014 }
3015
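/* These ops are registered later, from hci_register_dev() via
 * rfkill_alloc(). Soft-blocking the adapter then powers it down
 * through hci_dev_do_close() unless setup is still in progress.
 */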
3016 static const struct rfkill_ops hci_rfkill_ops = {
3017         .set_block = hci_rfkill_set_block,
3018 };
3019
3020 static void hci_power_on(struct work_struct *work)
3021 {
3022         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3023         int err;
3024
3025         BT_DBG("%s", hdev->name);
3026
3027         err = hci_dev_do_open(hdev);
3028         if (err < 0) {
3029                 mgmt_set_powered_failed(hdev, err);
3030                 return;
3031         }
3032
3033         /* During the HCI setup phase, a few error conditions are
3034          * ignored and they need to be checked now. If they are still
3035          * valid, it is important to turn the device back off.
3036          */
3037         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3038             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3039             (hdev->dev_type == HCI_BREDR &&
3040              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3041              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3042                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3043                 hci_dev_do_close(hdev);
3044         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3045                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3046                                    HCI_AUTO_OFF_TIMEOUT);
3047         }
3048
3049         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3050                 /* For unconfigured devices, set the HCI_RAW flag
3051                  * so that userspace can easily identify them.
3052                  */
3053                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3054                         set_bit(HCI_RAW, &hdev->flags);
3055
                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
3063                 mgmt_index_added(hdev);
3064         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Now that the controller is configured, it is
                 * important to clear the HCI_RAW flag.
                 */
3068                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3069                         clear_bit(HCI_RAW, &hdev->flags);
3070
3071                 /* Powering on the controller with HCI_CONFIG set only
3072                  * happens with the transition from unconfigured to
3073                  * configured. This will send the Index Added event.
3074                  */
3075                 mgmt_index_added(hdev);
3076         }
3077 }
3078
3079 static void hci_power_off(struct work_struct *work)
3080 {
3081         struct hci_dev *hdev = container_of(work, struct hci_dev,
3082                                             power_off.work);
3083
3084         BT_DBG("%s", hdev->name);
3085
3086         hci_dev_do_close(hdev);
3087 }
3088
3089 static void hci_discov_off(struct work_struct *work)
3090 {
3091         struct hci_dev *hdev;
3092
3093         hdev = container_of(work, struct hci_dev, discov_off.work);
3094
3095         BT_DBG("%s", hdev->name);
3096
3097         mgmt_discoverable_timeout(hdev);
3098 }
3099
3100 void hci_uuids_clear(struct hci_dev *hdev)
3101 {
3102         struct bt_uuid *uuid, *tmp;
3103
3104         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3105                 list_del(&uuid->list);
3106                 kfree(uuid);
3107         }
3108 }
3109
3110 void hci_link_keys_clear(struct hci_dev *hdev)
3111 {
3112         struct list_head *p, *n;
3113
3114         list_for_each_safe(p, n, &hdev->link_keys) {
3115                 struct link_key *key;
3116
3117                 key = list_entry(p, struct link_key, list);
3118
3119                 list_del(p);
3120                 kfree(key);
3121         }
3122 }
3123
3124 void hci_smp_ltks_clear(struct hci_dev *hdev)
3125 {
3126         struct smp_ltk *k, *tmp;
3127
3128         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3129                 list_del(&k->list);
3130                 kfree(k);
3131         }
3132 }
3133
3134 void hci_smp_irks_clear(struct hci_dev *hdev)
3135 {
3136         struct smp_irk *k, *tmp;
3137
3138         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3139                 list_del(&k->list);
3140                 kfree(k);
3141         }
3142 }
3143
3144 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3145 {
3146         struct link_key *k;
3147
3148         list_for_each_entry(k, &hdev->link_keys, list)
3149                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3150                         return k;
3151
3152         return NULL;
3153 }
3154
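/* Decide whether a new link key should be stored persistently, based
 * on the key type and on the bonding requirements that the local and
 * remote side declared during pairing.
 */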
3155 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3156                                u8 key_type, u8 old_key_type)
3157 {
3158         /* Legacy key */
3159         if (key_type < 0x03)
3160                 return true;
3161
3162         /* Debug keys are insecure so don't store them persistently */
3163         if (key_type == HCI_LK_DEBUG_COMBINATION)
3164                 return false;
3165
3166         /* Changed combination key and there's no previous one */
3167         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3168                 return false;
3169
3170         /* Security mode 3 case */
3171         if (!conn)
3172                 return true;
3173
3174         /* Neither local nor remote side had no-bonding as requirement */
3175         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3176                 return true;
3177
3178         /* Local side had dedicated bonding as requirement */
3179         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3180                 return true;
3181
3182         /* Remote side had dedicated bonding as requirement */
3183         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3184                 return true;
3185
3186         /* If none of the above criteria match, then don't store the key
3187          * persistently */
3188         return false;
3189 }
3190
3191 static u8 ltk_role(u8 type)
3192 {
3193         if (type == SMP_LTK)
3194                 return HCI_ROLE_MASTER;
3195
3196         return HCI_ROLE_SLAVE;
3197 }
3198
3199 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3200                              u8 role)
3201 {
3202         struct smp_ltk *k;
3203
3204         list_for_each_entry(k, &hdev->long_term_keys, list) {
3205                 if (k->ediv != ediv || k->rand != rand)
3206                         continue;
3207
3208                 if (ltk_role(k->type) != role)
3209                         continue;
3210
3211                 return k;
3212         }
3213
3214         return NULL;
3215 }
3216
3217 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3218                                      u8 addr_type, u8 role)
3219 {
3220         struct smp_ltk *k;
3221
3222         list_for_each_entry(k, &hdev->long_term_keys, list)
3223                 if (addr_type == k->bdaddr_type &&
3224                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3225                     ltk_role(k->type) == role)
3226                         return k;
3227
3228         return NULL;
3229 }
3230
3231 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3232 {
3233         struct smp_irk *irk;
3234
3235         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3236                 if (!bacmp(&irk->rpa, rpa))
3237                         return irk;
3238         }
3239
3240         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3241                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3242                         bacpy(&irk->rpa, rpa);
3243                         return irk;
3244                 }
3245         }
3246
3247         return NULL;
3248 }
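/* Note: the lookup above is deliberately two-pass. The first pass is a
 * cheap byte comparison against the RPA most recently resolved for each
 * IRK; only on a miss does the second pass fall back to the AES-based
 * computation inside smp_irk_matches(), caching the freshly resolved
 * RPA so the next lookup hits the fast path.
 */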
3249
3250 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3251                                      u8 addr_type)
3252 {
3253         struct smp_irk *irk;
3254
3255         /* Identity Address must be public or static random */
3256         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3257                 return NULL;
3258
3259         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3260                 if (addr_type == irk->addr_type &&
3261                     bacmp(bdaddr, &irk->bdaddr) == 0)
3262                         return irk;
3263         }
3264
3265         return NULL;
3266 }
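/* The sanity check above follows the Core Specification address
 * layout: a static random address must have the two most significant
 * bits of its most significant byte set, i.e.
 *
 *	(bdaddr->b[5] & 0xc0) == 0xc0
 *
 * Resolvable (01) and non-resolvable (00) private addresses therefore
 * never qualify as an Identity Address.
 */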
3267
3268 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3269                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3270                                   u8 pin_len, bool *persistent)
3271 {
3272         struct link_key *key, *old_key;
3273         u8 old_key_type;
3274
3275         old_key = hci_find_link_key(hdev, bdaddr);
3276         if (old_key) {
3277                 old_key_type = old_key->type;
3278                 key = old_key;
3279         } else {
3280                 old_key_type = conn ? conn->key_type : 0xff;
3281                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3282                 if (!key)
3283                         return NULL;
3284                 list_add(&key->list, &hdev->link_keys);
3285         }
3286
3287         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3288
3289         /* Some buggy controller combinations generate a changed
3290          * combination key for legacy pairing even when there's no
3291          * previous key */
3292         if (type == HCI_LK_CHANGED_COMBINATION &&
3293             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3294                 type = HCI_LK_COMBINATION;
3295                 if (conn)
3296                         conn->key_type = type;
3297         }
3298
3299         bacpy(&key->bdaddr, bdaddr);
3300         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3301         key->pin_len = pin_len;
3302
3303         if (type == HCI_LK_CHANGED_COMBINATION)
3304                 key->type = old_key_type;
3305         else
3306                 key->type = type;
3307
3308         if (persistent)
3309                 *persistent = hci_persistent_key(hdev, conn, type,
3310                                                  old_key_type);
3311
3312         return key;
3313 }
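/* Minimal usage sketch (illustrative only; the 'ev' event structure
 * and 'pin_len' variable are assumptions of the example, not defined
 * here):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key && persistent)
 *		... notify user space so the key survives a restart ...
 *
 * Callers that don't care about persistence may pass NULL for the
 * last argument.
 */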
3314
3315 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3316                             u8 addr_type, u8 type, u8 authenticated,
3317                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3318 {
3319         struct smp_ltk *key, *old_key;
3320         u8 role = ltk_role(type);
3321
3322         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3323         if (old_key)
3324         if (old_key) {
3325                 key = old_key;
3326         } else {
3327                 if (!key)
3328                         return NULL;
3329                 list_add(&key->list, &hdev->long_term_keys);
3330         }
3331
3332         bacpy(&key->bdaddr, bdaddr);
3333         key->bdaddr_type = addr_type;
3334         memcpy(key->val, tk, sizeof(key->val));
3335         key->authenticated = authenticated;
3336         key->ediv = ediv;
3337         key->rand = rand;
3338         key->enc_size = enc_size;
3339         key->type = type;
3340
3341         return key;
3342 }
3343
3344 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3345                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3346 {
3347         struct smp_irk *irk;
3348
3349         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3350         if (!irk) {
3351                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3352                 if (!irk)
3353                         return NULL;
3354
3355                 bacpy(&irk->bdaddr, bdaddr);
3356                 irk->addr_type = addr_type;
3357
3358                 list_add(&irk->list, &hdev->identity_resolving_keys);
3359         }
3360
3361         memcpy(irk->val, val, 16);
3362         bacpy(&irk->rpa, rpa);
3363
3364         return irk;
3365 }
3366
3367 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3368 {
3369         struct link_key *key;
3370
3371         key = hci_find_link_key(hdev, bdaddr);
3372         if (!key)
3373                 return -ENOENT;
3374
3375         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3376
3377         list_del(&key->list);
3378         kfree(key);
3379
3380         return 0;
3381 }
3382
3383 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3384 {
3385         struct smp_ltk *k, *tmp;
3386         int removed = 0;
3387
3388         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3389                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3390                         continue;
3391
3392                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3393
3394                 list_del(&k->list);
3395                 kfree(k);
3396                 removed++;
3397         }
3398
3399         return removed ? 0 : -ENOENT;
3400 }
3401
3402 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3403 {
3404         struct smp_irk *k, *tmp;
3405
3406         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3407                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3408                         continue;
3409
3410                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3411
3412                 list_del(&k->list);
3413                 kfree(k);
3414         }
3415 }
3416
3417 /* HCI command timer function */
3418 static void hci_cmd_timeout(struct work_struct *work)
3419 {
3420         struct hci_dev *hdev = container_of(work, struct hci_dev,
3421                                             cmd_timer.work);
3422
3423         if (hdev->sent_cmd) {
3424                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3425                 u16 opcode = __le16_to_cpu(sent->opcode);
3426
3427                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3428         } else {
3429                 BT_ERR("%s command tx timeout", hdev->name);
3430         }
3431
3432         atomic_set(&hdev->cmd_cnt, 1);
3433         queue_work(hdev->workqueue, &hdev->cmd_work);
3434 }
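/* Recovery note: forcing hdev->cmd_cnt back to 1 above unblocks the
 * command queue, which would otherwise wait forever for a Command
 * Complete/Status event that is never going to arrive; re-queueing
 * cmd_work then lets the next pending command go out.
 */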
3435
3436 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3437                                           bdaddr_t *bdaddr)
3438 {
3439         struct oob_data *data;
3440
3441         list_for_each_entry(data, &hdev->remote_oob_data, list)
3442                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3443                         return data;
3444
3445         return NULL;
3446 }
3447
3448 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3449 {
3450         struct oob_data *data;
3451
3452         data = hci_find_remote_oob_data(hdev, bdaddr);
3453         if (!data)
3454                 return -ENOENT;
3455
3456         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3457
3458         list_del(&data->list);
3459         kfree(data);
3460
3461         return 0;
3462 }
3463
3464 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3465 {
3466         struct oob_data *data, *n;
3467
3468         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3469                 list_del(&data->list);
3470                 kfree(data);
3471         }
3472 }
3473
3474 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3475                             u8 *hash, u8 *randomizer)
3476 {
3477         struct oob_data *data;
3478
3479         data = hci_find_remote_oob_data(hdev, bdaddr);
3480         if (!data) {
3481                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3482                 if (!data)
3483                         return -ENOMEM;
3484
3485                 bacpy(&data->bdaddr, bdaddr);
3486                 list_add(&data->list, &hdev->remote_oob_data);
3487         }
3488
3489         memcpy(data->hash192, hash, sizeof(data->hash192));
3490         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3491
3492         memset(data->hash256, 0, sizeof(data->hash256));
3493         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3494
3495         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3496
3497         return 0;
3498 }
3499
3500 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501                                 u8 *hash192, u8 *randomizer192,
3502                                 u8 *hash256, u8 *randomizer256)
3503 {
3504         struct oob_data *data;
3505
3506         data = hci_find_remote_oob_data(hdev, bdaddr);
3507         if (!data) {
3508                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3509                 if (!data)
3510                         return -ENOMEM;
3511
3512                 bacpy(&data->bdaddr, bdaddr);
3513                 list_add(&data->list, &hdev->remote_oob_data);
3514         }
3515
3516         memcpy(data->hash192, hash192, sizeof(data->hash192));
3517         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3518
3519         memcpy(data->hash256, hash256, sizeof(data->hash256));
3520         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3521
3522         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3523
3524         return 0;
3525 }
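/* The two helpers above differ only in what they store: the legacy
 * variant keeps the P-192 hash/randomizer pair and zeroes the P-256
 * fields, while the extended variant (relevant for Secure Connections)
 * records both pairs as received over the out-of-band channel.
 */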
3526
3527 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3528                                          bdaddr_t *bdaddr, u8 type)
3529 {
3530         struct bdaddr_list *b;
3531
3532         list_for_each_entry(b, bdaddr_list, list) {
3533                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3534                         return b;
3535         }
3536
3537         return NULL;
3538 }
3539
3540 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3541 {
3542         struct bdaddr_list *b, *n;
3543
3544         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3545                 list_del(&b->list);
3546                 kfree(b);
3547         }
3550 }
3551
3552 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3553 {
3554         struct bdaddr_list *entry;
3555
3556         if (!bacmp(bdaddr, BDADDR_ANY))
3557                 return -EBADF;
3558
3559         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3560                 return -EEXIST;
3561
3562         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3563         if (!entry)
3564                 return -ENOMEM;
3565
3566         bacpy(&entry->bdaddr, bdaddr);
3567         entry->bdaddr_type = type;
3568
3569         list_add(&entry->list, list);
3570
3571         return 0;
3572 }
3573
3574 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3575 {
3576         struct bdaddr_list *entry;
3577
3578         if (!bacmp(bdaddr, BDADDR_ANY)) {
3579                 hci_bdaddr_list_clear(list);
3580                 return 0;
3581         }
3582
3583         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3584         if (!entry)
3585                 return -ENOENT;
3586
3587         list_del(&entry->list);
3588         kfree(entry);
3589
3590         return 0;
3591 }
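/* Illustrative use of the bdaddr list helpers (hypothetical call site;
 * error handling shortened):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		...already present, nothing to do...
 *
 *	// passing BDADDR_ANY to the delete helper clears the whole list
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0);
 */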
3592
3593 /* This function requires the caller holds hdev->lock */
3594 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3595                                                bdaddr_t *addr, u8 addr_type)
3596 {
3597         struct hci_conn_params *params;
3598
3599         /* The conn params list only contains identity addresses */
3600         if (!hci_is_identity_address(addr, addr_type))
3601                 return NULL;
3602
3603         list_for_each_entry(params, &hdev->le_conn_params, list) {
3604                 if (bacmp(&params->addr, addr) == 0 &&
3605                     params->addr_type == addr_type) {
3606                         return params;
3607                 }
3608         }
3609
3610         return NULL;
3611 }
3612
3613 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3614 {
3615         struct hci_conn *conn;
3616
3617         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3618         if (!conn)
3619                 return false;
3620
3621         if (conn->dst_type != type)
3622                 return false;
3623
3624         if (conn->state != BT_CONNECTED)
3625                 return false;
3626
3627         return true;
3628 }
3629
3630 /* This function requires the caller holds hdev->lock */
3631 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3632                                                   bdaddr_t *addr, u8 addr_type)
3633 {
3634         struct hci_conn_params *param;
3635
3636         /* The list only contains identity addresses */
3637         if (!hci_is_identity_address(addr, addr_type))
3638                 return NULL;
3639
3640         list_for_each_entry(param, list, action) {
3641                 if (bacmp(&param->addr, addr) == 0 &&
3642                     param->addr_type == addr_type)
3643                         return param;
3644         }
3645
3646         return NULL;
3647 }
3648
3649 /* This function requires the caller holds hdev->lock */
3650 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3651                                             bdaddr_t *addr, u8 addr_type)
3652 {
3653         struct hci_conn_params *params;
3654
3655         if (!hci_is_identity_address(addr, addr_type))
3656                 return NULL;
3657
3658         params = hci_conn_params_lookup(hdev, addr, addr_type);
3659         if (params)
3660                 return params;
3661
3662         params = kzalloc(sizeof(*params), GFP_KERNEL);
3663         if (!params) {
3664                 BT_ERR("Out of memory");
3665                 return NULL;
3666         }
3667
3668         bacpy(&params->addr, addr);
3669         params->addr_type = addr_type;
3670
3671         list_add(&params->list, &hdev->le_conn_params);
3672         INIT_LIST_HEAD(&params->action);
3673
3674         params->conn_min_interval = hdev->le_conn_min_interval;
3675         params->conn_max_interval = hdev->le_conn_max_interval;
3676         params->conn_latency = hdev->le_conn_latency;
3677         params->supervision_timeout = hdev->le_supv_timeout;
3678         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3679
3680         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3681
3682         return params;
3683 }
3684
3685 /* This function requires the caller holds hdev->lock */
3686 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3687                         u8 auto_connect)
3688 {
3689         struct hci_conn_params *params;
3690
3691         params = hci_conn_params_add(hdev, addr, addr_type);
3692         if (!params)
3693                 return -EIO;
3694
3695         if (params->auto_connect == auto_connect)
3696                 return 0;
3697
3698         list_del_init(&params->action);
3699
3700         switch (auto_connect) {
3701         case HCI_AUTO_CONN_DISABLED:
3702         case HCI_AUTO_CONN_LINK_LOSS:
3703                 hci_update_background_scan(hdev);
3704                 break;
3705         case HCI_AUTO_CONN_REPORT:
3706                 list_add(&params->action, &hdev->pend_le_reports);
3707                 hci_update_background_scan(hdev);
3708                 break;
3709         case HCI_AUTO_CONN_DIRECT:
3710         case HCI_AUTO_CONN_ALWAYS:
3711                 if (!is_connected(hdev, addr, addr_type)) {
3712                         list_add(&params->action, &hdev->pend_le_conns);
3713                         hci_update_background_scan(hdev);
3714                 }
3715                 break;
3716         }
3717
3718         params->auto_connect = auto_connect;
3719
3720         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3721                auto_connect);
3722
3723         return 0;
3724 }
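/* Sketch of the intended call pattern (illustrative; as noted above the
 * caller must hold hdev->lock):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * Switching auto-connect modes re-homes the entry between
 * hdev->pend_le_conns and hdev->pend_le_reports and re-evaluates the
 * background scan accordingly.
 */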
3725
3726 /* This function requires the caller holds hdev->lock */
3727 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3728 {
3729         struct hci_conn_params *params;
3730
3731         params = hci_conn_params_lookup(hdev, addr, addr_type);
3732         if (!params)
3733                 return;
3734
3735         if (params->conn)
3736                 hci_conn_drop(params->conn);
3737
3738         list_del(&params->action);
3739         list_del(&params->list);
3740         kfree(params);
3741
3742         hci_update_background_scan(hdev);
3743
3744         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3745 }
3746
3747 /* This function requires the caller holds hdev->lock */
3748 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3749 {
3750         struct hci_conn_params *params, *tmp;
3751
3752         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3753                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3754                         continue;
3755                 list_del(&params->list);
3756                 kfree(params);
3757         }
3758
3759         BT_DBG("All disabled LE connection parameters were removed");
3760 }
3761
3762 /* This function requires the caller holds hdev->lock */
3763 void hci_conn_params_clear_all(struct hci_dev *hdev)
3764 {
3765         struct hci_conn_params *params, *tmp;
3766
3767         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3768                 if (params->conn)
3769                         hci_conn_drop(params->conn);
3770                 list_del(&params->action);
3771                 list_del(&params->list);
3772                 kfree(params);
3773         }
3774
3775         hci_update_background_scan(hdev);
3776
3777         BT_DBG("All LE connection parameters were removed");
3778 }
3779
3780 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3781 {
3782         if (status) {
3783                 BT_ERR("Failed to start inquiry: status %d", status);
3784
3785                 hci_dev_lock(hdev);
3786                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3787                 hci_dev_unlock(hdev);
3788                 return;
3789         }
3790 }
3791
3792 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3793 {
3794         /* General inquiry access code (GIAC) */
3795         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3796         struct hci_request req;
3797         struct hci_cp_inquiry cp;
3798         int err;
3799
3800         if (status) {
3801                 BT_ERR("Failed to disable LE scanning: status %d", status);
3802                 return;
3803         }
3804
3805         switch (hdev->discovery.type) {
3806         case DISCOV_TYPE_LE:
3807                 hci_dev_lock(hdev);
3808                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3809                 hci_dev_unlock(hdev);
3810                 break;
3811
3812         case DISCOV_TYPE_INTERLEAVED:
3813                 hci_req_init(&req, hdev);
3814
3815                 memset(&cp, 0, sizeof(cp));
3816                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3817                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3818                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3819
3820                 hci_dev_lock(hdev);
3821
3822                 hci_inquiry_cache_flush(hdev);
3823
3824                 err = hci_req_run(&req, inquiry_complete);
3825                 if (err) {
3826                         BT_ERR("Inquiry request failed: err %d", err);
3827                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3828                 }
3829
3830                 hci_dev_unlock(hdev);
3831                 break;
3832         }
3833 }
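/* For interleaved discovery the LE scan phase above is followed
 * directly by a BR/EDR inquiry using the GIAC, so a single mgmt
 * discovery session covers both transports.
 */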
3834
3835 static void le_scan_disable_work(struct work_struct *work)
3836 {
3837         struct hci_dev *hdev = container_of(work, struct hci_dev,
3838                                             le_scan_disable.work);
3839         struct hci_request req;
3840         int err;
3841
3842         BT_DBG("%s", hdev->name);
3843
3844         hci_req_init(&req, hdev);
3845
3846         hci_req_add_le_scan_disable(&req);
3847
3848         err = hci_req_run(&req, le_scan_disable_work_complete);
3849         if (err)
3850                 BT_ERR("Disable LE scanning request failed: err %d", err);
3851 }
3852
3853 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3854 {
3855         struct hci_dev *hdev = req->hdev;
3856
3857         /* If we're advertising or initiating an LE connection we can't
3858          * go ahead and change the random address at this time. This is
3859          * because the eventual initiator address used for the
3860          * subsequently created connection will be undefined (some
3861          * controllers use the new address and others the one we had
3862          * when the operation started).
3863          *
3864          * In this kind of scenario skip the update and let the random
3865          * address be updated at the next cycle.
3866          */
3867         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3868             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3869                 BT_DBG("Deferring random address update");
3870                 return;
3871         }
3872
3873         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3874 }
3875
3876 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3877                               u8 *own_addr_type)
3878 {
3879         struct hci_dev *hdev = req->hdev;
3880         int err;
3881
3882         /* If privacy is enabled use a resolvable private address. If the
3883          * current RPA has expired or something other than the current
3884          * RPA is in use, then generate a new one.
3885          */
3886         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3887                 int to;
3888
3889                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3890
3891                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3892                     !bacmp(&hdev->random_addr, &hdev->rpa))
3893                         return 0;
3894
3895                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3896                 if (err < 0) {
3897                         BT_ERR("%s failed to generate new RPA", hdev->name);
3898                         return err;
3899                 }
3900
3901                 set_random_addr(req, &hdev->rpa);
3902
3903                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3904                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3905
3906                 return 0;
3907         }
3908
3909         /* In case of required privacy without resolvable private address,
3910          * use an unresolvable private address. This is useful for active
3911          * scanning and non-connectable advertising.
3912          */
3913         if (require_privacy) {
3914                 bdaddr_t urpa;
3915
3916                 get_random_bytes(&urpa, 6);
3917                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3918
3919                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3920                 set_random_addr(req, &urpa);
3921                 return 0;
3922         }
3923
3924         /* If forcing static address is in use or there is no public
3925          * address, use the static address as the random address (but
3926          * skip the HCI command if the current random address is
3927          * already the static one).
3928          */
3929         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3930             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3931                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3932                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3933                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3934                                     &hdev->static_addr);
3935                 return 0;
3936         }
3937
3938         /* Neither privacy nor static address is being used so use a
3939          * public address.
3940          */
3941         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3942
3943         return 0;
3944 }
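/* Address selection above, in order of preference:
 *
 *  1. HCI_PRIVACY set       -> resolvable private address, regenerated
 *                              whenever HCI_RPA_EXPIRED has fired.
 *  2. require_privacy       -> unresolvable private address; clearing
 *                              the two top bits (urpa.b[5] &= 0x3f)
 *                              marks it non-resolvable per the spec.
 *  3. forced static address
 *     or no public address  -> static random address.
 *  4. otherwise             -> public address.
 */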
3945
3946 /* Copy the Identity Address of the controller.
3947  *
3948  * If the controller has a public BD_ADDR, then by default use that one.
3949  * If this is an LE-only controller without a public address, default to
3950  * the static random address.
3951  *
3952  * For debugging purposes it is possible to force controllers with a
3953  * public address to use the static random address instead.
3954  */
3955 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3956                                u8 *bdaddr_type)
3957 {
3958         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3959             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3960                 bacpy(bdaddr, &hdev->static_addr);
3961                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3962         } else {
3963                 bacpy(bdaddr, &hdev->bdaddr);
3964                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3965         }
3966 }
3967
3968 /* Alloc HCI device */
3969 struct hci_dev *hci_alloc_dev(void)
3970 {
3971         struct hci_dev *hdev;
3972
3973         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3974         if (!hdev)
3975                 return NULL;
3976
3977         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3978         hdev->esco_type = (ESCO_HV1);
3979         hdev->link_mode = (HCI_LM_ACCEPT);
3980         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3981         hdev->io_capability = 0x03;     /* No Input No Output */
3982         hdev->manufacturer = 0xffff;    /* Default to internal use */
3983         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3984         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3985
3986         hdev->sniff_max_interval = 800;
3987         hdev->sniff_min_interval = 80;
3988
3989         hdev->le_adv_channel_map = 0x07;
3990         hdev->le_adv_min_interval = 0x0800;
3991         hdev->le_adv_max_interval = 0x0800;
3992         hdev->le_scan_interval = 0x0060;
3993         hdev->le_scan_window = 0x0030;
3994         hdev->le_conn_min_interval = 0x0028;
3995         hdev->le_conn_max_interval = 0x0038;
3996         hdev->le_conn_latency = 0x0000;
3997         hdev->le_supv_timeout = 0x002a;
3998
3999         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4000         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4001         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4002         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4003
4004         mutex_init(&hdev->lock);
4005         mutex_init(&hdev->req_lock);
4006
4007         INIT_LIST_HEAD(&hdev->mgmt_pending);
4008         INIT_LIST_HEAD(&hdev->blacklist);
4009         INIT_LIST_HEAD(&hdev->whitelist);
4010         INIT_LIST_HEAD(&hdev->uuids);
4011         INIT_LIST_HEAD(&hdev->link_keys);
4012         INIT_LIST_HEAD(&hdev->long_term_keys);
4013         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4014         INIT_LIST_HEAD(&hdev->remote_oob_data);
4015         INIT_LIST_HEAD(&hdev->le_white_list);
4016         INIT_LIST_HEAD(&hdev->le_conn_params);
4017         INIT_LIST_HEAD(&hdev->pend_le_conns);
4018         INIT_LIST_HEAD(&hdev->pend_le_reports);
4019         INIT_LIST_HEAD(&hdev->conn_hash.list);
4020
4021         INIT_WORK(&hdev->rx_work, hci_rx_work);
4022         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4023         INIT_WORK(&hdev->tx_work, hci_tx_work);
4024         INIT_WORK(&hdev->power_on, hci_power_on);
4025
4026         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4027         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4028         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4029
4030         skb_queue_head_init(&hdev->rx_q);
4031         skb_queue_head_init(&hdev->cmd_q);
4032         skb_queue_head_init(&hdev->raw_q);
4033
4034         init_waitqueue_head(&hdev->req_wait_q);
4035
4036         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4037
4038         hci_init_sysfs(hdev);
4039         discovery_init(hdev);
4040
4041         return hdev;
4042 }
4043 EXPORT_SYMBOL(hci_alloc_dev);
4044
4045 /* Free HCI device */
4046 void hci_free_dev(struct hci_dev *hdev)
4047 {
4048         /* will free via device release */
4049         put_device(&hdev->dev);
4050 }
4051 EXPORT_SYMBOL(hci_free_dev);
4052
4053 /* Register HCI device */
4054 int hci_register_dev(struct hci_dev *hdev)
4055 {
4056         int id, error;
4057
4058         if (!hdev->open || !hdev->close || !hdev->send)
4059                 return -EINVAL;
4060
4061         /* Do not allow HCI_AMP devices to register at index 0,
4062          * so the index can be used as the AMP controller ID.
4063          */
4064         switch (hdev->dev_type) {
4065         case HCI_BREDR:
4066                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4067                 break;
4068         case HCI_AMP:
4069                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4070                 break;
4071         default:
4072                 return -EINVAL;
4073         }
4074
4075         if (id < 0)
4076                 return id;
4077
4078         sprintf(hdev->name, "hci%d", id);
4079         hdev->id = id;
4080
4081         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4082
4083         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4084                                           WQ_MEM_RECLAIM, 1, hdev->name);
4085         if (!hdev->workqueue) {
4086                 error = -ENOMEM;
4087                 goto err;
4088         }
4089
4090         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4091                                               WQ_MEM_RECLAIM, 1, hdev->name);
4092         if (!hdev->req_workqueue) {
4093                 destroy_workqueue(hdev->workqueue);
4094                 error = -ENOMEM;
4095                 goto err;
4096         }
4097
4098         if (!IS_ERR_OR_NULL(bt_debugfs))
4099                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4100
4101         dev_set_name(&hdev->dev, "%s", hdev->name);
4102
4103         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4104                                                CRYPTO_ALG_ASYNC);
4105         if (IS_ERR(hdev->tfm_aes)) {
4106                 BT_ERR("Unable to create crypto context");
4107                 error = PTR_ERR(hdev->tfm_aes);
4108                 hdev->tfm_aes = NULL;
4109                 goto err_wqueue;
4110         }
4111
4112         error = device_add(&hdev->dev);
4113         if (error < 0)
4114                 goto err_tfm;
4115
4116         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4117                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4118                                     hdev);
4119         if (hdev->rfkill) {
4120                 if (rfkill_register(hdev->rfkill) < 0) {
4121                         rfkill_destroy(hdev->rfkill);
4122                         hdev->rfkill = NULL;
4123                 }
4124         }
4125
4126         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4127                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4128
4129         set_bit(HCI_SETUP, &hdev->dev_flags);
4130         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4131
4132         if (hdev->dev_type == HCI_BREDR) {
4133                 /* Assume BR/EDR support until proven otherwise (such as
4134                  * through reading supported features during init).
4135                  */
4136                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4137         }
4138
4139         write_lock(&hci_dev_list_lock);
4140         list_add(&hdev->list, &hci_dev_list);
4141         write_unlock(&hci_dev_list_lock);
4142
4143         /* Devices that are marked for raw-only usage are unconfigured
4144          * and should not be included in normal operation.
4145          */
4146         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4147                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4148
4149         hci_notify(hdev, HCI_DEV_REG);
4150         hci_dev_hold(hdev);
4151
4152         queue_work(hdev->req_workqueue, &hdev->power_on);
4153
4154         return id;
4155
4156 err_tfm:
4157         crypto_free_blkcipher(hdev->tfm_aes);
4158 err_wqueue:
4159         destroy_workqueue(hdev->workqueue);
4160         destroy_workqueue(hdev->req_workqueue);
4161 err:
4162         ida_simple_remove(&hci_index_ida, hdev->id);
4163
4164         return error;
4165 }
4166 EXPORT_SYMBOL(hci_register_dev);
4167
4168 /* Unregister HCI device */
4169 void hci_unregister_dev(struct hci_dev *hdev)
4170 {
4171         int i, id;
4172
4173         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4174
4175         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4176
4177         id = hdev->id;
4178
4179         write_lock(&hci_dev_list_lock);
4180         list_del(&hdev->list);
4181         write_unlock(&hci_dev_list_lock);
4182
4183         hci_dev_do_close(hdev);
4184
4185         for (i = 0; i < NUM_REASSEMBLY; i++)
4186                 kfree_skb(hdev->reassembly[i]);
4187
4188         cancel_work_sync(&hdev->power_on);
4189
4190         if (!test_bit(HCI_INIT, &hdev->flags) &&
4191             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4192             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4193                 hci_dev_lock(hdev);
4194                 mgmt_index_removed(hdev);
4195                 hci_dev_unlock(hdev);
4196         }
4197
4198         /* mgmt_index_removed should take care of emptying the
4199          * pending list */
4200         BUG_ON(!list_empty(&hdev->mgmt_pending));
4201
4202         hci_notify(hdev, HCI_DEV_UNREG);
4203
4204         if (hdev->rfkill) {
4205                 rfkill_unregister(hdev->rfkill);
4206                 rfkill_destroy(hdev->rfkill);
4207         }
4208
4209         if (hdev->tfm_aes)
4210                 crypto_free_blkcipher(hdev->tfm_aes);
4211
4212         device_del(&hdev->dev);
4213
4214         debugfs_remove_recursive(hdev->debugfs);
4215
4216         destroy_workqueue(hdev->workqueue);
4217         destroy_workqueue(hdev->req_workqueue);
4218
4219         hci_dev_lock(hdev);
4220         hci_bdaddr_list_clear(&hdev->blacklist);
4221         hci_bdaddr_list_clear(&hdev->whitelist);
4222         hci_uuids_clear(hdev);
4223         hci_link_keys_clear(hdev);
4224         hci_smp_ltks_clear(hdev);
4225         hci_smp_irks_clear(hdev);
4226         hci_remote_oob_data_clear(hdev);
4227         hci_bdaddr_list_clear(&hdev->le_white_list);
4228         hci_conn_params_clear_all(hdev);
4229         hci_dev_unlock(hdev);
4230
4231         hci_dev_put(hdev);
4232
4233         ida_simple_remove(&hci_index_ida, id);
4234 }
4235 EXPORT_SYMBOL(hci_unregister_dev);
4236
4237 /* Suspend HCI device */
4238 int hci_suspend_dev(struct hci_dev *hdev)
4239 {
4240         hci_notify(hdev, HCI_DEV_SUSPEND);
4241         return 0;
4242 }
4243 EXPORT_SYMBOL(hci_suspend_dev);
4244
4245 /* Resume HCI device */
4246 int hci_resume_dev(struct hci_dev *hdev)
4247 {
4248         hci_notify(hdev, HCI_DEV_RESUME);
4249         return 0;
4250 }
4251 EXPORT_SYMBOL(hci_resume_dev);
4252
4253 /* Receive frame from HCI drivers */
4254 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4255 {
4256         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4257                       !test_bit(HCI_INIT, &hdev->flags))) {
4258                 kfree_skb(skb);
4259                 return -ENXIO;
4260         }
4261
4262         /* Incoming skb */
4263         bt_cb(skb)->incoming = 1;
4264
4265         /* Time stamp */
4266         __net_timestamp(skb);
4267
4268         skb_queue_tail(&hdev->rx_q, skb);
4269         queue_work(hdev->workqueue, &hdev->rx_work);
4270
4271         return 0;
4272 }
4273 EXPORT_SYMBOL(hci_recv_frame);
4274
4275 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4276                           int count, __u8 index)
4277 {
4278         int len = 0;
4279         int hlen = 0;
4280         int remain = count;
4281         struct sk_buff *skb;
4282         struct bt_skb_cb *scb;
4283
4284         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4285             index >= NUM_REASSEMBLY)
4286                 return -EILSEQ;
4287
4288         skb = hdev->reassembly[index];
4289
4290         if (!skb) {
4291                 switch (type) {
4292                 case HCI_ACLDATA_PKT:
4293                         len = HCI_MAX_FRAME_SIZE;
4294                         hlen = HCI_ACL_HDR_SIZE;
4295                         break;
4296                 case HCI_EVENT_PKT:
4297                         len = HCI_MAX_EVENT_SIZE;
4298                         hlen = HCI_EVENT_HDR_SIZE;
4299                         break;
4300                 case HCI_SCODATA_PKT:
4301                         len = HCI_MAX_SCO_SIZE;
4302                         hlen = HCI_SCO_HDR_SIZE;
4303                         break;
4304                 }
4305
4306                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4307                 if (!skb)
4308                         return -ENOMEM;
4309
4310                 scb = (void *) skb->cb;
4311                 scb->expect = hlen;
4312                 scb->pkt_type = type;
4313
4314                 hdev->reassembly[index] = skb;
4315         }
4316
4317         while (count) {
4318                 scb = (void *) skb->cb;
4319                 len = min_t(uint, scb->expect, count);
4320
4321                 memcpy(skb_put(skb, len), data, len);
4322
4323                 count -= len;
4324                 data += len;
4325                 scb->expect -= len;
4326                 remain = count;
4327
4328                 switch (type) {
4329                 case HCI_EVENT_PKT:
4330                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4331                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4332                                 scb->expect = h->plen;
4333
4334                                 if (skb_tailroom(skb) < scb->expect) {
4335                                         kfree_skb(skb);
4336                                         hdev->reassembly[index] = NULL;
4337                                         return -ENOMEM;
4338                                 }
4339                         }
4340                         break;
4341
4342                 case HCI_ACLDATA_PKT:
4343                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4344                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4345                                 scb->expect = __le16_to_cpu(h->dlen);
4346
4347                                 if (skb_tailroom(skb) < scb->expect) {
4348                                         kfree_skb(skb);
4349                                         hdev->reassembly[index] = NULL;
4350                                         return -ENOMEM;
4351                                 }
4352                         }
4353                         break;
4354
4355                 case HCI_SCODATA_PKT:
4356                         if (skb->len == HCI_SCO_HDR_SIZE) {
4357                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4358                                 scb->expect = h->dlen;
4359
4360                                 if (skb_tailroom(skb) < scb->expect) {
4361                                         kfree_skb(skb);
4362                                         hdev->reassembly[index] = NULL;
4363                                         return -ENOMEM;
4364                                 }
4365                         }
4366                         break;
4367                 }
4368
4369                 if (scb->expect == 0) {
4370                         /* Complete frame */
4371
4372                         bt_cb(skb)->pkt_type = type;
4373                         hci_recv_frame(hdev, skb);
4374
4375                         hdev->reassembly[index] = NULL;
4376                         return remain;
4377                 }
4378         }
4379
4380         return remain;
4381 }
4382
4383 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4384 {
4385         int rem = 0;
4386
4387         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4388                 return -EILSEQ;
4389
4390         while (count) {
4391                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4392                 if (rem < 0)
4393                         return rem;
4394
4395                 data += (count - rem);
4396                 count = rem;
4397         }
4398
4399         return rem;
4400 }
4401 EXPORT_SYMBOL(hci_recv_fragment);
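/* Illustrative driver-side use (hypothetical transport callback): hand
 * over whatever arrived on the wire and let the core reassemble full
 * packets:
 *
 *	static void my_transport_rx(struct hci_dev *hdev, void *buf,
 *				    int len)
 *	{
 *		if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *			BT_ERR("%s corrupted event packet", hdev->name);
 *	}
 *
 * A negative return value signals a malformed fragment stream.
 */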
4402
4403 #define STREAM_REASSEMBLY 0
4404
4405 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4406 {
4407         int type;
4408         int rem = 0;
4409
4410         while (count) {
4411                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4412
4413                 if (!skb) {
4414                         struct { char type; } *pkt;
4415
4416                         /* Start of the frame */
4417                         pkt = data;
4418                         type = pkt->type;
4419
4420                         data++;
4421                         count--;
4422                 } else {
4423                         type = bt_cb(skb)->pkt_type;
4424                 }
4424
4425                 rem = hci_reassembly(hdev, type, data, count,
4426                                      STREAM_REASSEMBLY);
4427                 if (rem < 0)
4428                         return rem;
4429
4430                 data += (count - rem);
4431                 count = rem;
4432         }
4433
4434         return rem;
4435 }
4436 EXPORT_SYMBOL(hci_recv_stream_fragment);
4437
4438 /* ---- Interface to upper protocols ---- */
4439
4440 int hci_register_cb(struct hci_cb *cb)
4441 {
4442         BT_DBG("%p name %s", cb, cb->name);
4443
4444         write_lock(&hci_cb_list_lock);
4445         list_add(&cb->list, &hci_cb_list);
4446         write_unlock(&hci_cb_list_lock);
4447
4448         return 0;
4449 }
4450 EXPORT_SYMBOL(hci_register_cb);
4451
4452 int hci_unregister_cb(struct hci_cb *cb)
4453 {
4454         BT_DBG("%p name %s", cb, cb->name);
4455
4456         write_lock(&hci_cb_list_lock);
4457         list_del(&cb->list);
4458         write_unlock(&hci_cb_list_lock);
4459
4460         return 0;
4461 }
4462 EXPORT_SYMBOL(hci_unregister_cb);
4463
4464 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4465 {
4466         int err;
4467
4468         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4469
4470         /* Time stamp */
4471         __net_timestamp(skb);
4472
4473         /* Send copy to monitor */
4474         hci_send_to_monitor(hdev, skb);
4475
4476         if (atomic_read(&hdev->promisc)) {
4477                 /* Send copy to the sockets */
4478                 hci_send_to_sock(hdev, skb);
4479         }
4480
4481         /* Get rid of skb owner, prior to sending to the driver. */
4482         skb_orphan(skb);
4483
4484         err = hdev->send(hdev, skb);
4485         if (err < 0) {
4486                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4487                 kfree_skb(skb);
4488         }
4489 }
4490
4491 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4492 {
4493         skb_queue_head_init(&req->cmd_q);
4494         req->hdev = hdev;
4495         req->err = 0;
4496 }
4497
4498 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4499 {
4500         struct hci_dev *hdev = req->hdev;
4501         struct sk_buff *skb;
4502         unsigned long flags;
4503
4504         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4505
4506         /* If an error occurred during request building, remove all HCI
4507          * commands queued on the HCI request queue.
4508          */
4509         if (req->err) {
4510                 skb_queue_purge(&req->cmd_q);
4511                 return req->err;
4512         }
4513
4514         /* Do not allow empty requests */
4515         if (skb_queue_empty(&req->cmd_q))
4516                 return -ENODATA;
4517
4518         skb = skb_peek_tail(&req->cmd_q);
4519         bt_cb(skb)->req.complete = complete;
4520
4521         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4522         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4523         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4524
4525         queue_work(hdev->workqueue, &hdev->cmd_work);
4526
4527         return 0;
4528 }
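/* Typical request construction (sketch; the opcode, parameter struct
 * and completion callback are examples, not fixed by this API):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * Everything queued between init and run is spliced onto hdev->cmd_q
 * atomically, and the callback fires once the last command completes.
 */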
4529
4530 bool hci_req_pending(struct hci_dev *hdev)
4531 {
4532         return (hdev->req_status == HCI_REQ_PEND);
4533 }
4534
4535 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4536                                        u32 plen, const void *param)
4537 {
4538         int len = HCI_COMMAND_HDR_SIZE + plen;
4539         struct hci_command_hdr *hdr;
4540         struct sk_buff *skb;
4541
4542         skb = bt_skb_alloc(len, GFP_ATOMIC);
4543         if (!skb)
4544                 return NULL;
4545
4546         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4547         hdr->opcode = cpu_to_le16(opcode);
4548         hdr->plen   = plen;
4549
4550         if (plen)
4551                 memcpy(skb_put(skb, plen), param, plen);
4552
4553         BT_DBG("skb len %d", skb->len);
4554
4555         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4556
4557         return skb;
4558 }
4559
4560 /* Send HCI command */
4561 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4562                  const void *param)
4563 {
4564         struct sk_buff *skb;
4565
4566         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4567
4568         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4569         if (!skb) {
4570                 BT_ERR("%s no memory for command", hdev->name);
4571                 return -ENOMEM;
4572         }
4573
4574         /* Stand-alone HCI commands must be flagged as
4575          * single-command requests.
4576          */
4577         bt_cb(skb)->req.start = true;
4578
4579         skb_queue_tail(&hdev->cmd_q, skb);
4580         queue_work(hdev->workqueue, &hdev->cmd_work);
4581
4582         return 0;
4583 }
4584
4585 /* Queue a command to an asynchronous HCI request */
4586 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4587                     const void *param, u8 event)
4588 {
4589         struct hci_dev *hdev = req->hdev;
4590         struct sk_buff *skb;
4591
4592         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4593
4594         /* If an error occurred during request building, there is no point in
4595          * queueing the HCI command. We can simply return.
4596          */
4597         if (req->err)
4598                 return;
4599
4600         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4601         if (!skb) {
4602                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4603                        hdev->name, opcode);
4604                 req->err = -ENOMEM;
4605                 return;
4606         }
4607
4608         if (skb_queue_empty(&req->cmd_q))
4609                 bt_cb(skb)->req.start = true;
4610
4611         bt_cb(skb)->req.event = event;
4612
4613         skb_queue_tail(&req->cmd_q, skb);
4614 }
4615
4616 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4617                  const void *param)
4618 {
4619         hci_req_add_ev(req, opcode, plen, param, 0);
4620 }
4621
4622 /* Get data from the previously sent command */
4623 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4624 {
4625         struct hci_command_hdr *hdr;
4626
4627         if (!hdev->sent_cmd)
4628                 return NULL;
4629
4630         hdr = (void *) hdev->sent_cmd->data;
4631
4632         if (hdr->opcode != cpu_to_le16(opcode))
4633                 return NULL;
4634
4635         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4636
4637         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4638 }
4639
4640 /* Send ACL data */
4641 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4642 {
4643         struct hci_acl_hdr *hdr;
4644         int len = skb->len;
4645
4646         skb_push(skb, HCI_ACL_HDR_SIZE);
4647         skb_reset_transport_header(skb);
4648         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4649         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4650         hdr->dlen   = cpu_to_le16(len);
4651 }
4652
4653 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4654                           struct sk_buff *skb, __u16 flags)
4655 {
4656         struct hci_conn *conn = chan->conn;
4657         struct hci_dev *hdev = conn->hdev;
4658         struct sk_buff *list;
4659
4660         skb->len = skb_headlen(skb);
4661         skb->data_len = 0;
4662
4663         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4664
4665         switch (hdev->dev_type) {
4666         case HCI_BREDR:
4667                 hci_add_acl_hdr(skb, conn->handle, flags);
4668                 break;
4669         case HCI_AMP:
4670                 hci_add_acl_hdr(skb, chan->handle, flags);
4671                 break;
4672         default:
4673                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4674                 return;
4675         }
4676
4677         list = skb_shinfo(skb)->frag_list;
4678         if (!list) {
4679                 /* Non-fragmented */
4680                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4681
4682                 skb_queue_tail(queue, skb);
4683         } else {
4684                 /* Fragmented */
4685                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4686
4687                 skb_shinfo(skb)->frag_list = NULL;
4688
4689                 /* Queue all fragments atomically */
4690                 spin_lock(&queue->lock);
4691
4692                 __skb_queue_tail(queue, skb);
4693
4694                 flags &= ~ACL_START;
4695                 flags |= ACL_CONT;
4696                 do {
4697                         skb = list;
4698                         list = list->next;
4698
4699                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4700                         hci_add_acl_hdr(skb, conn->handle, flags);
4701
4702                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4703
4704                         __skb_queue_tail(queue, skb);
4705                 } while (list);
4706
4707                 spin_unlock(&queue->lock);
4708         }
4709 }
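/* Fragmentation note: for an skb with a frag_list the head fragment
 * keeps the caller-supplied flags (typically ACL_START) while every
 * continuation fragment is re-flagged ACL_CONT before being queued,
 * allowing the controller to stitch the L2CAP PDU back together.
 */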
4710
4711 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4712 {
4713         struct hci_dev *hdev = chan->conn->hdev;
4714
4715         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4716
4717         hci_queue_acl(chan, &chan->data_q, skb, flags);
4718
4719         queue_work(hdev->workqueue, &hdev->tx_work);
4720 }
4721
4722 /* Send SCO data */
4723 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4724 {
4725         struct hci_dev *hdev = conn->hdev;
4726         struct hci_sco_hdr hdr;
4727
4728         BT_DBG("%s len %d", hdev->name, skb->len);
4729
4730         hdr.handle = cpu_to_le16(conn->handle);
4731         hdr.dlen   = skb->len;
4732
4733         skb_push(skb, HCI_SCO_HDR_SIZE);
4734         skb_reset_transport_header(skb);
4735         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4736
4737         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4738
4739         skb_queue_tail(&conn->data_q, skb);
4740         queue_work(hdev->workqueue, &hdev->tx_work);
4741 }
4742
4743 /* ---- HCI TX task (outgoing data) ---- */
4744
4745 /* HCI Connection scheduler */
4746 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4747                                      int *quote)
4748 {
4749         struct hci_conn_hash *h = &hdev->conn_hash;
4750         struct hci_conn *conn = NULL, *c;
4751         unsigned int num = 0, min = ~0;
4752
4753         /* We don't have to lock the device here. Connections are always
4754          * added and removed with the TX task disabled. */
4755
4756         rcu_read_lock();
4757
4758         list_for_each_entry_rcu(c, &h->list, list) {
4759                 if (c->type != type || skb_queue_empty(&c->data_q))
4760                         continue;
4761
4762                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4763                         continue;
4764
4765                 num++;
4766
4767                 if (c->sent < min) {
4768                         min  = c->sent;
4769                         conn = c;
4770                 }
4771
4772                 if (hci_conn_num(hdev, type) == num)
4773                         break;
4774         }
4775
4776         rcu_read_unlock();
4777
4778         if (conn) {
4779                 int cnt, q;
4780
4781                 switch (conn->type) {
4782                 case ACL_LINK:
4783                         cnt = hdev->acl_cnt;
4784                         break;
4785                 case SCO_LINK:
4786                 case ESCO_LINK:
4787                         cnt = hdev->sco_cnt;
4788                         break;
4789                 case LE_LINK:
4790                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4791                         break;
4792                 default:
4793                         cnt = 0;
4794                         BT_ERR("Unknown link type");
4795                 }
4796
4797                 q = cnt / num;
4798                 *quote = q ? q : 1;
4799         } else {
4800                 *quote = 0;
4801         }
4801
4802         BT_DBG("conn %p quote %d", conn, *quote);
4803         return conn;
4804 }
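/* Quota example for the scheduler above: with hdev->acl_cnt == 8 free
 * ACL buffers and num == 3 eligible connections, the chosen connection
 * gets q = 8 / 3 = 2 packets; a zero quotient still grants one packet,
 * so a busy link can never be starved completely.
 */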
4805
4806 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4807 {
4808         struct hci_conn_hash *h = &hdev->conn_hash;
4809         struct hci_conn *c;
4810
4811         BT_ERR("%s link tx timeout", hdev->name);
4812
4813         rcu_read_lock();
4814
4815         /* Kill stalled connections */
4816         list_for_each_entry_rcu(c, &h->list, list) {
4817                 if (c->type == type && c->sent) {
4818                         BT_ERR("%s killing stalled connection %pMR",
4819                                hdev->name, &c->dst);
4820                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4821                 }
4822         }
4823
4824         rcu_read_unlock();
4825 }
4826
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

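/* Starvation avoidance: after a scheduling round, any channel that did
 * not get to send anything (chan->sent == 0) has the priority of its
 * head skb promoted to HCI_PRIO_MAX - 1, so that low priority traffic
 * is not starved indefinitely by busier high priority channels.
 */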
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

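/* Block-based flow control accounts for ACL data in units of
 * hdev->block_len bytes instead of whole packets. For example, assuming
 * a block_len of 64, a 154 byte frame minus the 4 byte ACL header
 * leaves 150 bytes of payload, i.e. DIV_ROUND_UP(150, 64) == 3 blocks.
 */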
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

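/* Packet-based ACL scheduling: drain each picked channel until its quota
 * is spent or the head skb priority drops below the priority that won
 * the pick; if any credits were consumed, recalculate priorities so
 * idle channels get promoted for the next round.
 */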
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL links on a BR/EDR controller, nothing to schedule */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP links on an AMP controller, nothing to schedule */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

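/* SCO and eSCO scheduling below share the hdev->sco_cnt credit pool and
 * use the connection-level picker; synchronous traffic has no priority
 * queues or aging, frames are simply sent in FIFO order per connection.
 */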
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

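/* The 16-bit handle field of an incoming ACL frame packs the 12-bit
 * connection handle together with the packet boundary and broadcast
 * flags; hci_handle() and hci_flags() below split the two apart.
 */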
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

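/* Commands are queued as requests: bt_cb(skb)->req.start marks the first
 * command of each request, and hci_req_is_complete() above reports true
 * when the head of cmd_q starts a new request. On a failed command, all
 * remaining queued commands up to the next req.start marker are dropped.
 */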
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR-based controllers generate a spontaneous
                 * reset complete event during init, and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever the last sent command was.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

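/* One command is sent to the driver per available credit (cmd_cnt); a
 * clone is kept in hdev->sent_cmd so hci_req_cmd_complete() can match
 * the completion, and the command timer is re-armed unless a reset is
 * in flight.
 */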
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

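/* Example usage of hci_req_add_le_scan_disable() above (a minimal
 * sketch; scan_disable_complete is a placeholder for any
 * hci_req_complete_t callback):
 *
 *      struct hci_request req;
 *      int err;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add_le_scan_disable(&req);
 *      err = hci_req_run(&req, scan_disable_complete);
 *
 * hci_update_background_scan() below follows exactly this pattern.
 */
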
static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

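/* Note for the passive scan setup below: the byte returned by
 * update_white_list() is used verbatim as the scan filter_policy,
 * 0x00 meaning process all advertisements and 0x01 meaning only those
 * from white listed devices.
 */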
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we
 * start the background scanning, otherwise we stop it.
 *
 * This function requires that the caller hold hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}