Bluetooth: Fix hci_sync missing wakeup interrupt
net/bluetooth/hci_core.c (cascardo/linux.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
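
/* Synchronous request lifecycle: a caller marks hdev->req_status as
 * HCI_REQ_PEND, queues one or more commands and sleeps on
 * hdev->req_wait_q.  The completion path flips req_status to
 * HCI_REQ_DONE (or HCI_REQ_CANCELED) before waking the queue, and
 * hci_req_lock() serializes whole requests so that only one synchronous
 * request is in flight per controller at a time.
 */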

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
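
/* Usage note (added for clarity, not from the original source): with
 * debugfs mounted at /sys/kernel/debug, this entry is typically
 * reachable as /sys/kernel/debug/bluetooth/hci0/dut_mode, e.g.:
 *
 *     echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode   # enter DUT mode
 *     cat /sys/kernel/debug/bluetooth/hci0/dut_mode        # prints Y or N
 *
 * Writing N issues HCI_OP_RESET rather than a dedicated "disable"
 * command, as the handler above shows.
 */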

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
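
/* Note: the read-only debugfs entries below all follow the same
 * seq_file pattern: a *_show() callback that prints one snapshot of
 * controller state under hci_dev_lock(), wired up through
 * single_open() so the whole output is generated in one pass.  Only
 * the show callback differs between them.
 */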

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in little
                 * endian order, but the %pUb modifier expects big
                 * endian. So reverse the byte order before printing.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);

                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
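
/* Note: DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write/llseek
 * file_operations for a single u64 debugfs value from just a getter, an
 * optional setter and a printf format, which is why the numeric
 * tunables in this file only have to provide *_get()/*_set() pairs.
 */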

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);

                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);

                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

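/* Note (added for clarity): the LE connection interval values below are
 * in units of 1.25 ms as defined by the Bluetooth Core Specification,
 * so the permitted range 0x0006-0x0C80 corresponds to 7.5 ms-4 s.
 */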
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

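/* Completion callback for synchronous requests.  The status transition
 * to HCI_REQ_DONE must happen before the wake-up so that a waiter
 * racing with the completion never goes back to sleep after the event
 * has already fired; wake_up_interruptible() then kicks any task
 * sleeping in __hci_req_sync() or __hci_cmd_sync_ev() below.
 */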
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

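/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires.  On success the returned skb contains the
 * remaining event payload and must be freed with kfree_skb() by the
 * caller.  dut_mode_write() above is a typical caller:
 *
 *     skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *     if (IS_ERR(skb))
 *             return PTR_ERR(skb);
 *     err = -bt_to_errno(skb->data[0]);
 *     kfree_skb(skb);
 */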
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
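
/* Note on the sleep/wake ordering above: the task is added to
 * req_wait_q and moved to TASK_INTERRUPTIBLE *before* hci_req_run()
 * queues the commands.  If the controller answers before
 * schedule_timeout() is reached, hci_req_sync_complete() has already
 * set the task back to runnable, so the wakeup cannot be missed.
 */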

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

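/* Inquiry mode selection per the HCI Write Inquiry Mode command:
 * 0x02 = inquiry result with RSSI or extended inquiry result,
 * 0x01 = inquiry result with RSSI, 0x00 = standard inquiry result.
 * The manufacturer/revision checks below work around controllers that
 * advertise a capability they do not actually implement.
 */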
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set an event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To get proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;

                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
1690         if (hdev->commands[6] & 0x80 &&
1691             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1692                 struct hci_cp_delete_stored_link_key cp;
1693
1694                 bacpy(&cp.bdaddr, BDADDR_ANY);
1695                 cp.delete_all = 0x01;
1696                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1697                             sizeof(cp), &cp);
1698         }
1699
1700         if (hdev->commands[5] & 0x10)
1701                 hci_setup_link_policy(req);
1702
1703         if (lmp_le_capable(hdev)) {
1704                 u8 events[8];
1705
1706                 memset(events, 0, sizeof(events));
1707                 events[0] = 0x0f;
1708
1709                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1710                         events[0] |= 0x10;      /* LE Long Term Key Request */
1711
1712                 /* If controller supports the Connection Parameters Request
1713                  * Link Layer Procedure, enable the corresponding event.
1714                  */
1715                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1716                         events[0] |= 0x20;      /* LE Remote Connection
1717                                                  * Parameter Request
1718                                                  */
1719
1720                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1721                             events);
1722
1723                 if (hdev->commands[25] & 0x40) {
1724                         /* Read LE Advertising Channel TX Power */
1725                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1726                 }
1727
1728                 hci_set_le_support(req);
1729         }
1730
1731         /* Read features beyond page 1 if available */
1732         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733                 struct hci_cp_read_local_ext_features cp;
1734
1735                 cp.page = p;
1736                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737                             sizeof(cp), &cp);
1738         }
1739 }
1740
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742 {
1743         struct hci_dev *hdev = req->hdev;
1744
1745         /* Set event mask page 2 if the HCI command for it is supported */
1746         if (hdev->commands[22] & 0x04)
1747                 hci_set_event_mask_page_2(req);
1748
1749         /* Read local codec list if the HCI command is supported */
1750         if (hdev->commands[29] & 0x20)
1751                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1752
1753         /* Get MWS transport configuration if the HCI command is supported */
1754         if (hdev->commands[30] & 0x08)
1755                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1756
1757         /* Check for Synchronization Train support */
1758         if (lmp_sync_train_capable(hdev))
1759                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1760
1761         /* Enable Secure Connections if supported and configured */
1762         if ((lmp_sc_capable(hdev) ||
1763              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1764             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1765                 u8 support = 0x01;
1766                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1767                             sizeof(support), &support);
1768         }
1769 }
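
/* Illustrative note (not part of the original file): hdev->commands[]
 * holds the Supported_Commands bit field returned by the Read Local
 * Supported Commands command, indexed as octet/bit pairs per the Core
 * Specification. The checks above can therefore be read as, e.g.:
 *
 *	if (hdev->commands[22] & BIT(2))
 *		hci_set_event_mask_page_2(req);
 *
 * which tests octet 22 bit 2, the bit the specification assigns to the
 * Set Event Mask Page 2 command (0x04 == BIT(2)).
 */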
1770
1771 static int __hci_init(struct hci_dev *hdev)
1772 {
1773         int err;
1774
1775         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1776         if (err < 0)
1777                 return err;
1778
1779         /* The Device Under Test (DUT) mode is special and available for
1780          * all controller types. So just create it early on.
1781          */
1782         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1783                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1784                                     &dut_mode_fops);
1785         }
1786
1787         /* The HCI_BREDR device type covers single-mode LE, single-mode
1788          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1789          * only need the first stage init.
1790          */
1791         if (hdev->dev_type != HCI_BREDR)
1792                 return 0;
1793
1794         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1799         if (err < 0)
1800                 return err;
1801
1802         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1803         if (err < 0)
1804                 return err;
1805
1806         /* Only create debugfs entries during the initial setup
1807          * phase and not every time the controller gets powered on.
1808          */
1809         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1810                 return 0;
1811
1812         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1813                             &features_fops);
1814         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1815                            &hdev->manufacturer);
1816         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1817         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1818         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1819                             &blacklist_fops);
1820         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1821                             &whitelist_fops);
1822         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1823
1824         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1825                             &conn_info_min_age_fops);
1826         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1827                             &conn_info_max_age_fops);
1828
1829         if (lmp_bredr_capable(hdev)) {
1830                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1831                                     hdev, &inquiry_cache_fops);
1832                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1833                                     hdev, &link_keys_fops);
1834                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1835                                     hdev, &dev_class_fops);
1836                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1837                                     hdev, &voice_setting_fops);
1838         }
1839
1840         if (lmp_ssp_capable(hdev)) {
1841                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1842                                     hdev, &auto_accept_delay_fops);
1843                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1844                                     hdev, &force_sc_support_fops);
1845                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1846                                     hdev, &sc_only_mode_fops);
1847         }
1848
1849         if (lmp_sniff_capable(hdev)) {
1850                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1851                                     hdev, &idle_timeout_fops);
1852                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1853                                     hdev, &sniff_min_interval_fops);
1854                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1855                                     hdev, &sniff_max_interval_fops);
1856         }
1857
1858         if (lmp_le_capable(hdev)) {
1859                 debugfs_create_file("identity", 0400, hdev->debugfs,
1860                                     hdev, &identity_fops);
1861                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1862                                     hdev, &rpa_timeout_fops);
1863                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1864                                     hdev, &random_address_fops);
1865                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1866                                     hdev, &static_address_fops);
1867
1868                 /* For controllers with a public address, provide a debug
1869                  * option to force the usage of the configured static
1870                  * address. By default the public address is used.
1871                  */
1872                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1873                         debugfs_create_file("force_static_address", 0644,
1874                                             hdev->debugfs, hdev,
1875                                             &force_static_address_fops);
1876
1877                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1878                                   &hdev->le_white_list_size);
1879                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1880                                     &white_list_fops);
1881                 debugfs_create_file("identity_resolving_keys", 0400,
1882                                     hdev->debugfs, hdev,
1883                                     &identity_resolving_keys_fops);
1884                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1885                                     hdev, &long_term_keys_fops);
1886                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1887                                     hdev, &conn_min_interval_fops);
1888                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1889                                     hdev, &conn_max_interval_fops);
1890                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1891                                     hdev, &conn_latency_fops);
1892                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1893                                     hdev, &supervision_timeout_fops);
1894                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1895                                     hdev, &adv_channel_map_fops);
1896                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1897                                     hdev, &adv_min_interval_fops);
1898                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1899                                     hdev, &adv_max_interval_fops);
1900                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1901                                     &device_list_fops);
1902                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1903                                    hdev->debugfs,
1904                                    &hdev->discov_interleaved_timeout);
1905
1906                 smp_register(hdev);
1907         }
1908
1909         return 0;
1910 }
1911
1912 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1913 {
1914         struct hci_dev *hdev = req->hdev;
1915
1916         BT_DBG("%s %ld", hdev->name, opt);
1917
1918         /* Reset */
1919         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1920                 hci_reset_req(req, 0);
1921
1922         /* Read Local Version */
1923         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1924
1925         /* Read BD Address */
1926         if (hdev->set_bdaddr)
1927                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1928 }
1929
1930 static int __hci_unconf_init(struct hci_dev *hdev)
1931 {
1932         int err;
1933
1934         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1935                 return 0;
1936
1937         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1938         if (err < 0)
1939                 return err;
1940
1941         return 0;
1942 }
1943
1944 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1945 {
1946         __u8 scan = opt;
1947
1948         BT_DBG("%s %x", req->hdev->name, scan);
1949
1950         /* Inquiry and Page scans */
1951         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1952 }
1953
1954 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1955 {
1956         __u8 auth = opt;
1957
1958         BT_DBG("%s %x", req->hdev->name, auth);
1959
1960         /* Authentication */
1961         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1962 }
1963
1964 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1965 {
1966         __u8 encrypt = opt;
1967
1968         BT_DBG("%s %x", req->hdev->name, encrypt);
1969
1970         /* Encryption */
1971         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1972 }
1973
1974 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1975 {
1976         __le16 policy = cpu_to_le16(opt);
1977
1978         BT_DBG("%s %x", req->hdev->name, policy);
1979
1980         /* Default link policy */
1981         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1982 }
1983
1984 /* Get HCI device by index.
1985  * Device is held on return. */
1986 struct hci_dev *hci_dev_get(int index)
1987 {
1988         struct hci_dev *hdev = NULL, *d;
1989
1990         BT_DBG("%d", index);
1991
1992         if (index < 0)
1993                 return NULL;
1994
1995         read_lock(&hci_dev_list_lock);
1996         list_for_each_entry(d, &hci_dev_list, list) {
1997                 if (d->id == index) {
1998                         hdev = hci_dev_hold(d);
1999                         break;
2000                 }
2001         }
2002         read_unlock(&hci_dev_list_lock);
2003         return hdev;
2004 }
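
/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): every successful hci_dev_get() must be balanced by a
 * hci_dev_put() once the reference is no longer needed:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */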
2005
2006 /* ---- Inquiry support ---- */
2007
2008 bool hci_discovery_active(struct hci_dev *hdev)
2009 {
2010         struct discovery_state *discov = &hdev->discovery;
2011
2012         switch (discov->state) {
2013         case DISCOVERY_FINDING:
2014         case DISCOVERY_RESOLVING:
2015                 return true;
2016
2017         default:
2018                 return false;
2019         }
2020 }
2021
2022 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2023 {
2024         int old_state = hdev->discovery.state;
2025
2026         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2027
2028         if (old_state == state)
2029                 return;
2030
2031         hdev->discovery.state = state;
2032
2033         switch (state) {
2034         case DISCOVERY_STOPPED:
2035                 hci_update_background_scan(hdev);
2036
2037                 if (old_state != DISCOVERY_STARTING)
2038                         mgmt_discovering(hdev, 0);
2039                 break;
2040         case DISCOVERY_STARTING:
2041                 break;
2042         case DISCOVERY_FINDING:
2043                 mgmt_discovering(hdev, 1);
2044                 break;
2045         case DISCOVERY_RESOLVING:
2046                 break;
2047         case DISCOVERY_STOPPING:
2048                 break;
2049         }
2050 }
2051
2052 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2053 {
2054         struct discovery_state *cache = &hdev->discovery;
2055         struct inquiry_entry *p, *n;
2056
2057         list_for_each_entry_safe(p, n, &cache->all, all) {
2058                 list_del(&p->all);
2059                 kfree(p);
2060         }
2061
2062         INIT_LIST_HEAD(&cache->unknown);
2063         INIT_LIST_HEAD(&cache->resolve);
2064 }
2065
2066 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2067                                                bdaddr_t *bdaddr)
2068 {
2069         struct discovery_state *cache = &hdev->discovery;
2070         struct inquiry_entry *e;
2071
2072         BT_DBG("cache %p, %pMR", cache, bdaddr);
2073
2074         list_for_each_entry(e, &cache->all, all) {
2075                 if (!bacmp(&e->data.bdaddr, bdaddr))
2076                         return e;
2077         }
2078
2079         return NULL;
2080 }
2081
2082 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2083                                                        bdaddr_t *bdaddr)
2084 {
2085         struct discovery_state *cache = &hdev->discovery;
2086         struct inquiry_entry *e;
2087
2088         BT_DBG("cache %p, %pMR", cache, bdaddr);
2089
2090         list_for_each_entry(e, &cache->unknown, list) {
2091                 if (!bacmp(&e->data.bdaddr, bdaddr))
2092                         return e;
2093         }
2094
2095         return NULL;
2096 }
2097
2098 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2099                                                        bdaddr_t *bdaddr,
2100                                                        int state)
2101 {
2102         struct discovery_state *cache = &hdev->discovery;
2103         struct inquiry_entry *e;
2104
2105         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2106
2107         list_for_each_entry(e, &cache->resolve, list) {
2108                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2109                         return e;
2110                 if (!bacmp(&e->data.bdaddr, bdaddr))
2111                         return e;
2112         }
2113
2114         return NULL;
2115 }
2116
2117 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2118                                       struct inquiry_entry *ie)
2119 {
2120         struct discovery_state *cache = &hdev->discovery;
2121         struct list_head *pos = &cache->resolve;
2122         struct inquiry_entry *p;
2123
2124         list_del(&ie->list);
2125
2126         list_for_each_entry(p, &cache->resolve, list) {
2127                 if (p->name_state != NAME_PENDING &&
2128                     abs(p->data.rssi) >= abs(ie->data.rssi))
2129                         break;
2130                 pos = &p->list;
2131         }
2132
2133         list_add(&ie->list, pos);
2134 }
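
/* Illustrative note (not part of the original file): the walk above
 * keeps the resolve list sorted by ascending abs(rssi), strongest
 * signal first, while entries whose name resolution is already
 * pending are skipped over and left in place. For example, an entry
 * with rssi -40 (abs 40) is inserted ahead of one with rssi -70
 * (abs 70), so the closest devices get their names resolved first.
 */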
2135
2136 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2137                              bool name_known)
2138 {
2139         struct discovery_state *cache = &hdev->discovery;
2140         struct inquiry_entry *ie;
2141         u32 flags = 0;
2142
2143         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2144
2145         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2146
2147         if (!data->ssp_mode)
2148                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2149
2150         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2151         if (ie) {
2152                 if (!ie->data.ssp_mode)
2153                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2154
2155                 if (ie->name_state == NAME_NEEDED &&
2156                     data->rssi != ie->data.rssi) {
2157                         ie->data.rssi = data->rssi;
2158                         hci_inquiry_cache_update_resolve(hdev, ie);
2159                 }
2160
2161                 goto update;
2162         }
2163
2164         /* Entry not in the cache. Add new one. */
2165         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2166         if (!ie) {
2167                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2168                 goto done;
2169         }
2170
2171         list_add(&ie->all, &cache->all);
2172
2173         if (name_known) {
2174                 ie->name_state = NAME_KNOWN;
2175         } else {
2176                 ie->name_state = NAME_NOT_KNOWN;
2177                 list_add(&ie->list, &cache->unknown);
2178         }
2179
2180 update:
2181         if (name_known && ie->name_state != NAME_KNOWN &&
2182             ie->name_state != NAME_PENDING) {
2183                 ie->name_state = NAME_KNOWN;
2184                 list_del(&ie->list);
2185         }
2186
2187         memcpy(&ie->data, data, sizeof(*data));
2188         ie->timestamp = jiffies;
2189         cache->timestamp = jiffies;
2190
2191         if (ie->name_state == NAME_NOT_KNOWN)
2192                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2193
2194 done:
2195         return flags;
2196 }
2197
2198 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2199 {
2200         struct discovery_state *cache = &hdev->discovery;
2201         struct inquiry_info *info = (struct inquiry_info *) buf;
2202         struct inquiry_entry *e;
2203         int copied = 0;
2204
2205         list_for_each_entry(e, &cache->all, all) {
2206                 struct inquiry_data *data = &e->data;
2207
2208                 if (copied >= num)
2209                         break;
2210
2211                 bacpy(&info->bdaddr, &data->bdaddr);
2212                 info->pscan_rep_mode    = data->pscan_rep_mode;
2213                 info->pscan_period_mode = data->pscan_period_mode;
2214                 info->pscan_mode        = data->pscan_mode;
2215                 memcpy(info->dev_class, data->dev_class, 3);
2216                 info->clock_offset      = data->clock_offset;
2217
2218                 info++;
2219                 copied++;
2220         }
2221
2222         BT_DBG("cache %p, copied %d", cache, copied);
2223         return copied;
2224 }
2225
2226 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2227 {
2228         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2229         struct hci_dev *hdev = req->hdev;
2230         struct hci_cp_inquiry cp;
2231
2232         BT_DBG("%s", hdev->name);
2233
2234         if (test_bit(HCI_INQUIRY, &hdev->flags))
2235                 return;
2236
2237         /* Start Inquiry */
2238         memcpy(&cp.lap, &ir->lap, 3);
2239         cp.length  = ir->length;
2240         cp.num_rsp = ir->num_rsp;
2241         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2242 }
2243
2244 int hci_inquiry(void __user *arg)
2245 {
2246         __u8 __user *ptr = arg;
2247         struct hci_inquiry_req ir;
2248         struct hci_dev *hdev;
2249         int err = 0, do_inquiry = 0, max_rsp;
2250         long timeo;
2251         __u8 *buf;
2252
2253         if (copy_from_user(&ir, ptr, sizeof(ir)))
2254                 return -EFAULT;
2255
2256         hdev = hci_dev_get(ir.dev_id);
2257         if (!hdev)
2258                 return -ENODEV;
2259
2260         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2261                 err = -EBUSY;
2262                 goto done;
2263         }
2264
2265         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2266                 err = -EOPNOTSUPP;
2267                 goto done;
2268         }
2269
2270         if (hdev->dev_type != HCI_BREDR) {
2271                 err = -EOPNOTSUPP;
2272                 goto done;
2273         }
2274
2275         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2276                 err = -EOPNOTSUPP;
2277                 goto done;
2278         }
2279
2280         hci_dev_lock(hdev);
2281         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2282             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2283                 hci_inquiry_cache_flush(hdev);
2284                 do_inquiry = 1;
2285         }
2286         hci_dev_unlock(hdev);
2287
2288         timeo = ir.length * msecs_to_jiffies(2000);
2289
2290         if (do_inquiry) {
2291                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2292                                    timeo);
2293                 if (err < 0)
2294                         goto done;
2295
2296                 /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag
2297                  * is cleared). If it is interrupted by a signal, return -EINTR.
2298                  */
2299                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2300                                 TASK_INTERRUPTIBLE))
2301                         return -EINTR;
2302         }
2303
2304         /* For an unlimited number of responses we will use a buffer with
2305          * 255 entries
2306          */
2307         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2308
2309         /* cache_dump can't sleep. Therefore we allocate a temp buffer and
2310          * then copy it to user space.
2311          */
2312         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2313         if (!buf) {
2314                 err = -ENOMEM;
2315                 goto done;
2316         }
2317
2318         hci_dev_lock(hdev);
2319         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2320         hci_dev_unlock(hdev);
2321
2322         BT_DBG("num_rsp %d", ir.num_rsp);
2323
2324         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2325                 ptr += sizeof(ir);
2326                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2327                                  ir.num_rsp))
2328                         err = -EFAULT;
2329         } else
2330                 err = -EFAULT;
2331
2332         kfree(buf);
2333
2334 done:
2335         hci_dev_put(hdev);
2336         return err;
2337 }
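
/* Illustrative userspace sketch (hypothetical, not part of this file):
 * this handler backs the HCIINQUIRY ioctl on a raw HCI socket, with
 * the request immediately followed in memory by room for the
 * responses:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.ir.dev_id  = 0;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.lap[0]  = 0x33;
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;
 *	buf.ir.num_rsp = 8;
 *
 *	if (ioctl(hci_sock, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * where lap is the General Inquiry Access Code 0x9e8b33 and length is
 * in units of 1.28 seconds.
 */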
2338
2339 static int hci_dev_do_open(struct hci_dev *hdev)
2340 {
2341         int ret = 0;
2342
2343         BT_DBG("%s %p", hdev->name, hdev);
2344
2345         hci_req_lock(hdev);
2346
2347         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2348                 ret = -ENODEV;
2349                 goto done;
2350         }
2351
2352         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2354                 /* Check for rfkill but allow the HCI setup stage to
2355                  * proceed (which in itself doesn't cause any RF activity).
2356                  */
2357                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2358                         ret = -ERFKILL;
2359                         goto done;
2360                 }
2361
2362                 /* Check for valid public address or a configured static
2363                  * random address, but let the HCI setup proceed to
2364                  * be able to determine if there is a public address
2365                  * or not.
2366                  *
2367                  * In case of user channel usage, it is not important
2368                  * if a public address or static random address is
2369                  * available.
2370                  *
2371                  * This check is only valid for BR/EDR controllers
2372                  * since AMP controllers do not have an address.
2373                  */
2374                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375                     hdev->dev_type == HCI_BREDR &&
2376                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378                         ret = -EADDRNOTAVAIL;
2379                         goto done;
2380                 }
2381         }
2382
2383         if (test_bit(HCI_UP, &hdev->flags)) {
2384                 ret = -EALREADY;
2385                 goto done;
2386         }
2387
2388         if (hdev->open(hdev)) {
2389                 ret = -EIO;
2390                 goto done;
2391         }
2392
2393         atomic_set(&hdev->cmd_cnt, 1);
2394         set_bit(HCI_INIT, &hdev->flags);
2395
2396         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2397                 if (hdev->setup)
2398                         ret = hdev->setup(hdev);
2399
2400                 /* The transport driver can set these quirks before
2401                  * creating the HCI device or in its setup callback.
2402                  *
2403                  * In case any of them is set, the controller has to
2404                  * start up as unconfigured.
2405                  */
2406                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2408                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2409
2410                 /* For an unconfigured controller it is required to
2411                  * read at least the version information provided by
2412                  * the Read Local Version Information command.
2413                  *
2414                  * If the set_bdaddr driver callback is provided, then
2415                  * also the original Bluetooth public device address
2416                  * will be read using the Read BD Address command.
2417                  */
2418                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419                         ret = __hci_unconf_init(hdev);
2420         }
2421
2422         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423                 /* If public address change is configured, ensure that
2424                  * the address gets programmed. If the driver does not
2425                  * support changing the public address, fail the power
2426                  * on procedure.
2427                  */
2428                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2429                     hdev->set_bdaddr)
2430                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2431                 else
2432                         ret = -EADDRNOTAVAIL;
2433         }
2434
2435         if (!ret) {
2436                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2437                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2438                         ret = __hci_init(hdev);
2439         }
2440
2441         clear_bit(HCI_INIT, &hdev->flags);
2442
2443         if (!ret) {
2444                 hci_dev_hold(hdev);
2445                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2446                 set_bit(HCI_UP, &hdev->flags);
2447                 hci_notify(hdev, HCI_DEV_UP);
2448                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2449                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2450                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2451                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2452                     hdev->dev_type == HCI_BREDR) {
2453                         hci_dev_lock(hdev);
2454                         mgmt_powered(hdev, 1);
2455                         hci_dev_unlock(hdev);
2456                 }
2457         } else {
2458                 /* Init failed, cleanup */
2459                 flush_work(&hdev->tx_work);
2460                 flush_work(&hdev->cmd_work);
2461                 flush_work(&hdev->rx_work);
2462
2463                 skb_queue_purge(&hdev->cmd_q);
2464                 skb_queue_purge(&hdev->rx_q);
2465
2466                 if (hdev->flush)
2467                         hdev->flush(hdev);
2468
2469                 if (hdev->sent_cmd) {
2470                         kfree_skb(hdev->sent_cmd);
2471                         hdev->sent_cmd = NULL;
2472                 }
2473
2474                 hdev->close(hdev);
2475                 hdev->flags &= BIT(HCI_RAW);
2476         }
2477
2478 done:
2479         hci_req_unlock(hdev);
2480         return ret;
2481 }
2482
2483 /* ---- HCI ioctl helpers ---- */
2484
2485 int hci_dev_open(__u16 dev)
2486 {
2487         struct hci_dev *hdev;
2488         int err;
2489
2490         hdev = hci_dev_get(dev);
2491         if (!hdev)
2492                 return -ENODEV;
2493
2494         /* Devices that are marked as unconfigured can only be powered
2495          * up as user channel. Trying to bring them up as normal devices
2496          * will result in a failure. Only user channel operation is
2497          * possible.
2498          *
2499          * When this function is called for a user channel, the flag
2500          * HCI_USER_CHANNEL will be set first before attempting to
2501          * open the device.
2502          */
2503         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2504             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2505                 err = -EOPNOTSUPP;
2506                 goto done;
2507         }
2508
2509         /* We need to ensure that no other power on/off work is pending
2510          * before proceeding to call hci_dev_do_open. This is
2511          * particularly important if the setup procedure has not yet
2512          * completed.
2513          */
2514         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515                 cancel_delayed_work(&hdev->power_off);
2516
2517         /* After this call it is guaranteed that the setup procedure
2518          * has finished. This means that error conditions like RFKILL
2519          * or no valid public or static random address apply.
2520          */
2521         flush_workqueue(hdev->req_workqueue);
2522
2523         /* For controllers not using the management interface and that
2524          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2525          * so that pairing works for them. Once the management interface
2526          * is in use this bit will be cleared again and userspace has
2527          * to explicitly enable it.
2528          */
2529         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530             !test_bit(HCI_MGMT, &hdev->dev_flags))
2531                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2532
2533         err = hci_dev_do_open(hdev);
2534
2535 done:
2536         hci_dev_put(hdev);
2537         return err;
2538 }
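
/* Illustrative userspace sketch (hypothetical, not part of this file):
 * hci_dev_open() is reached through the HCIDEVUP ioctl on a raw HCI
 * control socket:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 */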
2539
2540 /* This function requires the caller to hold hdev->lock */
2541 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2542 {
2543         struct hci_conn_params *p;
2544
2545         list_for_each_entry(p, &hdev->le_conn_params, list) {
2546                 if (p->conn) {
2547                         hci_conn_drop(p->conn);
2548                         hci_conn_put(p->conn);
2549                         p->conn = NULL;
2550                 }
2551                 list_del_init(&p->action);
2552         }
2553
2554         BT_DBG("All LE pending actions cleared");
2555 }
2556
2557 static int hci_dev_do_close(struct hci_dev *hdev)
2558 {
2559         BT_DBG("%s %p", hdev->name, hdev);
2560
2561         cancel_delayed_work(&hdev->power_off);
2562
2563         hci_req_cancel(hdev, ENODEV);
2564         hci_req_lock(hdev);
2565
2566         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2567                 cancel_delayed_work_sync(&hdev->cmd_timer);
2568                 hci_req_unlock(hdev);
2569                 return 0;
2570         }
2571
2572         /* Flush RX and TX works */
2573         flush_work(&hdev->tx_work);
2574         flush_work(&hdev->rx_work);
2575
2576         if (hdev->discov_timeout > 0) {
2577                 cancel_delayed_work(&hdev->discov_off);
2578                 hdev->discov_timeout = 0;
2579                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2580                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2581         }
2582
2583         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2584                 cancel_delayed_work(&hdev->service_cache);
2585
2586         cancel_delayed_work_sync(&hdev->le_scan_disable);
2587
2588         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2589                 cancel_delayed_work_sync(&hdev->rpa_expired);
2590
2591         hci_dev_lock(hdev);
2592         hci_inquiry_cache_flush(hdev);
2593         hci_pend_le_actions_clear(hdev);
2594         hci_conn_hash_flush(hdev);
2595         hci_dev_unlock(hdev);
2596
2597         hci_notify(hdev, HCI_DEV_DOWN);
2598
2599         if (hdev->flush)
2600                 hdev->flush(hdev);
2601
2602         /* Reset device */
2603         skb_queue_purge(&hdev->cmd_q);
2604         atomic_set(&hdev->cmd_cnt, 1);
2605         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2606             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2607             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2608                 set_bit(HCI_INIT, &hdev->flags);
2609                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2610                 clear_bit(HCI_INIT, &hdev->flags);
2611         }
2612
2613         /* Flush cmd work */
2614         flush_work(&hdev->cmd_work);
2615
2616         /* Drop queues */
2617         skb_queue_purge(&hdev->rx_q);
2618         skb_queue_purge(&hdev->cmd_q);
2619         skb_queue_purge(&hdev->raw_q);
2620
2621         /* Drop last sent command */
2622         if (hdev->sent_cmd) {
2623                 cancel_delayed_work_sync(&hdev->cmd_timer);
2624                 kfree_skb(hdev->sent_cmd);
2625                 hdev->sent_cmd = NULL;
2626         }
2627
2628         kfree_skb(hdev->recv_evt);
2629         hdev->recv_evt = NULL;
2630
2631         /* After this point our queues are empty
2632          * and no tasks are scheduled. */
2633         hdev->close(hdev);
2634
2635         /* Clear flags */
2636         hdev->flags &= BIT(HCI_RAW);
2637         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2638
2639         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2640                 if (hdev->dev_type == HCI_BREDR) {
2641                         hci_dev_lock(hdev);
2642                         mgmt_powered(hdev, 0);
2643                         hci_dev_unlock(hdev);
2644                 }
2645         }
2646
2647         /* Controller radio is available but is currently powered down */
2648         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2649
2650         memset(hdev->eir, 0, sizeof(hdev->eir));
2651         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2652         bacpy(&hdev->random_addr, BDADDR_ANY);
2653
2654         hci_req_unlock(hdev);
2655
2656         hci_dev_put(hdev);
2657         return 0;
2658 }
2659
2660 int hci_dev_close(__u16 dev)
2661 {
2662         struct hci_dev *hdev;
2663         int err;
2664
2665         hdev = hci_dev_get(dev);
2666         if (!hdev)
2667                 return -ENODEV;
2668
2669         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2670                 err = -EBUSY;
2671                 goto done;
2672         }
2673
2674         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2675                 cancel_delayed_work(&hdev->power_off);
2676
2677         err = hci_dev_do_close(hdev);
2678
2679 done:
2680         hci_dev_put(hdev);
2681         return err;
2682 }
2683
2684 int hci_dev_reset(__u16 dev)
2685 {
2686         struct hci_dev *hdev;
2687         int ret = 0;
2688
2689         hdev = hci_dev_get(dev);
2690         if (!hdev)
2691                 return -ENODEV;
2692
2693         hci_req_lock(hdev);
2694
2695         if (!test_bit(HCI_UP, &hdev->flags)) {
2696                 ret = -ENETDOWN;
2697                 goto done;
2698         }
2699
2700         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2701                 ret = -EBUSY;
2702                 goto done;
2703         }
2704
2705         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2706                 ret = -EOPNOTSUPP;
2707                 goto done;
2708         }
2709
2710         /* Drop queues */
2711         skb_queue_purge(&hdev->rx_q);
2712         skb_queue_purge(&hdev->cmd_q);
2713
2714         hci_dev_lock(hdev);
2715         hci_inquiry_cache_flush(hdev);
2716         hci_conn_hash_flush(hdev);
2717         hci_dev_unlock(hdev);
2718
2719         if (hdev->flush)
2720                 hdev->flush(hdev);
2721
2722         atomic_set(&hdev->cmd_cnt, 1);
2723         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2724
2725         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2726
2727 done:
2728         hci_req_unlock(hdev);
2729         hci_dev_put(hdev);
2730         return ret;
2731 }
2732
2733 int hci_dev_reset_stat(__u16 dev)
2734 {
2735         struct hci_dev *hdev;
2736         int ret = 0;
2737
2738         hdev = hci_dev_get(dev);
2739         if (!hdev)
2740                 return -ENODEV;
2741
2742         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2743                 ret = -EBUSY;
2744                 goto done;
2745         }
2746
2747         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2748                 ret = -EOPNOTSUPP;
2749                 goto done;
2750         }
2751
2752         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2753
2754 done:
2755         hci_dev_put(hdev);
2756         return ret;
2757 }
2758
2759 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2760 {
2761         bool conn_changed, discov_changed;
2762
2763         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2764
2765         if ((scan & SCAN_PAGE))
2766                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2767                                                  &hdev->dev_flags);
2768         else
2769                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2770                                                   &hdev->dev_flags);
2771
2772         if ((scan & SCAN_INQUIRY)) {
2773                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2774                                                    &hdev->dev_flags);
2775         } else {
2776                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2777                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2778                                                     &hdev->dev_flags);
2779         }
2780
2781         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2782                 return;
2783
2784         if (conn_changed || discov_changed) {
2785                 /* In case this was disabled through mgmt */
2786                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2787
2788                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2789                         mgmt_update_adv_data(hdev);
2790
2791                 mgmt_new_settings(hdev);
2792         }
2793 }
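
/* Illustrative note (not part of the original file): scan here is the
 * Write Scan Enable parameter, so the mapping performed above is:
 *
 *	SCAN_DISABLED			neither connectable nor discoverable
 *	SCAN_PAGE			connectable only
 *	SCAN_PAGE | SCAN_INQUIRY	connectable and discoverable
 */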
2794
2795 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2796 {
2797         struct hci_dev *hdev;
2798         struct hci_dev_req dr;
2799         int err = 0;
2800
2801         if (copy_from_user(&dr, arg, sizeof(dr)))
2802                 return -EFAULT;
2803
2804         hdev = hci_dev_get(dr.dev_id);
2805         if (!hdev)
2806                 return -ENODEV;
2807
2808         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2809                 err = -EBUSY;
2810                 goto done;
2811         }
2812
2813         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2814                 err = -EOPNOTSUPP;
2815                 goto done;
2816         }
2817
2818         if (hdev->dev_type != HCI_BREDR) {
2819                 err = -EOPNOTSUPP;
2820                 goto done;
2821         }
2822
2823         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2824                 err = -EOPNOTSUPP;
2825                 goto done;
2826         }
2827
2828         switch (cmd) {
2829         case HCISETAUTH:
2830                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2831                                    HCI_INIT_TIMEOUT);
2832                 break;
2833
2834         case HCISETENCRYPT:
2835                 if (!lmp_encrypt_capable(hdev)) {
2836                         err = -EOPNOTSUPP;
2837                         break;
2838                 }
2839
2840                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2841                         /* Auth must be enabled first */
2842                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2843                                            HCI_INIT_TIMEOUT);
2844                         if (err)
2845                                 break;
2846                 }
2847
2848                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2849                                    HCI_INIT_TIMEOUT);
2850                 break;
2851
2852         case HCISETSCAN:
2853                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2854                                    HCI_INIT_TIMEOUT);
2855
2856                 /* Ensure that the connectable and discoverable states
2857                  * get correctly modified as this was a non-mgmt change.
2858                  */
2859                 if (!err)
2860                         hci_update_scan_state(hdev, dr.dev_opt);
2861                 break;
2862
2863         case HCISETLINKPOL:
2864                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2865                                    HCI_INIT_TIMEOUT);
2866                 break;
2867
2868         case HCISETLINKMODE:
2869                 hdev->link_mode = ((__u16) dr.dev_opt) &
2870                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2871                 break;
2872
2873         case HCISETPTYPE:
2874                 hdev->pkt_type = (__u16) dr.dev_opt;
2875                 break;
2876
2877         case HCISETACLMTU:
2878                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2879                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2880                 break;
2881
2882         case HCISETSCOMTU:
2883                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2884                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2885                 break;
2886
2887         default:
2888                 err = -EINVAL;
2889                 break;
2890         }
2891
2892 done:
2893         hci_dev_put(hdev);
2894         return err;
2895 }
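
/* Illustrative note (hypothetical caller, not part of this file): for
 * HCISETACLMTU and HCISETSCOMTU on a little-endian host, dev_opt packs
 * the packet count into its low 16 bits and the MTU into its high 16
 * bits, matching the pointer arithmetic above:
 *
 *	dr.dev_opt = (mtu << 16) | pkts;
 *	ioctl(ctl, HCISETACLMTU, &dr);
 */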
2896
2897 int hci_get_dev_list(void __user *arg)
2898 {
2899         struct hci_dev *hdev;
2900         struct hci_dev_list_req *dl;
2901         struct hci_dev_req *dr;
2902         int n = 0, size, err;
2903         __u16 dev_num;
2904
2905         if (get_user(dev_num, (__u16 __user *) arg))
2906                 return -EFAULT;
2907
2908         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2909                 return -EINVAL;
2910
2911         size = sizeof(*dl) + dev_num * sizeof(*dr);
2912
2913         dl = kzalloc(size, GFP_KERNEL);
2914         if (!dl)
2915                 return -ENOMEM;
2916
2917         dr = dl->dev_req;
2918
2919         read_lock(&hci_dev_list_lock);
2920         list_for_each_entry(hdev, &hci_dev_list, list) {
2921                 unsigned long flags = hdev->flags;
2922
2923                 /* When auto-off is configured, it means the transport
2924                  * is running, but in that case still indicate that the
2925                  * device is actually down.
2926                  */
2927                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2928                         flags &= ~BIT(HCI_UP);
2929
2930                 (dr + n)->dev_id  = hdev->id;
2931                 (dr + n)->dev_opt = flags;
2932
2933                 if (++n >= dev_num)
2934                         break;
2935         }
2936         read_unlock(&hci_dev_list_lock);
2937
2938         dl->dev_num = n;
2939         size = sizeof(*dl) + n * sizeof(*dr);
2940
2941         err = copy_to_user(arg, dl, size);
2942         kfree(dl);
2943
2944         return err ? -EFAULT : 0;
2945 }
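
/* Illustrative userspace sketch (hypothetical, not part of this file):
 * HCIGETDEVLIST expects dev_num to be set on entry and rewrites it to
 * the number of entries actually filled in:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 */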
2946
2947 int hci_get_dev_info(void __user *arg)
2948 {
2949         struct hci_dev *hdev;
2950         struct hci_dev_info di;
2951         unsigned long flags;
2952         int err = 0;
2953
2954         if (copy_from_user(&di, arg, sizeof(di)))
2955                 return -EFAULT;
2956
2957         hdev = hci_dev_get(di.dev_id);
2958         if (!hdev)
2959                 return -ENODEV;
2960
2961         /* When auto-off is configured, it means the transport
2962          * is running, but in that case still indicate that the
2963          * device is actually down.
2964          */
2965         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2966                 flags = hdev->flags & ~BIT(HCI_UP);
2967         else
2968                 flags = hdev->flags;
2969
2970         strcpy(di.name, hdev->name);
2971         di.bdaddr   = hdev->bdaddr;
2972         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2973         di.flags    = flags;
2974         di.pkt_type = hdev->pkt_type;
2975         if (lmp_bredr_capable(hdev)) {
2976                 di.acl_mtu  = hdev->acl_mtu;
2977                 di.acl_pkts = hdev->acl_pkts;
2978                 di.sco_mtu  = hdev->sco_mtu;
2979                 di.sco_pkts = hdev->sco_pkts;
2980         } else {
2981                 di.acl_mtu  = hdev->le_mtu;
2982                 di.acl_pkts = hdev->le_pkts;
2983                 di.sco_mtu  = 0;
2984                 di.sco_pkts = 0;
2985         }
2986         di.link_policy = hdev->link_policy;
2987         di.link_mode   = hdev->link_mode;
2988
2989         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2990         memcpy(&di.features, &hdev->features, sizeof(di.features));
2991
2992         if (copy_to_user(arg, &di, sizeof(di)))
2993                 err = -EFAULT;
2994
2995         hci_dev_put(hdev);
2996
2997         return err;
2998 }
2999
3000 /* ---- Interface to HCI drivers ---- */
3001
3002 static int hci_rfkill_set_block(void *data, bool blocked)
3003 {
3004         struct hci_dev *hdev = data;
3005
3006         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3007
3008         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3009                 return -EBUSY;
3010
3011         if (blocked) {
3012                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3013                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3014                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3015                         hci_dev_do_close(hdev);
3016         } else {
3017                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3018         }
3019
3020         return 0;
3021 }
3022
3023 static const struct rfkill_ops hci_rfkill_ops = {
3024         .set_block = hci_rfkill_set_block,
3025 };
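
/* Illustrative note: hci_rfkill_ops is wired up when the controller
 * is registered (later in this file), roughly:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 */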
3026
3027 static void hci_power_on(struct work_struct *work)
3028 {
3029         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3030         int err;
3031
3032         BT_DBG("%s", hdev->name);
3033
3034         err = hci_dev_do_open(hdev);
3035         if (err < 0) {
3036                 mgmt_set_powered_failed(hdev, err);
3037                 return;
3038         }
3039
3040         /* During the HCI setup phase, a few error conditions are
3041          * ignored and they need to be checked now. If they are still
3042          * valid, it is important to turn the device back off.
3043          */
3044         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3045             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3046             (hdev->dev_type == HCI_BREDR &&
3047              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3048              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3049                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3050                 hci_dev_do_close(hdev);
3051         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3052                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3053                                    HCI_AUTO_OFF_TIMEOUT);
3054         }
3055
3056         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3057                 /* For unconfigured devices, set the HCI_RAW flag
3058                  * so that userspace can easily identify them.
3059                  */
3060                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3061                         set_bit(HCI_RAW, &hdev->flags);
3062
3063                 /* For fully configured devices, this will send
3064                  * the Index Added event. For unconfigured devices,
3065                  * it will send the Unconfigured Index Added event.
3066                  *
3067                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3068                  * and no event will be sent.
3069                  */
3070                 mgmt_index_added(hdev);
3071         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3072                 /* Now that the controller is configured, it is
3073                  * important to clear the HCI_RAW flag.
3074                  */
3075                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3076                         clear_bit(HCI_RAW, &hdev->flags);
3077
3078                 /* Powering on the controller with HCI_CONFIG set only
3079                  * happens with the transition from unconfigured to
3080                  * configured. This will send the Index Added event.
3081                  */
3082                 mgmt_index_added(hdev);
3083         }
3084 }
3085
3086 static void hci_power_off(struct work_struct *work)
3087 {
3088         struct hci_dev *hdev = container_of(work, struct hci_dev,
3089                                             power_off.work);
3090
3091         BT_DBG("%s", hdev->name);
3092
3093         hci_dev_do_close(hdev);
3094 }
3095
3096 static void hci_discov_off(struct work_struct *work)
3097 {
3098         struct hci_dev *hdev;
3099
3100         hdev = container_of(work, struct hci_dev, discov_off.work);
3101
3102         BT_DBG("%s", hdev->name);
3103
3104         mgmt_discoverable_timeout(hdev);
3105 }
3106
3107 void hci_uuids_clear(struct hci_dev *hdev)
3108 {
3109         struct bt_uuid *uuid, *tmp;
3110
3111         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3112                 list_del(&uuid->list);
3113                 kfree(uuid);
3114         }
3115 }
3116
3117 void hci_link_keys_clear(struct hci_dev *hdev)
3118 {
3119         struct list_head *p, *n;
3120
3121         list_for_each_safe(p, n, &hdev->link_keys) {
3122                 struct link_key *key;
3123
3124                 key = list_entry(p, struct link_key, list);
3125
3126                 list_del(p);
3127                 kfree(key);
3128         }
3129 }
3130
3131 void hci_smp_ltks_clear(struct hci_dev *hdev)
3132 {
3133         struct smp_ltk *k, *tmp;
3134
3135         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3136                 list_del(&k->list);
3137                 kfree(k);
3138         }
3139 }
3140
3141 void hci_smp_irks_clear(struct hci_dev *hdev)
3142 {
3143         struct smp_irk *k, *tmp;
3144
3145         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3146                 list_del(&k->list);
3147                 kfree(k);
3148         }
3149 }
3150
3151 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3152 {
3153         struct link_key *k;
3154
3155         list_for_each_entry(k, &hdev->link_keys, list)
3156                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3157                         return k;
3158
3159         return NULL;
3160 }
3161
3162 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3163                                u8 key_type, u8 old_key_type)
3164 {
3165         /* Legacy key */
3166         if (key_type < 0x03)
3167                 return true;
3168
3169         /* Debug keys are insecure so don't store them persistently */
3170         if (key_type == HCI_LK_DEBUG_COMBINATION)
3171                 return false;
3172
3173         /* Changed combination key and there's no previous one */
3174         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3175                 return false;
3176
3177         /* Security mode 3 case */
3178         if (!conn)
3179                 return true;
3180
3181         /* Neither local nor remote side had no-bonding as a requirement */
3182         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3183                 return true;
3184
3185         /* Local side had dedicated bonding as a requirement */
3186         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3187                 return true;
3188
3189         /* Remote side had dedicated bonding as a requirement */
3190         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3191                 return true;
3192
3193         /* If none of the above criteria match, then don't store the key
3194          * persistently */
3195         return false;
3196 }
3197
3198 static u8 ltk_role(u8 type)
3199 {
3200         if (type == SMP_LTK)
3201                 return HCI_ROLE_MASTER;
3202
3203         return HCI_ROLE_SLAVE;
3204 }
3205
3206 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3207                              u8 role)
3208 {
3209         struct smp_ltk *k;
3210
3211         list_for_each_entry(k, &hdev->long_term_keys, list) {
3212                 if (k->ediv != ediv || k->rand != rand)
3213                         continue;
3214
3215                 if (ltk_role(k->type) != role)
3216                         continue;
3217
3218                 return k;
3219         }
3220
3221         return NULL;
3222 }
3223
3224 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3225                                      u8 addr_type, u8 role)
3226 {
3227         struct smp_ltk *k;
3228
3229         list_for_each_entry(k, &hdev->long_term_keys, list)
3230                 if (addr_type == k->bdaddr_type &&
3231                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3232                     ltk_role(k->type) == role)
3233                         return k;
3234
3235         return NULL;
3236 }
3237
3238 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3239 {
3240         struct smp_irk *irk;
3241
3242         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3243                 if (!bacmp(&irk->rpa, rpa))
3244                         return irk;
3245         }
3246
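        /* No match against a previously resolved RPA; fall back to
         * resolving the address with each IRK, caching the RPA on
         * success so the next lookup hits the fast path above.
         */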
3247         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3248                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3249                         bacpy(&irk->rpa, rpa);
3250                         return irk;
3251                 }
3252         }
3253
3254         return NULL;
3255 }
3256
3257 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258                                      u8 addr_type)
3259 {
3260         struct smp_irk *irk;
3261
3262         /* Identity Address must be public or static random */
3263         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3264                 return NULL;
3265
3266         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3267                 if (addr_type == irk->addr_type &&
3268                     bacmp(bdaddr, &irk->bdaddr) == 0)
3269                         return irk;
3270         }
3271
3272         return NULL;
3273 }
3274
3275 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3276                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3277                                   u8 pin_len, bool *persistent)
3278 {
3279         struct link_key *key, *old_key;
3280         u8 old_key_type;
3281
3282         old_key = hci_find_link_key(hdev, bdaddr);
3283         if (old_key) {
3284                 old_key_type = old_key->type;
3285                 key = old_key;
3286         } else {
3287                 old_key_type = conn ? conn->key_type : 0xff;
3288                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3289                 if (!key)
3290                         return NULL;
3291                 list_add(&key->list, &hdev->link_keys);
3292         }
3293
3294         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3295
3296         /* Some buggy controller combinations generate a changed
3297          * combination key for legacy pairing even when there's no
3298          * previous key */
3299         if (type == HCI_LK_CHANGED_COMBINATION &&
3300             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3301                 type = HCI_LK_COMBINATION;
3302                 if (conn)
3303                         conn->key_type = type;
3304         }
3305
3306         bacpy(&key->bdaddr, bdaddr);
3307         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3308         key->pin_len = pin_len;
3309
3310         if (type == HCI_LK_CHANGED_COMBINATION)
3311                 key->type = old_key_type;
3312         else
3313                 key->type = type;
3314
3315         if (persistent)
3316                 *persistent = hci_persistent_key(hdev, conn, type,
3317                                                  old_key_type);
3318
3319         return key;
3320 }
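
/* A sketch of how an event handler might consume this (modelled loosely
 * on the Link Key Notification path; ev and pin_len are hypothetical):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */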
3321
3322 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3323                             u8 addr_type, u8 type, u8 authenticated,
3324                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3325 {
3326         struct smp_ltk *key, *old_key;
3327         u8 role = ltk_role(type);
3328
3329         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3330         if (old_key)
3331                 key = old_key;
3332         else {
3333                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3334                 if (!key)
3335                         return NULL;
3336                 list_add(&key->list, &hdev->long_term_keys);
3337         }
3338
3339         bacpy(&key->bdaddr, bdaddr);
3340         key->bdaddr_type = addr_type;
3341         memcpy(key->val, tk, sizeof(key->val));
3342         key->authenticated = authenticated;
3343         key->ediv = ediv;
3344         key->rand = rand;
3345         key->enc_size = enc_size;
3346         key->type = type;
3347
3348         return key;
3349 }
3350
3351 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3352                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3353 {
3354         struct smp_irk *irk;
3355
3356         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3357         if (!irk) {
3358                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3359                 if (!irk)
3360                         return NULL;
3361
3362                 bacpy(&irk->bdaddr, bdaddr);
3363                 irk->addr_type = addr_type;
3364
3365                 list_add(&irk->list, &hdev->identity_resolving_keys);
3366         }
3367
3368         memcpy(irk->val, val, 16);
3369         bacpy(&irk->rpa, rpa);
3370
3371         return irk;
3372 }
3373
3374 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3375 {
3376         struct link_key *key;
3377
3378         key = hci_find_link_key(hdev, bdaddr);
3379         if (!key)
3380                 return -ENOENT;
3381
3382         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3383
3384         list_del(&key->list);
3385         kfree(key);
3386
3387         return 0;
3388 }
3389
3390 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3391 {
3392         struct smp_ltk *k, *tmp;
3393         int removed = 0;
3394
3395         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3396                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3397                         continue;
3398
3399                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3400
3401                 list_del(&k->list);
3402                 kfree(k);
3403                 removed++;
3404         }
3405
3406         return removed ? 0 : -ENOENT;
3407 }
3408
3409 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3410 {
3411         struct smp_irk *k, *tmp;
3412
3413         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3414                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3415                         continue;
3416
3417                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3418
3419                 list_del(&k->list);
3420                 kfree(k);
3421         }
3422 }
3423
3424 /* HCI command timer function */
3425 static void hci_cmd_timeout(struct work_struct *work)
3426 {
3427         struct hci_dev *hdev = container_of(work, struct hci_dev,
3428                                             cmd_timer.work);
3429
3430         if (hdev->sent_cmd) {
3431                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3432                 u16 opcode = __le16_to_cpu(sent->opcode);
3433
3434                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3435         } else {
3436                 BT_ERR("%s command tx timeout", hdev->name);
3437         }
3438
3439         atomic_set(&hdev->cmd_cnt, 1);
3440         queue_work(hdev->workqueue, &hdev->cmd_work);
3441 }
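
/* Resetting cmd_cnt to 1 above deliberately gives up on the stalled
 * command and lets hci_cmd_work() push the next queued command to the
 * controller instead of blocking forever.
 */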
3442
3443 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3444                                           bdaddr_t *bdaddr)
3445 {
3446         struct oob_data *data;
3447
3448         list_for_each_entry(data, &hdev->remote_oob_data, list)
3449                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3450                         return data;
3451
3452         return NULL;
3453 }
3454
3455 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3456 {
3457         struct oob_data *data;
3458
3459         data = hci_find_remote_oob_data(hdev, bdaddr);
3460         if (!data)
3461                 return -ENOENT;
3462
3463         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3464
3465         list_del(&data->list);
3466         kfree(data);
3467
3468         return 0;
3469 }
3470
3471 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3472 {
3473         struct oob_data *data, *n;
3474
3475         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3476                 list_del(&data->list);
3477                 kfree(data);
3478         }
3479 }
3480
3481 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3482                             u8 *hash, u8 *randomizer)
3483 {
3484         struct oob_data *data;
3485
3486         data = hci_find_remote_oob_data(hdev, bdaddr);
3487         if (!data) {
3488                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3489                 if (!data)
3490                         return -ENOMEM;
3491
3492                 bacpy(&data->bdaddr, bdaddr);
3493                 list_add(&data->list, &hdev->remote_oob_data);
3494         }
3495
3496         memcpy(data->hash192, hash, sizeof(data->hash192));
3497         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3498
3499         memset(data->hash256, 0, sizeof(data->hash256));
3500         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3501
3502         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3503
3504         return 0;
3505 }
3506
3507 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3508                                 u8 *hash192, u8 *randomizer192,
3509                                 u8 *hash256, u8 *randomizer256)
3510 {
3511         struct oob_data *data;
3512
3513         data = hci_find_remote_oob_data(hdev, bdaddr);
3514         if (!data) {
3515                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3516                 if (!data)
3517                         return -ENOMEM;
3518
3519                 bacpy(&data->bdaddr, bdaddr);
3520                 list_add(&data->list, &hdev->remote_oob_data);
3521         }
3522
3523         memcpy(data->hash192, hash192, sizeof(data->hash192));
3524         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3525
3526         memcpy(data->hash256, hash256, sizeof(data->hash256));
3527         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3528
3529         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3530
3531         return 0;
3532 }
3533
3534 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3535                                          bdaddr_t *bdaddr, u8 type)
3536 {
3537         struct bdaddr_list *b;
3538
3539         list_for_each_entry(b, bdaddr_list, list) {
3540                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3541                         return b;
3542         }
3543
3544         return NULL;
3545 }
3546
3547 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3548 {
3549         struct list_head *p, *n;
3550
3551         list_for_each_safe(p, n, bdaddr_list) {
3552                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3553
3554                 list_del(p);
3555                 kfree(b);
3556         }
3557 }
3558
3559 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3560 {
3561         struct bdaddr_list *entry;
3562
3563         if (!bacmp(bdaddr, BDADDR_ANY))
3564                 return -EBADF;
3565
3566         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3567                 return -EEXIST;
3568
3569         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3570         if (!entry)
3571                 return -ENOMEM;
3572
3573         bacpy(&entry->bdaddr, bdaddr);
3574         entry->bdaddr_type = type;
3575
3576         list_add(&entry->list, list);
3577
3578         return 0;
3579 }
3580
3581 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3582 {
3583         struct bdaddr_list *entry;
3584
3585         if (!bacmp(bdaddr, BDADDR_ANY)) {
3586                 hci_bdaddr_list_clear(list);
3587                 return 0;
3588         }
3589
3590         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3591         if (!entry)
3592                 return -ENOENT;
3593
3594         list_del(&entry->list);
3595         kfree(entry);
3596
3597         return 0;
3598 }
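
/* Illustrative use of the bdaddr list helpers (the bdaddr value here is
 * hypothetical):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;
 *	...
 *	hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *
 * Note that passing BDADDR_ANY to hci_bdaddr_list_del() clears the
 * whole list, whereas the add path rejects it with -EBADF.
 */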
3599
3600 /* This function requires the caller holds hdev->lock */
3601 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3602                                                bdaddr_t *addr, u8 addr_type)
3603 {
3604         struct hci_conn_params *params;
3605
3606         /* The conn params list only contains identity addresses */
3607         if (!hci_is_identity_address(addr, addr_type))
3608                 return NULL;
3609
3610         list_for_each_entry(params, &hdev->le_conn_params, list) {
3611                 if (bacmp(&params->addr, addr) == 0 &&
3612                     params->addr_type == addr_type) {
3613                         return params;
3614                 }
3615         }
3616
3617         return NULL;
3618 }
3619
3620 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3621 {
3622         struct hci_conn *conn;
3623
3624         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3625         if (!conn)
3626                 return false;
3627
3628         if (conn->dst_type != type)
3629                 return false;
3630
3631         if (conn->state != BT_CONNECTED)
3632                 return false;
3633
3634         return true;
3635 }
3636
3637 /* This function requires the caller holds hdev->lock */
3638 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3639                                                   bdaddr_t *addr, u8 addr_type)
3640 {
3641         struct hci_conn_params *param;
3642
3643         /* The list only contains identity addresses */
3644         if (!hci_is_identity_address(addr, addr_type))
3645                 return NULL;
3646
3647         list_for_each_entry(param, list, action) {
3648                 if (bacmp(&param->addr, addr) == 0 &&
3649                     param->addr_type == addr_type)
3650                         return param;
3651         }
3652
3653         return NULL;
3654 }
3655
3656 /* This function requires the caller holds hdev->lock */
3657 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3658                                             bdaddr_t *addr, u8 addr_type)
3659 {
3660         struct hci_conn_params *params;
3661
3662         if (!hci_is_identity_address(addr, addr_type))
3663                 return NULL;
3664
3665         params = hci_conn_params_lookup(hdev, addr, addr_type);
3666         if (params)
3667                 return params;
3668
3669         params = kzalloc(sizeof(*params), GFP_KERNEL);
3670         if (!params) {
3671                 BT_ERR("Out of memory");
3672                 return NULL;
3673         }
3674
3675         bacpy(&params->addr, addr);
3676         params->addr_type = addr_type;
3677
3678         list_add(&params->list, &hdev->le_conn_params);
3679         INIT_LIST_HEAD(&params->action);
3680
3681         params->conn_min_interval = hdev->le_conn_min_interval;
3682         params->conn_max_interval = hdev->le_conn_max_interval;
3683         params->conn_latency = hdev->le_conn_latency;
3684         params->supervision_timeout = hdev->le_supv_timeout;
3685         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3686
3687         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3688
3689         return params;
3690 }
3691
3692 /* This function requires the caller holds hdev->lock */
3693 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3694                         u8 auto_connect)
3695 {
3696         struct hci_conn_params *params;
3697
3698         params = hci_conn_params_add(hdev, addr, addr_type);
3699         if (!params)
3700                 return -EIO;
3701
3702         if (params->auto_connect == auto_connect)
3703                 return 0;
3704
3705         list_del_init(&params->action);
3706
3707         switch (auto_connect) {
3708         case HCI_AUTO_CONN_DISABLED:
3709         case HCI_AUTO_CONN_LINK_LOSS:
3710                 hci_update_background_scan(hdev);
3711                 break;
3712         case HCI_AUTO_CONN_REPORT:
3713                 list_add(&params->action, &hdev->pend_le_reports);
3714                 hci_update_background_scan(hdev);
3715                 break;
3716         case HCI_AUTO_CONN_DIRECT:
3717         case HCI_AUTO_CONN_ALWAYS:
3718                 if (!is_connected(hdev, addr, addr_type)) {
3719                         list_add(&params->action, &hdev->pend_le_conns);
3720                         hci_update_background_scan(hdev);
3721                 }
3722                 break;
3723         }
3724
3725         params->auto_connect = auto_connect;
3726
3727         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3728                auto_connect);
3729
3730         return 0;
3731 }
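
/* Illustrative policy change (a minimal sketch of what a caller such as
 * the mgmt Add Device handling might do; addr and addr_type are
 * hypothetical):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, addr_type,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * The helper re-queues the entry on the right pending list and kicks
 * the background scan as needed.
 */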
3732
3733 static void hci_conn_params_free(struct hci_conn_params *params)
3734 {
3735         if (params->conn) {
3736                 hci_conn_drop(params->conn);
3737                 hci_conn_put(params->conn);
3738         }
3739
3740         list_del(&params->action);
3741         list_del(&params->list);
3742         kfree(params);
3743 }
3744
3745 /* This function requires the caller holds hdev->lock */
3746 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3747 {
3748         struct hci_conn_params *params;
3749
3750         params = hci_conn_params_lookup(hdev, addr, addr_type);
3751         if (!params)
3752                 return;
3753
3754         hci_conn_params_free(params);
3755
3756         hci_update_background_scan(hdev);
3757
3758         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3759 }
3760
3761 /* This function requires the caller holds hdev->lock */
3762 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3763 {
3764         struct hci_conn_params *params, *tmp;
3765
3766         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3767                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3768                         continue;
3769                 list_del(&params->list);
3770                 kfree(params);
3771         }
3772
3773         BT_DBG("All LE disabled connection parameters were removed");
3774 }
3775
3776 /* This function requires the caller holds hdev->lock */
3777 void hci_conn_params_clear_all(struct hci_dev *hdev)
3778 {
3779         struct hci_conn_params *params, *tmp;
3780
3781         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3782                 hci_conn_params_free(params);
3783
3784         hci_update_background_scan(hdev);
3785
3786         BT_DBG("All LE connection parameters were removed");
3787 }
3788
3789 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3790 {
3791         if (status) {
3792                 BT_ERR("Failed to start inquiry: status %d", status);
3793
3794                 hci_dev_lock(hdev);
3795                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3796                 hci_dev_unlock(hdev);
3797                 return;
3798         }
3799 }
3800
3801 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3802 {
3803         /* General inquiry access code (GIAC) */
3804         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3805         struct hci_request req;
3806         struct hci_cp_inquiry cp;
3807         int err;
3808
3809         if (status) {
3810                 BT_ERR("Failed to disable LE scanning: status %d", status);
3811                 return;
3812         }
3813
3814         switch (hdev->discovery.type) {
3815         case DISCOV_TYPE_LE:
3816                 hci_dev_lock(hdev);
3817                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818                 hci_dev_unlock(hdev);
3819                 break;
3820
3821         case DISCOV_TYPE_INTERLEAVED:
3822                 hci_req_init(&req, hdev);
3823
3824                 memset(&cp, 0, sizeof(cp));
3825                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3826                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3827                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3828
3829                 hci_dev_lock(hdev);
3830
3831                 hci_inquiry_cache_flush(hdev);
3832
3833                 err = hci_req_run(&req, inquiry_complete);
3834                 if (err) {
3835                         BT_ERR("Inquiry request failed: err %d", err);
3836                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3837                 }
3838
3839                 hci_dev_unlock(hdev);
3840                 break;
3841         }
3842 }
3843
3844 static void le_scan_disable_work(struct work_struct *work)
3845 {
3846         struct hci_dev *hdev = container_of(work, struct hci_dev,
3847                                             le_scan_disable.work);
3848         struct hci_request req;
3849         int err;
3850
3851         BT_DBG("%s", hdev->name);
3852
3853         hci_req_init(&req, hdev);
3854
3855         hci_req_add_le_scan_disable(&req);
3856
3857         err = hci_req_run(&req, le_scan_disable_work_complete);
3858         if (err)
3859                 BT_ERR("Disable LE scanning request failed: err %d", err);
3860 }
3861
3862 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3863 {
3864         struct hci_dev *hdev = req->hdev;
3865
3866         /* If we're advertising or initiating an LE connection we can't
3867          * go ahead and change the random address at this time. This is
3868          * because the eventual initiator address used for the
3869          * subsequently created connection will be undefined (some
3870          * controllers use the new address and others the one we had
3871          * when the operation started).
3872          *
3873          * In this kind of scenario skip the update and let the random
3874          * address be updated at the next cycle.
3875          */
3876         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3877             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3878                 BT_DBG("Deferring random address update");
3879                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3880                 return;
3881         }
3882
3883         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3884 }
3885
3886 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3887                               u8 *own_addr_type)
3888 {
3889         struct hci_dev *hdev = req->hdev;
3890         int err;
3891
3892         /* If privacy is enabled use a resolvable private address. If
3893          * current RPA has expired or there is something else than
3894          * the current RPA in use, then generate a new one.
3895          */
3896         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3897                 int to;
3898
3899                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3900
3901                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3902                     !bacmp(&hdev->random_addr, &hdev->rpa))
3903                         return 0;
3904
3905                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3906                 if (err < 0) {
3907                         BT_ERR("%s failed to generate new RPA", hdev->name);
3908                         return err;
3909                 }
3910
3911                 set_random_addr(req, &hdev->rpa);
3912
3913                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3914                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3915
3916                 return 0;
3917         }
3918
3919         /* In case of required privacy without resolvable private address,
3920          * use an unresolvable private address. This is useful for active
3921          * scanning and non-connectable advertising.
3922          */
3923         if (require_privacy) {
3924                 bdaddr_t urpa;
3925
3926                 get_random_bytes(&urpa, 6);
3927                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3928
3929                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3930                 set_random_addr(req, &urpa);
3931                 return 0;
3932         }
3933
3934         /* If forcing static address is in use or there is no public
3935          * address, use the static address as random address (but skip
3936          * the HCI command if the current random address is already the
3937          * static one).
3938          */
3939         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3940             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3941                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3943                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3944                                     &hdev->static_addr);
3945                 return 0;
3946         }
3947
3948         /* Neither privacy nor static address is being used so use a
3949          * public address.
3950          */
3951         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3952
3953         return 0;
3954 }
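
/* In short, the own-address decision above is, in order of precedence:
 * an RPA when privacy is enabled (regenerated on expiry), a freshly
 * generated unresolvable private address when privacy is required but
 * not enabled, the static random address when forced or when no public
 * address exists, and the public address otherwise.
 */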
3955
3956 /* Copy the Identity Address of the controller.
3957  *
3958  * If the controller has a public BD_ADDR, then by default use that one.
3959  * If this is a LE only controller without a public address, default to
3960  * the static random address.
3961  *
3962  * For debugging purposes it is possible to force controllers with a
3963  * public address to use the static random address instead.
3964  */
3965 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3966                                u8 *bdaddr_type)
3967 {
3968         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3969             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3970                 bacpy(bdaddr, &hdev->static_addr);
3971                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3972         } else {
3973                 bacpy(bdaddr, &hdev->bdaddr);
3974                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3975         }
3976 }
3977
3978 /* Alloc HCI device */
3979 struct hci_dev *hci_alloc_dev(void)
3980 {
3981         struct hci_dev *hdev;
3982
3983         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3984         if (!hdev)
3985                 return NULL;
3986
3987         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3988         hdev->esco_type = (ESCO_HV1);
3989         hdev->link_mode = (HCI_LM_ACCEPT);
3990         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3991         hdev->io_capability = 0x03;     /* No Input No Output */
3992         hdev->manufacturer = 0xffff;    /* Default to internal use */
3993         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3994         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3995
3996         hdev->sniff_max_interval = 800;
3997         hdev->sniff_min_interval = 80;
3998
3999         hdev->le_adv_channel_map = 0x07;
4000         hdev->le_adv_min_interval = 0x0800;
4001         hdev->le_adv_max_interval = 0x0800;
4002         hdev->le_scan_interval = 0x0060;
4003         hdev->le_scan_window = 0x0030;
4004         hdev->le_conn_min_interval = 0x0028;
4005         hdev->le_conn_max_interval = 0x0038;
4006         hdev->le_conn_latency = 0x0000;
4007         hdev->le_supv_timeout = 0x002a;
4008
4009         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4010         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4011         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4012         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4013
4014         mutex_init(&hdev->lock);
4015         mutex_init(&hdev->req_lock);
4016
4017         INIT_LIST_HEAD(&hdev->mgmt_pending);
4018         INIT_LIST_HEAD(&hdev->blacklist);
4019         INIT_LIST_HEAD(&hdev->whitelist);
4020         INIT_LIST_HEAD(&hdev->uuids);
4021         INIT_LIST_HEAD(&hdev->link_keys);
4022         INIT_LIST_HEAD(&hdev->long_term_keys);
4023         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4024         INIT_LIST_HEAD(&hdev->remote_oob_data);
4025         INIT_LIST_HEAD(&hdev->le_white_list);
4026         INIT_LIST_HEAD(&hdev->le_conn_params);
4027         INIT_LIST_HEAD(&hdev->pend_le_conns);
4028         INIT_LIST_HEAD(&hdev->pend_le_reports);
4029         INIT_LIST_HEAD(&hdev->conn_hash.list);
4030
4031         INIT_WORK(&hdev->rx_work, hci_rx_work);
4032         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4033         INIT_WORK(&hdev->tx_work, hci_tx_work);
4034         INIT_WORK(&hdev->power_on, hci_power_on);
4035
4036         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4037         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4038         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4039
4040         skb_queue_head_init(&hdev->rx_q);
4041         skb_queue_head_init(&hdev->cmd_q);
4042         skb_queue_head_init(&hdev->raw_q);
4043
4044         init_waitqueue_head(&hdev->req_wait_q);
4045
4046         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4047
4048         hci_init_sysfs(hdev);
4049         discovery_init(hdev);
4050
4051         return hdev;
4052 }
4053 EXPORT_SYMBOL(hci_alloc_dev);
4054
4055 /* Free HCI device */
4056 void hci_free_dev(struct hci_dev *hdev)
4057 {
4058         /* will free via device release */
4059         put_device(&hdev->dev);
4060 }
4061 EXPORT_SYMBOL(hci_free_dev);
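
/* Typical driver life cycle (a minimal sketch; my_open, my_close and
 * my_send are hypothetical driver callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On module removal the driver calls hci_unregister_dev() followed by
 * hci_free_dev().
 */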
4062
4063 /* Register HCI device */
4064 int hci_register_dev(struct hci_dev *hdev)
4065 {
4066         int id, error;
4067
4068         if (!hdev->open || !hdev->close || !hdev->send)
4069                 return -EINVAL;
4070
4071         /* Do not allow HCI_AMP devices to register at index 0,
4072          * so the index can be used as the AMP controller ID.
4073          */
4074         switch (hdev->dev_type) {
4075         case HCI_BREDR:
4076                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4077                 break;
4078         case HCI_AMP:
4079                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4080                 break;
4081         default:
4082                 return -EINVAL;
4083         }
4084
4085         if (id < 0)
4086                 return id;
4087
4088         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
4089         hdev->id = id;
4090
4091         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4092
4093         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4094                                           WQ_MEM_RECLAIM, 1, hdev->name);
4095         if (!hdev->workqueue) {
4096                 error = -ENOMEM;
4097                 goto err;
4098         }
4099
4100         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4101                                               WQ_MEM_RECLAIM, 1, hdev->name);
4102         if (!hdev->req_workqueue) {
4103                 destroy_workqueue(hdev->workqueue);
4104                 error = -ENOMEM;
4105                 goto err;
4106         }
4107
4108         if (!IS_ERR_OR_NULL(bt_debugfs))
4109                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4110
4111         dev_set_name(&hdev->dev, "%s", hdev->name);
4112
4113         error = device_add(&hdev->dev);
4114         if (error < 0)
4115                 goto err_wqueue;
4116
4117         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4118                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4119                                     hdev);
4120         if (hdev->rfkill) {
4121                 if (rfkill_register(hdev->rfkill) < 0) {
4122                         rfkill_destroy(hdev->rfkill);
4123                         hdev->rfkill = NULL;
4124                 }
4125         }
4126
4127         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4128                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4129
4130         set_bit(HCI_SETUP, &hdev->dev_flags);
4131         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4132
4133         if (hdev->dev_type == HCI_BREDR) {
4134                 /* Assume BR/EDR support until proven otherwise (such as
4135                  * through reading supported features during init).
4136                  */
4137                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4138         }
4139
4140         write_lock(&hci_dev_list_lock);
4141         list_add(&hdev->list, &hci_dev_list);
4142         write_unlock(&hci_dev_list_lock);
4143
4144         /* Devices that are marked for raw-only usage are unconfigured
4145          * and should not be included in normal operation.
4146          */
4147         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4148                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4149
4150         hci_notify(hdev, HCI_DEV_REG);
4151         hci_dev_hold(hdev);
4152
4153         queue_work(hdev->req_workqueue, &hdev->power_on);
4154
4155         return id;
4156
4157 err_wqueue:
4158         destroy_workqueue(hdev->workqueue);
4159         destroy_workqueue(hdev->req_workqueue);
4160 err:
4161         ida_simple_remove(&hci_index_ida, hdev->id);
4162
4163         return error;
4164 }
4165 EXPORT_SYMBOL(hci_register_dev);
4166
4167 /* Unregister HCI device */
4168 void hci_unregister_dev(struct hci_dev *hdev)
4169 {
4170         int i, id;
4171
4172         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4173
4174         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4175
4176         id = hdev->id;
4177
4178         write_lock(&hci_dev_list_lock);
4179         list_del(&hdev->list);
4180         write_unlock(&hci_dev_list_lock);
4181
4182         hci_dev_do_close(hdev);
4183
4184         for (i = 0; i < NUM_REASSEMBLY; i++)
4185                 kfree_skb(hdev->reassembly[i]);
4186
4187         cancel_work_sync(&hdev->power_on);
4188
4189         if (!test_bit(HCI_INIT, &hdev->flags) &&
4190             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4191             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4192                 hci_dev_lock(hdev);
4193                 mgmt_index_removed(hdev);
4194                 hci_dev_unlock(hdev);
4195         }
4196
4197         /* mgmt_index_removed should take care of emptying the
4198          * pending list */
4199         BUG_ON(!list_empty(&hdev->mgmt_pending));
4200
4201         hci_notify(hdev, HCI_DEV_UNREG);
4202
4203         if (hdev->rfkill) {
4204                 rfkill_unregister(hdev->rfkill);
4205                 rfkill_destroy(hdev->rfkill);
4206         }
4207
4208         smp_unregister(hdev);
4209
4210         device_del(&hdev->dev);
4211
4212         debugfs_remove_recursive(hdev->debugfs);
4213
4214         destroy_workqueue(hdev->workqueue);
4215         destroy_workqueue(hdev->req_workqueue);
4216
4217         hci_dev_lock(hdev);
4218         hci_bdaddr_list_clear(&hdev->blacklist);
4219         hci_bdaddr_list_clear(&hdev->whitelist);
4220         hci_uuids_clear(hdev);
4221         hci_link_keys_clear(hdev);
4222         hci_smp_ltks_clear(hdev);
4223         hci_smp_irks_clear(hdev);
4224         hci_remote_oob_data_clear(hdev);
4225         hci_bdaddr_list_clear(&hdev->le_white_list);
4226         hci_conn_params_clear_all(hdev);
4227         hci_dev_unlock(hdev);
4228
4229         hci_dev_put(hdev);
4230
4231         ida_simple_remove(&hci_index_ida, id);
4232 }
4233 EXPORT_SYMBOL(hci_unregister_dev);
4234
4235 /* Suspend HCI device */
4236 int hci_suspend_dev(struct hci_dev *hdev)
4237 {
4238         hci_notify(hdev, HCI_DEV_SUSPEND);
4239         return 0;
4240 }
4241 EXPORT_SYMBOL(hci_suspend_dev);
4242
4243 /* Resume HCI device */
4244 int hci_resume_dev(struct hci_dev *hdev)
4245 {
4246         hci_notify(hdev, HCI_DEV_RESUME);
4247         return 0;
4248 }
4249 EXPORT_SYMBOL(hci_resume_dev);
4250
4251 /* Receive frame from HCI drivers */
4252 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4253 {
4254         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4255                       !test_bit(HCI_INIT, &hdev->flags))) {
4256                 kfree_skb(skb);
4257                 return -ENXIO;
4258         }
4259
4260         /* Incoming skb */
4261         bt_cb(skb)->incoming = 1;
4262
4263         /* Time stamp */
4264         __net_timestamp(skb);
4265
4266         skb_queue_tail(&hdev->rx_q, skb);
4267         queue_work(hdev->workqueue, &hdev->rx_work);
4268
4269         return 0;
4270 }
4271 EXPORT_SYMBOL(hci_recv_frame);
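
/* A minimal sketch of driver-side RX (the driver is expected to set the
 * packet type before handing the skb over):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * On error the skb has already been freed, so the caller must not touch
 * it again.
 */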
4272
4273 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4274                           int count, __u8 index)
4275 {
4276         int len = 0;
4277         int hlen = 0;
4278         int remain = count;
4279         struct sk_buff *skb;
4280         struct bt_skb_cb *scb;
4281
4282         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4283             index >= NUM_REASSEMBLY)
4284                 return -EILSEQ;
4285
4286         skb = hdev->reassembly[index];
4287
4288         if (!skb) {
4289                 switch (type) {
4290                 case HCI_ACLDATA_PKT:
4291                         len = HCI_MAX_FRAME_SIZE;
4292                         hlen = HCI_ACL_HDR_SIZE;
4293                         break;
4294                 case HCI_EVENT_PKT:
4295                         len = HCI_MAX_EVENT_SIZE;
4296                         hlen = HCI_EVENT_HDR_SIZE;
4297                         break;
4298                 case HCI_SCODATA_PKT:
4299                         len = HCI_MAX_SCO_SIZE;
4300                         hlen = HCI_SCO_HDR_SIZE;
4301                         break;
4302                 }
4303
4304                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4305                 if (!skb)
4306                         return -ENOMEM;
4307
4308                 scb = (void *) skb->cb;
4309                 scb->expect = hlen;
4310                 scb->pkt_type = type;
4311
4312                 hdev->reassembly[index] = skb;
4313         }
4314
4315         while (count) {
4316                 scb = (void *) skb->cb;
4317                 len = min_t(uint, scb->expect, count);
4318
4319                 memcpy(skb_put(skb, len), data, len);
4320
4321                 count -= len;
4322                 data += len;
4323                 scb->expect -= len;
4324                 remain = count;
4325
4326                 switch (type) {
4327                 case HCI_EVENT_PKT:
4328                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4329                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4330                                 scb->expect = h->plen;
4331
4332                                 if (skb_tailroom(skb) < scb->expect) {
4333                                         kfree_skb(skb);
4334                                         hdev->reassembly[index] = NULL;
4335                                         return -ENOMEM;
4336                                 }
4337                         }
4338                         break;
4339
4340                 case HCI_ACLDATA_PKT:
4341                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4342                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4343                                 scb->expect = __le16_to_cpu(h->dlen);
4344
4345                                 if (skb_tailroom(skb) < scb->expect) {
4346                                         kfree_skb(skb);
4347                                         hdev->reassembly[index] = NULL;
4348                                         return -ENOMEM;
4349                                 }
4350                         }
4351                         break;
4352
4353                 case HCI_SCODATA_PKT:
4354                         if (skb->len == HCI_SCO_HDR_SIZE) {
4355                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4356                                 scb->expect = h->dlen;
4357
4358                                 if (skb_tailroom(skb) < scb->expect) {
4359                                         kfree_skb(skb);
4360                                         hdev->reassembly[index] = NULL;
4361                                         return -ENOMEM;
4362                                 }
4363                         }
4364                         break;
4365                 }
4366
4367                 if (scb->expect == 0) {
4368                         /* Complete frame */
4369
4370                         bt_cb(skb)->pkt_type = type;
4371                         hci_recv_frame(hdev, skb);
4372
4373                         hdev->reassembly[index] = NULL;
4374                         return remain;
4375                 }
4376         }
4377
4378         return remain;
4379 }
4380
4381 #define STREAM_REASSEMBLY 0
4382
4383 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4384 {
4385         int type;
4386         int rem = 0;
4387
4388         while (count) {
4389                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4390
4391                 if (!skb) {
4392                         struct { char type; } *pkt;
4393
4394                         /* Start of the frame */
4395                         pkt = data;
4396                         type = pkt->type;
4397
4398                         data++;
4399                         count--;
4400                 } else
4401                         type = bt_cb(skb)->pkt_type;
4402
4403                 rem = hci_reassembly(hdev, type, data, count,
4404                                      STREAM_REASSEMBLY);
4405                 if (rem < 0)
4406                         return rem;
4407
4408                 data += (count - rem);
4409                 count = rem;
4410         }
4411
4412         return rem;
4413 }
4414 EXPORT_SYMBOL(hci_recv_stream_fragment);
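
/* This is mainly useful for byte-stream transports such as UART line
 * disciplines, where packet boundaries have to be recovered from the
 * packet-type byte that prefixes each frame.
 */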
4415
4416 /* ---- Interface to upper protocols ---- */
4417
4418 int hci_register_cb(struct hci_cb *cb)
4419 {
4420         BT_DBG("%p name %s", cb, cb->name);
4421
4422         write_lock(&hci_cb_list_lock);
4423         list_add(&cb->list, &hci_cb_list);
4424         write_unlock(&hci_cb_list_lock);
4425
4426         return 0;
4427 }
4428 EXPORT_SYMBOL(hci_register_cb);
4429
4430 int hci_unregister_cb(struct hci_cb *cb)
4431 {
4432         BT_DBG("%p name %s", cb, cb->name);
4433
4434         write_lock(&hci_cb_list_lock);
4435         list_del(&cb->list);
4436         write_unlock(&hci_cb_list_lock);
4437
4438         return 0;
4439 }
4440 EXPORT_SYMBOL(hci_unregister_cb);
4441
4442 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4443 {
4444         int err;
4445
4446         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4447
4448         /* Time stamp */
4449         __net_timestamp(skb);
4450
4451         /* Send copy to monitor */
4452         hci_send_to_monitor(hdev, skb);
4453
4454         if (atomic_read(&hdev->promisc)) {
4455                 /* Send copy to the sockets */
4456                 hci_send_to_sock(hdev, skb);
4457         }
4458
4459         /* Get rid of skb owner, prior to sending to the driver. */
4460         skb_orphan(skb);
4461
4462         err = hdev->send(hdev, skb);
4463         if (err < 0) {
4464                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4465                 kfree_skb(skb);
4466         }
4467 }
4468
4469 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4470 {
4471         skb_queue_head_init(&req->cmd_q);
4472         req->hdev = hdev;
4473         req->err = 0;
4474 }
4475
4476 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4477 {
4478         struct hci_dev *hdev = req->hdev;
4479         struct sk_buff *skb;
4480         unsigned long flags;
4481
4482         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4483
4484         /* If an error occurred during request building, remove all HCI
4485          * commands queued on the HCI request queue.
4486          */
4487         if (req->err) {
4488                 skb_queue_purge(&req->cmd_q);
4489                 return req->err;
4490         }
4491
4492         /* Do not allow empty requests */
4493         if (skb_queue_empty(&req->cmd_q))
4494                 return -ENODATA;
4495
4496         skb = skb_peek_tail(&req->cmd_q);
4497         bt_cb(skb)->req.complete = complete;
4498
4499         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4500         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4501         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4502
4503         queue_work(hdev->workqueue, &hdev->cmd_work);
4504
4505         return 0;
4506 }
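
/* Request construction in a nutshell (mirrors le_scan_disable_work()
 * above; cp and my_complete are hypothetical):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *
 * Unlike hci_send_cmd(), which queues one stand-alone command, a
 * request batches several commands and runs the completion callback
 * only once the last one has finished.
 */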
4507
4508 bool hci_req_pending(struct hci_dev *hdev)
4509 {
4510         return (hdev->req_status == HCI_REQ_PEND);
4511 }
4512
4513 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4514                                        u32 plen, const void *param)
4515 {
4516         int len = HCI_COMMAND_HDR_SIZE + plen;
4517         struct hci_command_hdr *hdr;
4518         struct sk_buff *skb;
4519
4520         skb = bt_skb_alloc(len, GFP_ATOMIC);
4521         if (!skb)
4522                 return NULL;
4523
4524         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4525         hdr->opcode = cpu_to_le16(opcode);
4526         hdr->plen   = plen;
4527
4528         if (plen)
4529                 memcpy(skb_put(skb, plen), param, plen);
4530
4531         BT_DBG("skb len %d", skb->len);
4532
4533         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4534         bt_cb(skb)->opcode = opcode;
4535
4536         return skb;
4537 }
4538
4539 /* Send HCI command */
4540 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4541                  const void *param)
4542 {
4543         struct sk_buff *skb;
4544
4545         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4546
4547         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4548         if (!skb) {
4549                 BT_ERR("%s no memory for command", hdev->name);
4550                 return -ENOMEM;
4551         }
4552
4553         /* Stand-alone HCI commands must be flagged as
4554          * single-command requests.
4555          */
4556         bt_cb(skb)->req.start = true;
4557
4558         skb_queue_tail(&hdev->cmd_q, skb);
4559         queue_work(hdev->workqueue, &hdev->cmd_work);
4560
4561         return 0;
4562 }
4563
4564 /* Queue a command to an asynchronous HCI request */
4565 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4566                     const void *param, u8 event)
4567 {
4568         struct hci_dev *hdev = req->hdev;
4569         struct sk_buff *skb;
4570
4571         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4572
4573         /* If an error occurred during request building, there is no point in
4574          * queueing the HCI command. We can simply return.
4575          */
4576         if (req->err)
4577                 return;
4578
4579         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4580         if (!skb) {
4581                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4582                        hdev->name, opcode);
4583                 req->err = -ENOMEM;
4584                 return;
4585         }
4586
4587         if (skb_queue_empty(&req->cmd_q))
4588                 bt_cb(skb)->req.start = true;
4589
4590         bt_cb(skb)->req.event = event;
4591
4592         skb_queue_tail(&req->cmd_q, skb);
4593 }
4594
4595 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4596                  const void *param)
4597 {
4598         hci_req_add_ev(req, opcode, plen, param, 0);
4599 }
4600
4601 /* Get data from the previously sent command */
4602 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4603 {
4604         struct hci_command_hdr *hdr;
4605
4606         if (!hdev->sent_cmd)
4607                 return NULL;
4608
4609         hdr = (void *) hdev->sent_cmd->data;
4610
4611         if (hdr->opcode != cpu_to_le16(opcode))
4612                 return NULL;
4613
4614         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4615
4616         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4617 }
4618
4619 /* Send ACL data */
4620 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4621 {
4622         struct hci_acl_hdr *hdr;
4623         int len = skb->len;
4624
4625         skb_push(skb, HCI_ACL_HDR_SIZE);
4626         skb_reset_transport_header(skb);
4627         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4628         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4629         hdr->dlen   = cpu_to_le16(len);
4630 }
4631
4632 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4633                           struct sk_buff *skb, __u16 flags)
4634 {
4635         struct hci_conn *conn = chan->conn;
4636         struct hci_dev *hdev = conn->hdev;
4637         struct sk_buff *list;
4638
4639         skb->len = skb_headlen(skb);
4640         skb->data_len = 0;
4641
4642         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4643
4644         switch (hdev->dev_type) {
4645         case HCI_BREDR:
4646                 hci_add_acl_hdr(skb, conn->handle, flags);
4647                 break;
4648         case HCI_AMP:
4649                 hci_add_acl_hdr(skb, chan->handle, flags);
4650                 break;
4651         default:
4652                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4653                 return;
4654         }
4655
4656         list = skb_shinfo(skb)->frag_list;
4657         if (!list) {
4658                 /* Non fragmented */
4659                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4660
4661                 skb_queue_tail(queue, skb);
4662         } else {
4663                 /* Fragmented */
4664                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4665
4666                 skb_shinfo(skb)->frag_list = NULL;
4667
4668                 /* Queue all fragments atomically. We need to use spin_lock_bh
4669                  * here because of 6LoWPAN links, as there this function is
4670                  * called from softirq and using normal spin lock could cause
4671                  * deadlocks.
4672                  */
4673                 spin_lock_bh(&queue->lock);
4674
4675                 __skb_queue_tail(queue, skb);
4676
4677                 flags &= ~ACL_START;
4678                 flags |= ACL_CONT;
4679                 do {
4680                         skb = list; list = list->next;
4681
4682                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4683                         hci_add_acl_hdr(skb, conn->handle, flags);
4684
4685                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4686
4687                         __skb_queue_tail(queue, skb);
4688                 } while (list);
4689
4690                 spin_unlock_bh(&queue->lock);
4691         }
4692 }
4693
4694 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4695 {
4696         struct hci_dev *hdev = chan->conn->hdev;
4697
4698         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4699
4700         hci_queue_acl(chan, &chan->data_q, skb, flags);
4701
4702         queue_work(hdev->workqueue, &hdev->tx_work);
4703 }
4704
4705 /* Send SCO data */
4706 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4707 {
4708         struct hci_dev *hdev = conn->hdev;
4709         struct hci_sco_hdr hdr;
4710
4711         BT_DBG("%s len %d", hdev->name, skb->len);
4712
4713         hdr.handle = cpu_to_le16(conn->handle);
4714         hdr.dlen   = skb->len;
4715
4716         skb_push(skb, HCI_SCO_HDR_SIZE);
4717         skb_reset_transport_header(skb);
4718         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4719
4720         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4721
4722         skb_queue_tail(&conn->data_q, skb);
4723         queue_work(hdev->workqueue, &hdev->tx_work);
4724 }
4725
4726 /* ---- HCI TX task (outgoing data) ---- */
4727
4728 /* HCI Connection scheduler */
4729 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4730                                      int *quote)
4731 {
4732         struct hci_conn_hash *h = &hdev->conn_hash;
4733         struct hci_conn *conn = NULL, *c;
4734         unsigned int num = 0, min = ~0;
4735
4736         /* We don't have to lock device here. Connections are always
4737          * added and removed with TX task disabled. */
4738
4739         rcu_read_lock();
4740
4741         list_for_each_entry_rcu(c, &h->list, list) {
4742                 if (c->type != type || skb_queue_empty(&c->data_q))
4743                         continue;
4744
4745                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4746                         continue;
4747
4748                 num++;
4749
4750                 if (c->sent < min) {
4751                         min  = c->sent;
4752                         conn = c;
4753                 }
4754
4755                 if (hci_conn_num(hdev, type) == num)
4756                         break;
4757         }
4758
4759         rcu_read_unlock();
4760
4761         if (conn) {
4762                 int cnt, q;
4763
4764                 switch (conn->type) {
4765                 case ACL_LINK:
4766                         cnt = hdev->acl_cnt;
4767                         break;
4768                 case SCO_LINK:
4769                 case ESCO_LINK:
4770                         cnt = hdev->sco_cnt;
4771                         break;
4772                 case LE_LINK:
4773                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4774                         break;
4775                 default:
4776                         cnt = 0;
4777                         BT_ERR("Unknown link type");
4778                 }
4779
4780                 q = cnt / num;
4781                 *quote = q ? q : 1;
4782         } else
4783                 *quote = 0;
4784
4785         BT_DBG("conn %p quote %d", conn, *quote);
4786         return conn;
4787 }
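
/* Worked example of the quota above: with hdev->acl_cnt == 8 free ACL
 * buffers and three ACL connections with queued data, each connection
 * is offered 8 / 3 == 2 packets per scheduling round; the "q ? q : 1"
 * fallback guards against starvation when cnt < num.
 */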
4788
4789 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4790 {
4791         struct hci_conn_hash *h = &hdev->conn_hash;
4792         struct hci_conn *c;
4793
4794         BT_ERR("%s link tx timeout", hdev->name);
4795
4796         rcu_read_lock();
4797
4798         /* Kill stalled connections */
4799         list_for_each_entry_rcu(c, &h->list, list) {
4800                 if (c->type == type && c->sent) {
4801                         BT_ERR("%s killing stalled connection %pMR",
4802                                hdev->name, &c->dst);
4803                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4804                 }
4805         }
4806
4807         rcu_read_unlock();
4808 }
4809
4810 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4811                                       int *quote)
4812 {
4813         struct hci_conn_hash *h = &hdev->conn_hash;
4814         struct hci_chan *chan = NULL;
4815         unsigned int num = 0, min = ~0, cur_prio = 0;
4816         struct hci_conn *conn;
4817         int cnt, q, conn_num = 0;
4818
4819         BT_DBG("%s", hdev->name);
4820
4821         rcu_read_lock();
4822
4823         list_for_each_entry_rcu(conn, &h->list, list) {
4824                 struct hci_chan *tmp;
4825
4826                 if (conn->type != type)
4827                         continue;
4828
4829                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4830                         continue;
4831
4832                 conn_num++;
4833
4834                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4835                         struct sk_buff *skb;
4836
4837                         if (skb_queue_empty(&tmp->data_q))
4838                                 continue;
4839
4840                         skb = skb_peek(&tmp->data_q);
4841                         if (skb->priority < cur_prio)
4842                                 continue;
4843
4844                         if (skb->priority > cur_prio) {
4845                                 num = 0;
4846                                 min = ~0;
4847                                 cur_prio = skb->priority;
4848                         }
4849
4850                         num++;
4851
4852                         if (conn->sent < min) {
4853                                 min  = conn->sent;
4854                                 chan = tmp;
4855                         }
4856                 }
4857
4858                 if (hci_conn_num(hdev, type) == conn_num)
4859                         break;
4860         }
4861
4862         rcu_read_unlock();
4863
4864         if (!chan)
4865                 return NULL;
4866
4867         switch (chan->conn->type) {
4868         case ACL_LINK:
4869                 cnt = hdev->acl_cnt;
4870                 break;
4871         case AMP_LINK:
4872                 cnt = hdev->block_cnt;
4873                 break;
4874         case SCO_LINK:
4875         case ESCO_LINK:
4876                 cnt = hdev->sco_cnt;
4877                 break;
4878         case LE_LINK:
4879                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4880                 break;
4881         default:
4882                 cnt = 0;
4883                 BT_ERR("Unknown link type");
4884         }
4885
4886         q = cnt / num;
4887         *quote = q ? q : 1;
4888         BT_DBG("chan %p quote %d", chan, *quote);
4889         return chan;
4890 }
4891
4892 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4893 {
4894         struct hci_conn_hash *h = &hdev->conn_hash;
4895         struct hci_conn *conn;
4896         int num = 0;
4897
4898         BT_DBG("%s", hdev->name);
4899
4900         rcu_read_lock();
4901
4902         list_for_each_entry_rcu(conn, &h->list, list) {
4903                 struct hci_chan *chan;
4904
4905                 if (conn->type != type)
4906                         continue;
4907
4908                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4909                         continue;
4910
4911                 num++;
4912
4913                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4914                         struct sk_buff *skb;
4915
4916                         if (chan->sent) {
4917                                 chan->sent = 0;
4918                                 continue;
4919                         }
4920
4921                         if (skb_queue_empty(&chan->data_q))
4922                                 continue;
4923
4924                         skb = skb_peek(&chan->data_q);
4925                         if (skb->priority >= HCI_PRIO_MAX - 1)
4926                                 continue;
4927
4928                         skb->priority = HCI_PRIO_MAX - 1;
4929
4930                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4931                                skb->priority);
4932                 }
4933
4934                 if (hci_conn_num(hdev, type) == num)
4935                         break;
4936         }
4937
4938         rcu_read_unlock();
4939
4940 }
4941
4942 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4943 {
4944         /* Calculate count of blocks used by this packet */
4945         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4946 }
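
/* Editor's note: a worked example of the block accounting above, with
 * hypothetical values. For an skb of 1024 bytes carrying the 4-byte
 * ACL header (HCI_ACL_HDR_SIZE) on a controller with
 * hdev->block_len = 256:
 *
 *	DIV_ROUND_UP(1024 - 4, 256) = DIV_ROUND_UP(1020, 256) = 4
 *
 * so hci_sched_acl_blk() charges 4 blocks against hdev->block_cnt for
 * this packet.
 */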
4947
4948 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4949 {
4950         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4951                 /* ACL tx timeout must be longer than the maximum
4952                  * link supervision timeout (40.9 seconds) */
4953                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4954                                        HCI_ACL_TX_TIMEOUT))
4955                         hci_link_tx_to(hdev, ACL_LINK);
4956         }
4957 }
4958
4959 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4960 {
4961         unsigned int cnt = hdev->acl_cnt;
4962         struct hci_chan *chan;
4963         struct sk_buff *skb;
4964         int quote;
4965
4966         __check_timeout(hdev, cnt);
4967
4968         while (hdev->acl_cnt &&
4969                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4970                 u32 priority = (skb_peek(&chan->data_q))->priority;
4971                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4972                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4973                                skb->len, skb->priority);
4974
4975                         /* Stop if priority has changed */
4976                         if (skb->priority < priority)
4977                                 break;
4978
4979                         skb = skb_dequeue(&chan->data_q);
4980
4981                         hci_conn_enter_active_mode(chan->conn,
4982                                                    bt_cb(skb)->force_active);
4983
4984                         hci_send_frame(hdev, skb);
4985                         hdev->acl_last_tx = jiffies;
4986
4987                         hdev->acl_cnt--;
4988                         chan->sent++;
4989                         chan->conn->sent++;
4990                 }
4991         }
4992
4993         if (cnt != hdev->acl_cnt)
4994                 hci_prio_recalculate(hdev, ACL_LINK);
4995 }
4996
4997 static void hci_sched_acl_blk(struct hci_dev *hdev)
4998 {
4999         unsigned int cnt = hdev->block_cnt;
5000         struct hci_chan *chan;
5001         struct sk_buff *skb;
5002         int quote;
5003         u8 type;
5004
5005         __check_timeout(hdev, cnt);
5006
5007         BT_DBG("%s", hdev->name);
5008
5009         if (hdev->dev_type == HCI_AMP)
5010                 type = AMP_LINK;
5011         else
5012                 type = ACL_LINK;
5013
5014         while (hdev->block_cnt > 0 &&
5015                (chan = hci_chan_sent(hdev, type, &quote))) {
5016                 u32 priority = (skb_peek(&chan->data_q))->priority;
5017                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5018                         int blocks;
5019
5020                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5021                                skb->len, skb->priority);
5022
5023                         /* Stop if priority has changed */
5024                         if (skb->priority < priority)
5025                                 break;
5026
5027                         skb = skb_dequeue(&chan->data_q);
5028
5029                         blocks = __get_blocks(hdev, skb);
5030                         if (blocks > hdev->block_cnt)
5031                                 return;
5032
5033                         hci_conn_enter_active_mode(chan->conn,
5034                                                    bt_cb(skb)->force_active);
5035
5036                         hci_send_frame(hdev, skb);
5037                         hdev->acl_last_tx = jiffies;
5038
5039                         hdev->block_cnt -= blocks;
5040                         quote -= blocks;
5041
5042                         chan->sent += blocks;
5043                         chan->conn->sent += blocks;
5044                 }
5045         }
5046
5047         if (cnt != hdev->block_cnt)
5048                 hci_prio_recalculate(hdev, type);
5049 }
5050
5051 static void hci_sched_acl(struct hci_dev *hdev)
5052 {
5053         BT_DBG("%s", hdev->name);
5054
5055         /* No ACL link over BR/EDR controller */
5056         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5057                 return;
5058
5059         /* No AMP link over AMP controller */
5060         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5061                 return;
5062
5063         switch (hdev->flow_ctl_mode) {
5064         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5065                 hci_sched_acl_pkt(hdev);
5066                 break;
5067
5068         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5069                 hci_sched_acl_blk(hdev);
5070                 break;
5071         }
5072 }
5073
5074 /* Schedule SCO */
5075 static void hci_sched_sco(struct hci_dev *hdev)
5076 {
5077         struct hci_conn *conn;
5078         struct sk_buff *skb;
5079         int quote;
5080
5081         BT_DBG("%s", hdev->name);
5082
5083         if (!hci_conn_num(hdev, SCO_LINK))
5084                 return;
5085
5086         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5087                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5088                         BT_DBG("skb %p len %d", skb, skb->len);
5089                         hci_send_frame(hdev, skb);
5090
5091                         conn->sent++;
5092                         if (conn->sent == ~0)
5093                                 conn->sent = 0;
5094                 }
5095         }
5096 }
5097
5098 static void hci_sched_esco(struct hci_dev *hdev)
5099 {
5100         struct hci_conn *conn;
5101         struct sk_buff *skb;
5102         int quote;
5103
5104         BT_DBG("%s", hdev->name);
5105
5106         if (!hci_conn_num(hdev, ESCO_LINK))
5107                 return;
5108
5109         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5110                                                      &quote))) {
5111                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5112                         BT_DBG("skb %p len %d", skb, skb->len);
5113                         hci_send_frame(hdev, skb);
5114
5115                         conn->sent++;
5116                         if (conn->sent == ~0)
5117                                 conn->sent = 0;
5118                 }
5119         }
5120 }
5121
5122 static void hci_sched_le(struct hci_dev *hdev)
5123 {
5124         struct hci_chan *chan;
5125         struct sk_buff *skb;
5126         int quote, cnt, tmp;
5127
5128         BT_DBG("%s", hdev->name);
5129
5130         if (!hci_conn_num(hdev, LE_LINK))
5131                 return;
5132
5133         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5134                 /* LE tx timeout must be longer than the maximum
5135                  * link supervision timeout (40.9 seconds) */
5136                 if (!hdev->le_cnt && hdev->le_pkts &&
5137                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5138                         hci_link_tx_to(hdev, LE_LINK);
5139         }
5140
5141         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5142         tmp = cnt;
5143         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5144                 u32 priority = (skb_peek(&chan->data_q))->priority;
5145                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5146                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5147                                skb->len, skb->priority);
5148
5149                         /* Stop if priority has changed */
5150                         if (skb->priority < priority)
5151                                 break;
5152
5153                         skb = skb_dequeue(&chan->data_q);
5154
5155                         hci_send_frame(hdev, skb);
5156                         hdev->le_last_tx = jiffies;
5157
5158                         cnt--;
5159                         chan->sent++;
5160                         chan->conn->sent++;
5161                 }
5162         }
5163
5164         if (hdev->le_pkts)
5165                 hdev->le_cnt = cnt;
5166         else
5167                 hdev->acl_cnt = cnt;
5168
5169         if (cnt != tmp)
5170                 hci_prio_recalculate(hdev, LE_LINK);
5171 }
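
/* Editor's note: the le_pkts/le_cnt vs. acl_cnt juggling in
 * hci_sched_le() covers controllers that report no dedicated LE
 * buffers (LE Read Buffer Size returning zero). Per the Core
 * Specification such controllers share the ACL buffer pool for LE
 * traffic, so the scheduler borrows and returns ACL credits instead
 * of LE ones.
 */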
5172
5173 static void hci_tx_work(struct work_struct *work)
5174 {
5175         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5176         struct sk_buff *skb;
5177
5178         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5179                hdev->sco_cnt, hdev->le_cnt);
5180
5181         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5182                 /* Schedule queues and send frames to the HCI driver */
5183                 hci_sched_acl(hdev);
5184                 hci_sched_sco(hdev);
5185                 hci_sched_esco(hdev);
5186                 hci_sched_le(hdev);
5187         }
5188
5189         /* Send any queued raw (unknown type) packets */
5190         while ((skb = skb_dequeue(&hdev->raw_q)))
5191                 hci_send_frame(hdev, skb);
5192 }
5193
5194 /* ----- HCI RX task (incoming data processing) ----- */
5195
5196 /* ACL data packet */
5197 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5198 {
5199         struct hci_acl_hdr *hdr = (void *) skb->data;
5200         struct hci_conn *conn;
5201         __u16 handle, flags;
5202
5203         skb_pull(skb, HCI_ACL_HDR_SIZE);
5204
5205         handle = __le16_to_cpu(hdr->handle);
5206         flags  = hci_flags(handle);
5207         handle = hci_handle(handle);
5208
5209         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5210                handle, flags);
5211
5212         hdev->stat.acl_rx++;
5213
5214         hci_dev_lock(hdev);
5215         conn = hci_conn_hash_lookup_handle(hdev, handle);
5216         hci_dev_unlock(hdev);
5217
5218         if (conn) {
5219                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5220
5221                 /* Send to upper protocol */
5222                 l2cap_recv_acldata(conn, skb, flags);
5223                 return;
5224         } else {
5225                 BT_ERR("%s ACL packet for unknown connection handle %d",
5226                        hdev->name, handle);
5227         }
5228
5229         kfree_skb(skb);
5230 }
5231
5232 /* SCO data packet */
5233 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5234 {
5235         struct hci_sco_hdr *hdr = (void *) skb->data;
5236         struct hci_conn *conn;
5237         __u16 handle;
5238
5239         skb_pull(skb, HCI_SCO_HDR_SIZE);
5240
5241         handle = __le16_to_cpu(hdr->handle);
5242
5243         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5244
5245         hdev->stat.sco_rx++;
5246
5247         hci_dev_lock(hdev);
5248         conn = hci_conn_hash_lookup_handle(hdev, handle);
5249         hci_dev_unlock(hdev);
5250
5251         if (conn) {
5252                 /* Send to upper protocol */
5253                 sco_recv_scodata(conn, skb);
5254                 return;
5255         } else {
5256                 BT_ERR("%s SCO packet for unknown connection handle %d",
5257                        hdev->name, handle);
5258         }
5259
5260         kfree_skb(skb);
5261 }
5262
5263 static bool hci_req_is_complete(struct hci_dev *hdev)
5264 {
5265         struct sk_buff *skb;
5266
5267         skb = skb_peek(&hdev->cmd_q);
5268         if (!skb)
5269                 return true;
5270
5271         return bt_cb(skb)->req.start;
5272 }
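
/* Editor's note: commands queued through the request API carry framing
 * metadata in their control buffer, and bt_cb(skb)->req.start is set
 * only on the first command of each request. Two back-to-back requests
 * therefore sit in hdev->cmd_q like this (sketch, hypothetical
 * opcodes):
 *
 *	head -> [start=1 op=A1][start=0 op=A2][start=1 op=B1] -> tail
 *
 * so if the skb at the head has start set (or the queue is empty), all
 * commands of the previous request have been sent and that request is
 * complete.
 */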
5273
5274 static void hci_resend_last(struct hci_dev *hdev)
5275 {
5276         struct hci_command_hdr *sent;
5277         struct sk_buff *skb;
5278         u16 opcode;
5279
5280         if (!hdev->sent_cmd)
5281                 return;
5282
5283         sent = (void *) hdev->sent_cmd->data;
5284         opcode = __le16_to_cpu(sent->opcode);
5285         if (opcode == HCI_OP_RESET)
5286                 return;
5287
5288         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5289         if (!skb)
5290                 return;
5291
5292         skb_queue_head(&hdev->cmd_q, skb);
5293         queue_work(hdev->workqueue, &hdev->cmd_work);
5294 }
5295
5296 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5297 {
5298         hci_req_complete_t req_complete = NULL;
5299         struct sk_buff *skb;
5300         unsigned long flags;
5301
5302         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5303
5304         /* If the completed command doesn't match the last one that was
5305          * sent, we need to do special handling of it.
5306          */
5307         if (!hci_sent_cmd_data(hdev, opcode)) {
5308                 /* Some CSR-based controllers generate a spontaneous
5309                  * reset complete event during init and any pending
5310                  * command will never be completed. In such a case we
5311                  * need to resend whatever was the last sent
5312                  * command.
5313                  */
5314                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5315                         hci_resend_last(hdev);
5316
5317                 return;
5318         }
5319
5320         /* If the command succeeded and there are still more commands in
5321          * this request, the request is not yet complete.
5322          */
5323         if (!status && !hci_req_is_complete(hdev))
5324                 return;
5325
5326         /* If this was the last command in a request, the complete
5327          * callback would be found in hdev->sent_cmd instead of the
5328          * command queue (hdev->cmd_q).
5329          */
5330         if (hdev->sent_cmd) {
5331                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5332
5333                 if (req_complete) {
5334                         /* We must set the complete callback to NULL to
5335                          * avoid calling the callback more than once if
5336                          * this function gets called again.
5337                          */
5338                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5339
5340                         goto call_complete;
5341                 }
5342         }
5343
5344         /* Remove all pending commands belonging to this request */
5345         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5346         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5347                 if (bt_cb(skb)->req.start) {
5348                         __skb_queue_head(&hdev->cmd_q, skb);
5349                         break;
5350                 }
5351
5352                 req_complete = bt_cb(skb)->req.complete;
5353                 kfree_skb(skb);
5354         }
5355         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5356
5357 call_complete:
5358         if (req_complete)
5359                 req_complete(hdev, status);
5360 }
5361
5362 static void hci_rx_work(struct work_struct *work)
5363 {
5364         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5365         struct sk_buff *skb;
5366
5367         BT_DBG("%s", hdev->name);
5368
5369         while ((skb = skb_dequeue(&hdev->rx_q))) {
5370                 /* Send copy to monitor */
5371                 hci_send_to_monitor(hdev, skb);
5372
5373                 if (atomic_read(&hdev->promisc)) {
5374                         /* Send copy to the sockets */
5375                         hci_send_to_sock(hdev, skb);
5376                 }
5377
5378                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5379                         kfree_skb(skb);
5380                         continue;
5381                 }
5382
5383                 if (test_bit(HCI_INIT, &hdev->flags)) {
5384                         /* Don't process data packets in this state. */
5385                         switch (bt_cb(skb)->pkt_type) {
5386                         case HCI_ACLDATA_PKT:
5387                         case HCI_SCODATA_PKT:
5388                                 kfree_skb(skb);
5389                                 continue;
5390                         }
5391                 }
5392
5393                 /* Process frame */
5394                 switch (bt_cb(skb)->pkt_type) {
5395                 case HCI_EVENT_PKT:
5396                         BT_DBG("%s Event packet", hdev->name);
5397                         hci_event_packet(hdev, skb);
5398                         break;
5399
5400                 case HCI_ACLDATA_PKT:
5401                         BT_DBG("%s ACL data packet", hdev->name);
5402                         hci_acldata_packet(hdev, skb);
5403                         break;
5404
5405                 case HCI_SCODATA_PKT:
5406                         BT_DBG("%s SCO data packet", hdev->name);
5407                         hci_scodata_packet(hdev, skb);
5408                         break;
5409
5410                 default:
5411                         kfree_skb(skb);
5412                         break;
5413                 }
5414         }
5415 }
5416
5417 static void hci_cmd_work(struct work_struct *work)
5418 {
5419         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5420         struct sk_buff *skb;
5421
5422         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5423                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5424
5425         /* Send queued commands */
5426         if (atomic_read(&hdev->cmd_cnt)) {
5427                 skb = skb_dequeue(&hdev->cmd_q);
5428                 if (!skb)
5429                         return;
5430
5431                 kfree_skb(hdev->sent_cmd);
5432
5433                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5434                 if (hdev->sent_cmd) {
5435                         atomic_dec(&hdev->cmd_cnt);
5436                         hci_send_frame(hdev, skb);
5437                         if (test_bit(HCI_RESET, &hdev->flags))
5438                                 cancel_delayed_work(&hdev->cmd_timer);
5439                         else
5440                                 schedule_delayed_work(&hdev->cmd_timer,
5441                                                       HCI_CMD_TIMEOUT);
5442                 } else {
5443                         skb_queue_head(&hdev->cmd_q, skb);
5444                         queue_work(hdev->workqueue, &hdev->cmd_work);
5445                 }
5446         }
5447 }
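
/* Editor's note: hdev->cmd_cnt implements the HCI command flow
 * control. It is decremented here when a command is handed to the
 * driver and replenished from the event path once the controller
 * acknowledges it (Command Complete/Status), at which point this work
 * is requeued. The clone kept in hdev->sent_cmd is what
 * hci_req_cmd_complete() inspects and what hci_resend_last() requeues
 * on a spontaneous reset.
 */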
5448
5449 void hci_req_add_le_scan_disable(struct hci_request *req)
5450 {
5451         struct hci_cp_le_set_scan_enable cp;
5452
5453         memset(&cp, 0, sizeof(cp));
5454         cp.enable = LE_SCAN_DISABLE;
5455         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5456 }
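
/* Editor's note: a minimal usage sketch (not part of the original
 * file) of the request API this helper plugs into. The callback name
 * is hypothetical; hci_update_background_scan() below drives the same
 * helper in exactly this pattern:
 *
 *	static void scan_disable_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, scan_disable_complete);
 *	if (err)
 *		BT_ERR("Failed to run HCI request: err %d", err);
 */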
5457
5458 static void add_to_white_list(struct hci_request *req,
5459                               struct hci_conn_params *params)
5460 {
5461         struct hci_cp_le_add_to_white_list cp;
5462
5463         cp.bdaddr_type = params->addr_type;
5464         bacpy(&cp.bdaddr, &params->addr);
5465
5466         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5467 }
5468
5469 static u8 update_white_list(struct hci_request *req)
5470 {
5471         struct hci_dev *hdev = req->hdev;
5472         struct hci_conn_params *params;
5473         struct bdaddr_list *b;
5474         u8 white_list_entries = 0;
5475
5476         /* Go through the current white list programmed into the
5477          * controller one by one and check if that address is still
5478          * in the list of pending connections or list of devices to
5479          * report. If not present in either list, then queue the
5480          * command to remove it from the controller.
5481          */
5482         list_for_each_entry(b, &hdev->le_white_list, list) {
5483                 struct hci_cp_le_del_from_white_list cp;
5484
5485                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5486                                               &b->bdaddr, b->bdaddr_type) ||
5487                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5488                                               &b->bdaddr, b->bdaddr_type)) {
5489                         white_list_entries++;
5490                         continue;
5491                 }
5492
5493                 cp.bdaddr_type = b->bdaddr_type;
5494                 bacpy(&cp.bdaddr, &b->bdaddr);
5495
5496                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5497                             sizeof(cp), &cp);
5498         }
5499
5500         /* Since all stale white list entries have now been
5501          * removed, walk through the list of pending connections
5502          * and ensure that any new device gets programmed into
5503          * the controller.
5504          *
5505          * If the list of devices is larger than the number of
5506          * available white list entries in the controller, then
5507          * just abort and return a filter policy value that does not
5508          * use the white list.
5509          */
5510         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5511                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5512                                            &params->addr, params->addr_type))
5513                         continue;
5514
5515                 if (white_list_entries >= hdev->le_white_list_size) {
5516                         /* Select filter policy to accept all advertising */
5517                         return 0x00;
5518                 }
5519
5520                 if (hci_find_irk_by_addr(hdev, &params->addr,
5521                                          params->addr_type)) {
5522                         /* White list cannot be used with RPAs */
5523                         return 0x00;
5524                 }
5525
5526                 white_list_entries++;
5527                 add_to_white_list(req, params);
5528         }
5529
5530         /* After adding all new pending connections, walk through
5531          * the list of pending reports and also add these to the
5532          * white list if there is still space.
5533          */
5534         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5535                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5536                                            &params->addr, params->addr_type))
5537                         continue;
5538
5539                 if (white_list_entries >= hdev->le_white_list_size) {
5540                         /* Select filter policy to accept all advertising */
5541                         return 0x00;
5542                 }
5543
5544                 if (hci_find_irk_by_addr(hdev, &params->addr,
5545                                          params->addr_type)) {
5546                         /* White list cannot be used with RPAs */
5547                         return 0x00;
5548                 }
5549
5550                 white_list_entries++;
5551                 add_to_white_list(req, params);
5552         }
5553
5554         /* Select filter policy to use white list */
5555         return 0x01;
5556 }
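
/* Editor's note: the return value above feeds the Scan_Filter_Policy
 * parameter of LE Set Scan Parameters: 0x00 accepts all advertising
 * packets, while 0x01 accepts only packets from devices in the white
 * list (values per the Core Specification).
 */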
5557
5558 void hci_req_add_le_passive_scan(struct hci_request *req)
5559 {
5560         struct hci_cp_le_set_scan_param param_cp;
5561         struct hci_cp_le_set_scan_enable enable_cp;
5562         struct hci_dev *hdev = req->hdev;
5563         u8 own_addr_type;
5564         u8 filter_policy;
5565
5566         /* Set require_privacy to false since no SCAN_REQ is sent
5567          * during passive scanning. Not using an unresolvable address
5568          * here is important so that peer devices using direct
5569          * advertising with our address will be correctly reported
5570          * by the controller.
5571          */
5572         if (hci_update_random_address(req, false, &own_addr_type))
5573                 return;
5574
5575         /* Adding or removing entries from the white list must
5576          * happen before enabling scanning. The controller does
5577          * not allow white list modification while scanning.
5578          */
5579         filter_policy = update_white_list(req);
5580
5581         memset(&param_cp, 0, sizeof(param_cp));
5582         param_cp.type = LE_SCAN_PASSIVE;
5583         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5584         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5585         param_cp.own_address_type = own_addr_type;
5586         param_cp.filter_policy = filter_policy;
5587         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5588                     &param_cp);
5589
5590         memset(&enable_cp, 0, sizeof(enable_cp));
5591         enable_cp.enable = LE_SCAN_ENABLE;
5592         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5593         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5594                     &enable_cp);
5595 }
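
/* Editor's note: per the Core Specification the scan interval and
 * window programmed above are expressed in units of 0.625 ms. As a
 * hypothetical example, hdev->le_scan_interval = 0x0060 (96 * 0.625 ms
 * = 60 ms) combined with hdev->le_scan_window = 0x0030 (30 ms) makes
 * the controller listen for 30 ms out of every 60 ms while the passive
 * scan is enabled.
 */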
5596
5597 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5598 {
5599         if (status)
5600                 BT_DBG("HCI request failed to update background scanning: "
5601                        "status 0x%2.2x", status);
5602 }
5603
5604 /* This function controls the background scanning based on hdev->pend_le_conns
5605  * list. If there are pending LE connections, we start the background scanning,
5606  * otherwise we stop it.
5607  *
5608  * This function requires that the caller holds hdev->lock.
5609  */
5610 void hci_update_background_scan(struct hci_dev *hdev)
5611 {
5612         struct hci_request req;
5613         struct hci_conn *conn;
5614         int err;
5615
5616         if (!test_bit(HCI_UP, &hdev->flags) ||
5617             test_bit(HCI_INIT, &hdev->flags) ||
5618             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5619             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5620             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5621             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5622                 return;
5623
5624         /* No point in doing scanning if LE support hasn't been enabled */
5625         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5626                 return;
5627
5628         /* If discovery is active don't interfere with it */
5629         if (hdev->discovery.state != DISCOVERY_STOPPED)
5630                 return;
5631
5632         hci_req_init(&req, hdev);
5633
5634         if (list_empty(&hdev->pend_le_conns) &&
5635             list_empty(&hdev->pend_le_reports)) {
5636                 /* If there are no pending LE connections or devices
5637                  * to be scanned for, we should stop the background
5638                  * scanning.
5639                  */
5640
5641                 /* If controller is not scanning we are done. */
5642                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5643                         return;
5644
5645                 hci_req_add_le_scan_disable(&req);
5646
5647                 BT_DBG("%s stopping background scanning", hdev->name);
5648         } else {
5649                 /* If there is at least one pending LE connection, we should
5650                  * keep the background scan running.
5651                  */
5652
5653                 /* If controller is connecting, we should not start scanning
5654                  * since some controllers are not able to scan and connect at
5655                  * the same time.
5656                  */
5657                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5658                 if (conn)
5659                         return;
5660
5661                 /* If controller is currently scanning, we stop it to ensure we
5662                  * don't miss any advertising (due to the duplicates filter).
5663                  */
5664                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5665                         hci_req_add_le_scan_disable(&req);
5666
5667                 hci_req_add_le_passive_scan(&req);
5668
5669                 BT_DBG("%s starting background scanning", hdev->name);
5670         }
5671
5672         err = hci_req_run(&req, update_background_scan_complete);
5673         if (err)
5674                 BT_ERR("Failed to run HCI request: err %d", err);
5675 }
5676
5677 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5678 {
5679         struct bdaddr_list *b;
5680
5681         list_for_each_entry(b, &hdev->whitelist, list) {
5682                 struct hci_conn *conn;
5683
5684                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5685                 if (!conn)
5686                         return true;
5687
5688                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5689                         return true;
5690         }
5691
5692         return false;
5693 }
5694
5695 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5696 {
5697         u8 scan;
5698
5699         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5700                 return;
5701
5702         if (!hdev_is_powered(hdev))
5703                 return;
5704
5705         if (mgmt_powering_down(hdev))
5706                 return;
5707
5708         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5709             disconnected_whitelist_entries(hdev))
5710                 scan = SCAN_PAGE;
5711         else
5712                 scan = SCAN_DISABLED;
5713
5714         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5715                 return;
5716
5717         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5718                 scan |= SCAN_INQUIRY;
5719
5720         if (req)
5721                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5722         else
5723                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5724 }
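
/* Editor's note: the scan value written above is the Write Scan Enable
 * bitmask from the Core Specification: bit 0 enables inquiry scan
 * (SCAN_INQUIRY) and bit 1 enables page scan (SCAN_PAGE), so a device
 * that is both connectable and discoverable writes 0x03.
 */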