Bluetooth: Move hci_pend_le_conn_* functions to different location
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
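
/* Illustrative usage sketch (assuming debugfs is mounted at the usual
 * /sys/kernel/debug and the controller is hci0):
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "1" sends HCI_OP_ENABLE_DUT_MODE to the controller; writing
 * "0" issues HCI_OP_RESET to leave Device Under Test mode again.
 */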

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}
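
/* Byte-order sketch for the conversion above: the 16-bit service UUID
 * 0x1101 expanded with the Bluetooth base UUID would be stored here as
 * fb 34 9b 5f 80 00 00 80 00 10 00 00 01 11 00 00, and reversing the
 * bytes yields 00001101-0000-1000-8000-00805f9b34fb for %pUb output.
 */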

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");
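
/* The idle_timeout value is expressed in milliseconds, so the range
 * accepted above corresponds to 0.5 seconds through one hour, with 0
 * left as a special value that disables the idle timeout handling.
 */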

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
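
/* The sniff interval values are expressed in baseband slots of 0.625 ms;
 * the checks above require a non-zero, even number of slots and keep the
 * configured minimum no larger than the configured maximum.
 */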

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
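
/* LE connection interval values use units of 1.25 ms, so the bounds
 * checked above (0x0006 to 0x0c80) correspond to the 7.5 ms to 4 s
 * range allowed by the specification.
 */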

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
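
/* The three valid bits of the advertising channel map select the LE
 * advertising channels: 0x01 for channel 37, 0x02 for channel 38 and
 * 0x04 for channel 39, hence the 0x01-0x07 range check above.
 */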

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
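
/* Illustrative writes accepted by le_auto_conn_write() above; the
 * address, address type and auto_connect fields mirror the seq_file
 * output format of le_auto_conn_show(), and auto_connect is optional
 * for "add":
 *
 *	echo "add 11:22:33:44:55:66 1 2" > .../le_auto_conn
 *	echo "del 11:22:33:44:55:66 1" > .../le_auto_conn
 *	echo "clr" > .../le_auto_conn
 */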

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
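
/* Illustrative sketch of the synchronous command API above: with the
 * request lock held, a caller can issue a command and parse the
 * matching Command Complete parameters from the returned skb, which
 * it must free afterwards:
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *
 *	if (!IS_ERR(skb)) {
 *		struct hci_rp_read_bd_addr *rp = (void *) skb->data;
 *		use rp->status and rp->bdaddr here
 *		kfree_skb(skb);
 *	}
 */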

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 = 32000 slots * 0.625 ms) */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
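
/* The values returned above select the Inquiry Result format used by
 * the Write Inquiry Mode command: 0x00 for standard results, 0x01 for
 * results with RSSI and 0x02 for the extended inquiry result format.
 */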

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command
         * is marked as supported. If not supported, assume that the controller
1616          * does not have actual support for stored link keys which makes this
1617          * command redundant anyway.
1618          *
1619          * Some controllers indicate that they support handling deleting
1620          * stored link keys, but they don't. The quirk lets a driver
1621          * just disable this command.
1622          */
1623         if (hdev->commands[6] & 0x80 &&
1624             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1625                 struct hci_cp_delete_stored_link_key cp;
1626
1627                 bacpy(&cp.bdaddr, BDADDR_ANY);
1628                 cp.delete_all = 0x01;
1629                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1630                             sizeof(cp), &cp);
1631         }
1632
1633         if (hdev->commands[5] & 0x10)
1634                 hci_setup_link_policy(req);
1635
1636         if (lmp_le_capable(hdev))
1637                 hci_set_le_support(req);
1638
1639         /* Read features beyond page 1 if available */
1640         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1641                 struct hci_cp_read_local_ext_features cp;
1642
1643                 cp.page = p;
1644                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1645                             sizeof(cp), &cp);
1646         }
1647 }
1648
1649 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1650 {
1651         struct hci_dev *hdev = req->hdev;
1652
1653         /* Set event mask page 2 if the HCI command for it is supported */
1654         if (hdev->commands[22] & 0x04)
1655                 hci_set_event_mask_page_2(req);
1656
1657         /* Check for Synchronization Train support */
1658         if (lmp_sync_train_capable(hdev))
1659                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1660
1661         /* Enable Secure Connections if supported and configured */
1662         if ((lmp_sc_capable(hdev) ||
1663              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1664             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1665                 u8 support = 0x01;
1666                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1667                             sizeof(support), &support);
1668         }
1669 }
1670
1671 static int __hci_init(struct hci_dev *hdev)
1672 {
1673         int err;
1674
1675         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1676         if (err < 0)
1677                 return err;
1678
1679         /* The Device Under Test (DUT) mode is special and available for
1680          * all controller types. So just create it early on.
1681          */
1682         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1684                                     &dut_mode_fops);
1685         }
1686
1687         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1688          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1689          * first stage init.
1690          */
1691         if (hdev->dev_type != HCI_BREDR)
1692                 return 0;
1693
1694         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1695         if (err < 0)
1696                 return err;
1697
1698         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1699         if (err < 0)
1700                 return err;
1701
1702         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1703         if (err < 0)
1704                 return err;
1705
1706         /* Only create debugfs entries during the initial setup
1707          * phase and not every time the controller gets powered on.
1708          */
1709         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1710                 return 0;
1711
1712         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1713                             &features_fops);
1714         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1715                            &hdev->manufacturer);
1716         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1717         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1718         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1719                             &blacklist_fops);
1720         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1721
1722         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1723                             &conn_info_min_age_fops);
1724         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1725                             &conn_info_max_age_fops);
1726
1727         if (lmp_bredr_capable(hdev)) {
1728                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1729                                     hdev, &inquiry_cache_fops);
1730                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1731                                     hdev, &link_keys_fops);
1732                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1733                                     hdev, &dev_class_fops);
1734                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1735                                     hdev, &voice_setting_fops);
1736         }
1737
1738         if (lmp_ssp_capable(hdev)) {
1739                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1740                                     hdev, &auto_accept_delay_fops);
1741                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1742                                     hdev, &force_sc_support_fops);
1743                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1744                                     hdev, &sc_only_mode_fops);
1745         }
1746
1747         if (lmp_sniff_capable(hdev)) {
1748                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1749                                     hdev, &idle_timeout_fops);
1750                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1751                                     hdev, &sniff_min_interval_fops);
1752                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1753                                     hdev, &sniff_max_interval_fops);
1754         }
1755
1756         if (lmp_le_capable(hdev)) {
1757                 debugfs_create_file("identity", 0400, hdev->debugfs,
1758                                     hdev, &identity_fops);
1759                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1760                                     hdev, &rpa_timeout_fops);
1761                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1762                                     hdev, &random_address_fops);
1763                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1764                                     hdev, &static_address_fops);
1765
1766                 /* For controllers with a public address, provide a debug
1767                  * option to force the usage of the configured static
1768                  * address. By default the public address is used.
1769                  */
1770                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1771                         debugfs_create_file("force_static_address", 0644,
1772                                             hdev->debugfs, hdev,
1773                                             &force_static_address_fops);
1774
1775                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1776                                   &hdev->le_white_list_size);
1777                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1778                                     &white_list_fops);
1779                 debugfs_create_file("identity_resolving_keys", 0400,
1780                                     hdev->debugfs, hdev,
1781                                     &identity_resolving_keys_fops);
1782                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1783                                     hdev, &long_term_keys_fops);
1784                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1785                                     hdev, &conn_min_interval_fops);
1786                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1787                                     hdev, &conn_max_interval_fops);
1788                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1789                                     hdev, &adv_channel_map_fops);
1790                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1791                                     &le_auto_conn_fops);
1792                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1793                                    hdev->debugfs,
1794                                    &hdev->discov_interleaved_timeout);
1795         }
1796
1797         return 0;
1798 }
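
/* Editor's note: the entries created above live under debugfs, normally
 * mounted at /sys/kernel/debug, in a per-controller directory (assumed
 * here to be /sys/kernel/debug/bluetooth/hci0). A minimal userspace
 * sketch for dumping one entry:
 */
#if 0	/* userspace sketch, not kernel code */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/bluetooth/hci0/features", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif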
1799
1800 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1801 {
1802         __u8 scan = opt;
1803
1804         BT_DBG("%s %x", req->hdev->name, scan);
1805
1806         /* Inquiry and Page scans */
1807         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1808 }
1809
1810 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1811 {
1812         __u8 auth = opt;
1813
1814         BT_DBG("%s %x", req->hdev->name, auth);
1815
1816         /* Authentication */
1817         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1818 }
1819
1820 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1821 {
1822         __u8 encrypt = opt;
1823
1824         BT_DBG("%s %x", req->hdev->name, encrypt);
1825
1826         /* Encryption */
1827         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1828 }
1829
1830 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1831 {
1832         __le16 policy = cpu_to_le16(opt);
1833
1834         BT_DBG("%s %x", req->hdev->name, policy);
1835
1836         /* Default link policy */
1837         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1838 }
1839
1840 /* Get HCI device by index.
1841  * Device is held on return. */
1842 struct hci_dev *hci_dev_get(int index)
1843 {
1844         struct hci_dev *hdev = NULL, *d;
1845
1846         BT_DBG("%d", index);
1847
1848         if (index < 0)
1849                 return NULL;
1850
1851         read_lock(&hci_dev_list_lock);
1852         list_for_each_entry(d, &hci_dev_list, list) {
1853                 if (d->id == index) {
1854                         hdev = hci_dev_hold(d);
1855                         break;
1856                 }
1857         }
1858         read_unlock(&hci_dev_list_lock);
1859         return hdev;
1860 }
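
/* Editor's sketch: every successful hci_dev_get() must be balanced by
 * hci_dev_put(), typically with the pattern used throughout this file:
 */
#if 0	/* sketch */
static int example_use_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	/* ... operate on hdev ... */

	hci_dev_put(hdev);
	return 0;
}
#endif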
1861
1862 /* ---- Inquiry support ---- */
1863
1864 bool hci_discovery_active(struct hci_dev *hdev)
1865 {
1866         struct discovery_state *discov = &hdev->discovery;
1867
1868         switch (discov->state) {
1869         case DISCOVERY_FINDING:
1870         case DISCOVERY_RESOLVING:
1871                 return true;
1872
1873         default:
1874                 return false;
1875         }
1876 }
1877
1878 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1879 {
1880         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1881
1882         if (hdev->discovery.state == state)
1883                 return;
1884
1885         switch (state) {
1886         case DISCOVERY_STOPPED:
1887                 hci_update_background_scan(hdev);
1888
1889                 if (hdev->discovery.state != DISCOVERY_STARTING)
1890                         mgmt_discovering(hdev, 0);
1891                 break;
1892         case DISCOVERY_STARTING:
1893                 break;
1894         case DISCOVERY_FINDING:
1895                 mgmt_discovering(hdev, 1);
1896                 break;
1897         case DISCOVERY_RESOLVING:
1898                 break;
1899         case DISCOVERY_STOPPING:
1900                 break;
1901         }
1902
1903         hdev->discovery.state = state;
1904 }
1905
1906 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1907 {
1908         struct discovery_state *cache = &hdev->discovery;
1909         struct inquiry_entry *p, *n;
1910
1911         list_for_each_entry_safe(p, n, &cache->all, all) {
1912                 list_del(&p->all);
1913                 kfree(p);
1914         }
1915
1916         INIT_LIST_HEAD(&cache->unknown);
1917         INIT_LIST_HEAD(&cache->resolve);
1918 }
1919
1920 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1921                                                bdaddr_t *bdaddr)
1922 {
1923         struct discovery_state *cache = &hdev->discovery;
1924         struct inquiry_entry *e;
1925
1926         BT_DBG("cache %p, %pMR", cache, bdaddr);
1927
1928         list_for_each_entry(e, &cache->all, all) {
1929                 if (!bacmp(&e->data.bdaddr, bdaddr))
1930                         return e;
1931         }
1932
1933         return NULL;
1934 }
1935
1936 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1937                                                        bdaddr_t *bdaddr)
1938 {
1939         struct discovery_state *cache = &hdev->discovery;
1940         struct inquiry_entry *e;
1941
1942         BT_DBG("cache %p, %pMR", cache, bdaddr);
1943
1944         list_for_each_entry(e, &cache->unknown, list) {
1945                 if (!bacmp(&e->data.bdaddr, bdaddr))
1946                         return e;
1947         }
1948
1949         return NULL;
1950 }
1951
1952 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1953                                                        bdaddr_t *bdaddr,
1954                                                        int state)
1955 {
1956         struct discovery_state *cache = &hdev->discovery;
1957         struct inquiry_entry *e;
1958
1959         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1960
1961         list_for_each_entry(e, &cache->resolve, list) {
1962                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1963                         return e;
1964                 if (!bacmp(&e->data.bdaddr, bdaddr))
1965                         return e;
1966         }
1967
1968         return NULL;
1969 }
1970
1971 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1972                                       struct inquiry_entry *ie)
1973 {
1974         struct discovery_state *cache = &hdev->discovery;
1975         struct list_head *pos = &cache->resolve;
1976         struct inquiry_entry *p;
1977
1978         list_del(&ie->list);
1979
1980         list_for_each_entry(p, &cache->resolve, list) {
1981                 if (p->name_state != NAME_PENDING &&
1982                     abs(p->data.rssi) >= abs(ie->data.rssi))
1983                         break;
1984                 pos = &p->list;
1985         }
1986
1987         list_add(&ie->list, pos);
1988 }
1989
1990 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1991                               bool name_known, bool *ssp)
1992 {
1993         struct discovery_state *cache = &hdev->discovery;
1994         struct inquiry_entry *ie;
1995
1996         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1997
1998         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1999
2000         *ssp = data->ssp_mode;
2001
2002         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2003         if (ie) {
2004                 if (ie->data.ssp_mode)
2005                         *ssp = true;
2006
2007                 if (ie->name_state == NAME_NEEDED &&
2008                     data->rssi != ie->data.rssi) {
2009                         ie->data.rssi = data->rssi;
2010                         hci_inquiry_cache_update_resolve(hdev, ie);
2011                 }
2012
2013                 goto update;
2014         }
2015
2016         /* Entry not in the cache. Add new one. */
2017         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2018         if (!ie)
2019                 return false;
2020
2021         list_add(&ie->all, &cache->all);
2022
2023         if (name_known) {
2024                 ie->name_state = NAME_KNOWN;
2025         } else {
2026                 ie->name_state = NAME_NOT_KNOWN;
2027                 list_add(&ie->list, &cache->unknown);
2028         }
2029
2030 update:
2031         if (name_known && ie->name_state != NAME_KNOWN &&
2032             ie->name_state != NAME_PENDING) {
2033                 ie->name_state = NAME_KNOWN;
2034                 list_del(&ie->list);
2035         }
2036
2037         memcpy(&ie->data, data, sizeof(*data));
2038         ie->timestamp = jiffies;
2039         cache->timestamp = jiffies;
2040
2041         if (ie->name_state == NAME_NOT_KNOWN)
2042                 return false;
2043
2044         return true;
2045 }
2046
2047 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048 {
2049         struct discovery_state *cache = &hdev->discovery;
2050         struct inquiry_info *info = (struct inquiry_info *) buf;
2051         struct inquiry_entry *e;
2052         int copied = 0;
2053
2054         list_for_each_entry(e, &cache->all, all) {
2055                 struct inquiry_data *data = &e->data;
2056
2057                 if (copied >= num)
2058                         break;
2059
2060                 bacpy(&info->bdaddr, &data->bdaddr);
2061                 info->pscan_rep_mode    = data->pscan_rep_mode;
2062                 info->pscan_period_mode = data->pscan_period_mode;
2063                 info->pscan_mode        = data->pscan_mode;
2064                 memcpy(info->dev_class, data->dev_class, 3);
2065                 info->clock_offset      = data->clock_offset;
2066
2067                 info++;
2068                 copied++;
2069         }
2070
2071         BT_DBG("cache %p, copied %d", cache, copied);
2072         return copied;
2073 }
2074
2075 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2076 {
2077         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2078         struct hci_dev *hdev = req->hdev;
2079         struct hci_cp_inquiry cp;
2080
2081         BT_DBG("%s", hdev->name);
2082
2083         if (test_bit(HCI_INQUIRY, &hdev->flags))
2084                 return;
2085
2086         /* Start Inquiry */
2087         memcpy(&cp.lap, &ir->lap, 3);
2088         cp.length  = ir->length;
2089         cp.num_rsp = ir->num_rsp;
2090         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2091 }
2092
2093 static int wait_inquiry(void *word)
2094 {
2095         schedule();
2096         return signal_pending(current);
2097 }
2098
2099 int hci_inquiry(void __user *arg)
2100 {
2101         __u8 __user *ptr = arg;
2102         struct hci_inquiry_req ir;
2103         struct hci_dev *hdev;
2104         int err = 0, do_inquiry = 0, max_rsp;
2105         long timeo;
2106         __u8 *buf;
2107
2108         if (copy_from_user(&ir, ptr, sizeof(ir)))
2109                 return -EFAULT;
2110
2111         hdev = hci_dev_get(ir.dev_id);
2112         if (!hdev)
2113                 return -ENODEV;
2114
2115         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2116                 err = -EBUSY;
2117                 goto done;
2118         }
2119
2120         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2121                 err = -EOPNOTSUPP;
2122                 goto done;
2123         }
2124
2125         if (hdev->dev_type != HCI_BREDR) {
2126                 err = -EOPNOTSUPP;
2127                 goto done;
2128         }
2129
2130         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2131                 err = -EOPNOTSUPP;
2132                 goto done;
2133         }
2134
2135         hci_dev_lock(hdev);
2136         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2137             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2138                 hci_inquiry_cache_flush(hdev);
2139                 do_inquiry = 1;
2140         }
2141         hci_dev_unlock(hdev);
2142
2143         timeo = ir.length * msecs_to_jiffies(2000);
2144
2145         if (do_inquiry) {
2146                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2147                                    timeo);
2148                 if (err < 0)
2149                         goto done;
2150
2151                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2152                  * cleared). If it is interrupted by a signal, return -EINTR.
2153                  */
2154                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2155                                 TASK_INTERRUPTIBLE))
2156                         return -EINTR;
2157         }
2158
2159         /* For an unlimited number of responses we use a buffer with
2160          * 255 entries.
2161          */
2162         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2163
2164         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2165          * and then copy it to user space.
2166          */
2167         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2168         if (!buf) {
2169                 err = -ENOMEM;
2170                 goto done;
2171         }
2172
2173         hci_dev_lock(hdev);
2174         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2175         hci_dev_unlock(hdev);
2176
2177         BT_DBG("num_rsp %d", ir.num_rsp);
2178
2179         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2180                 ptr += sizeof(ir);
2181                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2182                                  ir.num_rsp))
2183                         err = -EFAULT;
2184         } else
2185                 err = -EFAULT;
2186
2187         kfree(buf);
2188
2189 done:
2190         hci_dev_put(hdev);
2191         return err;
2192 }
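
/* Editor's sketch of the matching userspace call (hypothetical example;
 * assumes the BlueZ <bluetooth/hci.h> definitions of struct
 * hci_inquiry_req, struct inquiry_info and the HCIINQUIRY ioctl, and
 * that sk is an open AF_BLUETOOTH/SOCK_RAW/BTPROTO_HCI socket):
 */
#if 0	/* userspace sketch */
struct {
	struct hci_inquiry_req ir;
	struct inquiry_info    info[255];
} req = {
	.ir = {
		.dev_id  = 0,			/* hci0 */
		.flags   = IREQ_CACHE_FLUSH,
		.lap     = { 0x33, 0x8b, 0x9e },/* GIAC */
		.length  = 8,			/* inquiry length units */
		.num_rsp = 0,			/* 0 means up to 255 */
	},
};

if (ioctl(sk, HCIINQUIRY, &req) < 0)
	perror("HCIINQUIRY");
#endif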
2193
2194 static int hci_dev_do_open(struct hci_dev *hdev)
2195 {
2196         int ret = 0;
2197
2198         BT_DBG("%s %p", hdev->name, hdev);
2199
2200         hci_req_lock(hdev);
2201
2202         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2203                 ret = -ENODEV;
2204                 goto done;
2205         }
2206
2207         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2208                 /* Check for rfkill but allow the HCI setup stage to
2209                  * proceed (which in itself doesn't cause any RF activity).
2210                  */
2211                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2212                         ret = -ERFKILL;
2213                         goto done;
2214                 }
2215
2216                 /* Check for valid public address or a configured static
2217          * random address, but let the HCI setup proceed to
2218                  * be able to determine if there is a public address
2219                  * or not.
2220                  *
2221                  * In case of user channel usage, it is not important
2222                  * if a public address or static random address is
2223                  * available.
2224                  *
2225                  * This check is only valid for BR/EDR controllers
2226                  * since AMP controllers do not have an address.
2227                  */
2228                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2229                     hdev->dev_type == HCI_BREDR &&
2230                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2231                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2232                         ret = -EADDRNOTAVAIL;
2233                         goto done;
2234                 }
2235         }
2236
2237         if (test_bit(HCI_UP, &hdev->flags)) {
2238                 ret = -EALREADY;
2239                 goto done;
2240         }
2241
2242         if (hdev->open(hdev)) {
2243                 ret = -EIO;
2244                 goto done;
2245         }
2246
2247         atomic_set(&hdev->cmd_cnt, 1);
2248         set_bit(HCI_INIT, &hdev->flags);
2249
2250         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2251                 ret = hdev->setup(hdev);
2252
2253         if (!ret) {
2254                 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2255                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2256                         ret = __hci_init(hdev);
2257         }
2258
2259         clear_bit(HCI_INIT, &hdev->flags);
2260
2261         if (!ret) {
2262                 hci_dev_hold(hdev);
2263                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2264                 set_bit(HCI_UP, &hdev->flags);
2265                 hci_notify(hdev, HCI_DEV_UP);
2266                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2267                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2268                     hdev->dev_type == HCI_BREDR) {
2269                         hci_dev_lock(hdev);
2270                         mgmt_powered(hdev, 1);
2271                         hci_dev_unlock(hdev);
2272                 }
2273         } else {
2274                 /* Init failed, cleanup */
2275                 flush_work(&hdev->tx_work);
2276                 flush_work(&hdev->cmd_work);
2277                 flush_work(&hdev->rx_work);
2278
2279                 skb_queue_purge(&hdev->cmd_q);
2280                 skb_queue_purge(&hdev->rx_q);
2281
2282                 if (hdev->flush)
2283                         hdev->flush(hdev);
2284
2285                 if (hdev->sent_cmd) {
2286                         kfree_skb(hdev->sent_cmd);
2287                         hdev->sent_cmd = NULL;
2288                 }
2289
2290                 hdev->close(hdev);
2291                 hdev->flags &= BIT(HCI_RAW);
2292         }
2293
2294 done:
2295         hci_req_unlock(hdev);
2296         return ret;
2297 }
2298
2299 /* ---- HCI ioctl helpers ---- */
2300
2301 int hci_dev_open(__u16 dev)
2302 {
2303         struct hci_dev *hdev;
2304         int err;
2305
2306         hdev = hci_dev_get(dev);
2307         if (!hdev)
2308                 return -ENODEV;
2309
2310         /* Devices that are marked for raw-only usage can only be powered
2311          * up as user channel. Trying to bring them up as normal devices
2312          * will result in a failure. Only user channel operation is
2313          * possible.
2314          *
2315          * When this function is called for a user channel, the flag
2316          * HCI_USER_CHANNEL will be set first before attempting to
2317          * open the device.
2318          */
2319         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2320             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2321                 err = -EOPNOTSUPP;
2322                 goto done;
2323         }
2324
2325         /* We need to ensure that no other power on/off work is pending
2326          * before proceeding to call hci_dev_do_open. This is
2327          * particularly important if the setup procedure has not yet
2328          * completed.
2329          */
2330         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2331                 cancel_delayed_work(&hdev->power_off);
2332
2333         /* After this call it is guaranteed that the setup procedure
2334          * has finished. This means that error conditions like RFKILL
2335          * or no valid public or static random address apply.
2336          */
2337         flush_workqueue(hdev->req_workqueue);
2338
2339         err = hci_dev_do_open(hdev);
2340
2341 done:
2342         hci_dev_put(hdev);
2343         return err;
2344 }
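
/* Editor's sketch: hci_dev_open() backs the HCIDEVUP ioctl, so the
 * userspace equivalent (hypothetical example, needs CAP_NET_ADMIN) is:
 */
#if 0	/* userspace sketch */
int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0)	/* bring up hci0 */
	perror("HCIDEVUP");
#endif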
2345
2346 static int hci_dev_do_close(struct hci_dev *hdev)
2347 {
2348         BT_DBG("%s %p", hdev->name, hdev);
2349
2350         cancel_delayed_work(&hdev->power_off);
2351
2352         hci_req_cancel(hdev, ENODEV);
2353         hci_req_lock(hdev);
2354
2355         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2356                 cancel_delayed_work_sync(&hdev->cmd_timer);
2357                 hci_req_unlock(hdev);
2358                 return 0;
2359         }
2360
2361         /* Flush RX and TX works */
2362         flush_work(&hdev->tx_work);
2363         flush_work(&hdev->rx_work);
2364
2365         if (hdev->discov_timeout > 0) {
2366                 cancel_delayed_work(&hdev->discov_off);
2367                 hdev->discov_timeout = 0;
2368                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2369                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2370         }
2371
2372         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2373                 cancel_delayed_work(&hdev->service_cache);
2374
2375         cancel_delayed_work_sync(&hdev->le_scan_disable);
2376
2377         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2378                 cancel_delayed_work_sync(&hdev->rpa_expired);
2379
2380         hci_dev_lock(hdev);
2381         hci_inquiry_cache_flush(hdev);
2382         hci_conn_hash_flush(hdev);
2383         hci_pend_le_conns_clear(hdev);
2384         hci_dev_unlock(hdev);
2385
2386         hci_notify(hdev, HCI_DEV_DOWN);
2387
2388         if (hdev->flush)
2389                 hdev->flush(hdev);
2390
2391         /* Reset device */
2392         skb_queue_purge(&hdev->cmd_q);
2393         atomic_set(&hdev->cmd_cnt, 1);
2394         if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2395             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2396             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2397                 set_bit(HCI_INIT, &hdev->flags);
2398                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2399                 clear_bit(HCI_INIT, &hdev->flags);
2400         }
2401
2402         /* Flush cmd work */
2403         flush_work(&hdev->cmd_work);
2404
2405         /* Drop queues */
2406         skb_queue_purge(&hdev->rx_q);
2407         skb_queue_purge(&hdev->cmd_q);
2408         skb_queue_purge(&hdev->raw_q);
2409
2410         /* Drop last sent command */
2411         if (hdev->sent_cmd) {
2412                 cancel_delayed_work_sync(&hdev->cmd_timer);
2413                 kfree_skb(hdev->sent_cmd);
2414                 hdev->sent_cmd = NULL;
2415         }
2416
2417         kfree_skb(hdev->recv_evt);
2418         hdev->recv_evt = NULL;
2419
2420         /* After this point our queues are empty
2421          * and no tasks are scheduled. */
2422         hdev->close(hdev);
2423
2424         /* Clear flags */
2425         hdev->flags &= BIT(HCI_RAW);
2426         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2427
2428         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2429                 if (hdev->dev_type == HCI_BREDR) {
2430                         hci_dev_lock(hdev);
2431                         mgmt_powered(hdev, 0);
2432                         hci_dev_unlock(hdev);
2433                 }
2434         }
2435
2436         /* Controller radio is available but is currently powered down */
2437         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2438
2439         memset(hdev->eir, 0, sizeof(hdev->eir));
2440         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2441         bacpy(&hdev->random_addr, BDADDR_ANY);
2442
2443         hci_req_unlock(hdev);
2444
2445         hci_dev_put(hdev);
2446         return 0;
2447 }
2448
2449 int hci_dev_close(__u16 dev)
2450 {
2451         struct hci_dev *hdev;
2452         int err;
2453
2454         hdev = hci_dev_get(dev);
2455         if (!hdev)
2456                 return -ENODEV;
2457
2458         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2459                 err = -EBUSY;
2460                 goto done;
2461         }
2462
2463         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2464                 cancel_delayed_work(&hdev->power_off);
2465
2466         err = hci_dev_do_close(hdev);
2467
2468 done:
2469         hci_dev_put(hdev);
2470         return err;
2471 }
2472
2473 int hci_dev_reset(__u16 dev)
2474 {
2475         struct hci_dev *hdev;
2476         int ret = 0;
2477
2478         hdev = hci_dev_get(dev);
2479         if (!hdev)
2480                 return -ENODEV;
2481
2482         hci_req_lock(hdev);
2483
2484         if (!test_bit(HCI_UP, &hdev->flags)) {
2485                 ret = -ENETDOWN;
2486                 goto done;
2487         }
2488
2489         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2490                 ret = -EBUSY;
2491                 goto done;
2492         }
2493
2494         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2495                 ret = -EOPNOTSUPP;
2496                 goto done;
2497         }
2498
2499         /* Drop queues */
2500         skb_queue_purge(&hdev->rx_q);
2501         skb_queue_purge(&hdev->cmd_q);
2502
2503         hci_dev_lock(hdev);
2504         hci_inquiry_cache_flush(hdev);
2505         hci_conn_hash_flush(hdev);
2506         hci_dev_unlock(hdev);
2507
2508         if (hdev->flush)
2509                 hdev->flush(hdev);
2510
2511         atomic_set(&hdev->cmd_cnt, 1);
2512         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2513
2514         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2515
2516 done:
2517         hci_req_unlock(hdev);
2518         hci_dev_put(hdev);
2519         return ret;
2520 }
2521
2522 int hci_dev_reset_stat(__u16 dev)
2523 {
2524         struct hci_dev *hdev;
2525         int ret = 0;
2526
2527         hdev = hci_dev_get(dev);
2528         if (!hdev)
2529                 return -ENODEV;
2530
2531         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2532                 ret = -EBUSY;
2533                 goto done;
2534         }
2535
2536         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2537                 ret = -EOPNOTSUPP;
2538                 goto done;
2539         }
2540
2541         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2542
2543 done:
2544         hci_dev_put(hdev);
2545         return ret;
2546 }
2547
2548 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2549 {
2550         struct hci_dev *hdev;
2551         struct hci_dev_req dr;
2552         int err = 0;
2553
2554         if (copy_from_user(&dr, arg, sizeof(dr)))
2555                 return -EFAULT;
2556
2557         hdev = hci_dev_get(dr.dev_id);
2558         if (!hdev)
2559                 return -ENODEV;
2560
2561         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2562                 err = -EBUSY;
2563                 goto done;
2564         }
2565
2566         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2567                 err = -EOPNOTSUPP;
2568                 goto done;
2569         }
2570
2571         if (hdev->dev_type != HCI_BREDR) {
2572                 err = -EOPNOTSUPP;
2573                 goto done;
2574         }
2575
2576         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2577                 err = -EOPNOTSUPP;
2578                 goto done;
2579         }
2580
2581         switch (cmd) {
2582         case HCISETAUTH:
2583                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2584                                    HCI_INIT_TIMEOUT);
2585                 break;
2586
2587         case HCISETENCRYPT:
2588                 if (!lmp_encrypt_capable(hdev)) {
2589                         err = -EOPNOTSUPP;
2590                         break;
2591                 }
2592
2593                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2594                         /* Auth must be enabled first */
2595                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2596                                            HCI_INIT_TIMEOUT);
2597                         if (err)
2598                                 break;
2599                 }
2600
2601                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2602                                    HCI_INIT_TIMEOUT);
2603                 break;
2604
2605         case HCISETSCAN:
2606                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2607                                    HCI_INIT_TIMEOUT);
2608                 break;
2609
2610         case HCISETLINKPOL:
2611                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2612                                    HCI_INIT_TIMEOUT);
2613                 break;
2614
2615         case HCISETLINKMODE:
2616                 hdev->link_mode = ((__u16) dr.dev_opt) &
2617                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2618                 break;
2619
2620         case HCISETPTYPE:
2621                 hdev->pkt_type = (__u16) dr.dev_opt;
2622                 break;
2623
2624         case HCISETACLMTU:
2625                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2626                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2627                 break;
2628
2629         case HCISETSCOMTU:
2630                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2631                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2632                 break;
2633
2634         default:
2635                 err = -EINVAL;
2636                 break;
2637         }
2638
2639 done:
2640         hci_dev_put(hdev);
2641         return err;
2642 }
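
/* Editor's sketch: userspace drives these commands through struct
 * hci_dev_req, e.g. enabling page and inquiry scan the way hciconfig's
 * "piscan" does (hypothetical example; sk as in the sketches above):
 */
#if 0	/* userspace sketch */
struct hci_dev_req dr = {
	.dev_id  = 0,				/* hci0 */
	.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
};

if (ioctl(sk, HCISETSCAN, &dr) < 0)
	perror("HCISETSCAN");
#endif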
2643
2644 int hci_get_dev_list(void __user *arg)
2645 {
2646         struct hci_dev *hdev;
2647         struct hci_dev_list_req *dl;
2648         struct hci_dev_req *dr;
2649         int n = 0, size, err;
2650         __u16 dev_num;
2651
2652         if (get_user(dev_num, (__u16 __user *) arg))
2653                 return -EFAULT;
2654
2655         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2656                 return -EINVAL;
2657
2658         size = sizeof(*dl) + dev_num * sizeof(*dr);
2659
2660         dl = kzalloc(size, GFP_KERNEL);
2661         if (!dl)
2662                 return -ENOMEM;
2663
2664         dr = dl->dev_req;
2665
2666         read_lock(&hci_dev_list_lock);
2667         list_for_each_entry(hdev, &hci_dev_list, list) {
2668                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2669                         cancel_delayed_work(&hdev->power_off);
2670
2671                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2672                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2673
2674                 (dr + n)->dev_id  = hdev->id;
2675                 (dr + n)->dev_opt = hdev->flags;
2676
2677                 if (++n >= dev_num)
2678                         break;
2679         }
2680         read_unlock(&hci_dev_list_lock);
2681
2682         dl->dev_num = n;
2683         size = sizeof(*dl) + n * sizeof(*dr);
2684
2685         err = copy_to_user(arg, dl, size);
2686         kfree(dl);
2687
2688         return err ? -EFAULT : 0;
2689 }
2690
2691 int hci_get_dev_info(void __user *arg)
2692 {
2693         struct hci_dev *hdev;
2694         struct hci_dev_info di;
2695         int err = 0;
2696
2697         if (copy_from_user(&di, arg, sizeof(di)))
2698                 return -EFAULT;
2699
2700         hdev = hci_dev_get(di.dev_id);
2701         if (!hdev)
2702                 return -ENODEV;
2703
2704         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2705                 cancel_delayed_work_sync(&hdev->power_off);
2706
2707         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2709
2710         strcpy(di.name, hdev->name);
2711         di.bdaddr   = hdev->bdaddr;
2712         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2713         di.flags    = hdev->flags;
2714         di.pkt_type = hdev->pkt_type;
2715         if (lmp_bredr_capable(hdev)) {
2716                 di.acl_mtu  = hdev->acl_mtu;
2717                 di.acl_pkts = hdev->acl_pkts;
2718                 di.sco_mtu  = hdev->sco_mtu;
2719                 di.sco_pkts = hdev->sco_pkts;
2720         } else {
2721                 di.acl_mtu  = hdev->le_mtu;
2722                 di.acl_pkts = hdev->le_pkts;
2723                 di.sco_mtu  = 0;
2724                 di.sco_pkts = 0;
2725         }
2726         di.link_policy = hdev->link_policy;
2727         di.link_mode   = hdev->link_mode;
2728
2729         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2730         memcpy(&di.features, &hdev->features, sizeof(di.features));
2731
2732         if (copy_to_user(arg, &di, sizeof(di)))
2733                 err = -EFAULT;
2734
2735         hci_dev_put(hdev);
2736
2737         return err;
2738 }
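
/* Editor's sketch: the corresponding userspace query (hypothetical
 * example) only fills in dev_id and reads everything else back:
 */
#if 0	/* userspace sketch */
struct hci_dev_info di = { .dev_id = 0 };

if (ioctl(sk, HCIGETDEVINFO, &di) == 0)
	printf("%s: flags 0x%x acl_mtu %u\n", di.name, di.flags,
	       (unsigned int) di.acl_mtu);
#endif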
2739
2740 /* ---- Interface to HCI drivers ---- */
2741
2742 static int hci_rfkill_set_block(void *data, bool blocked)
2743 {
2744         struct hci_dev *hdev = data;
2745
2746         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2747
2748         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2749                 return -EBUSY;
2750
2751         if (blocked) {
2752                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2753                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2754                         hci_dev_do_close(hdev);
2755         } else {
2756                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2757         }
2758
2759         return 0;
2760 }
2761
2762 static const struct rfkill_ops hci_rfkill_ops = {
2763         .set_block = hci_rfkill_set_block,
2764 };
2765
2766 static void hci_power_on(struct work_struct *work)
2767 {
2768         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2769         int err;
2770
2771         BT_DBG("%s", hdev->name);
2772
2773         err = hci_dev_do_open(hdev);
2774         if (err < 0) {
2775                 mgmt_set_powered_failed(hdev, err);
2776                 return;
2777         }
2778
2779         /* During the HCI setup phase, a few error conditions are
2780          * ignored and they need to be checked now. If they are still
2781          * in effect, it is important to turn the device back off.
2782          */
2783         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2784             (hdev->dev_type == HCI_BREDR &&
2785              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2786              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2787                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2788                 hci_dev_do_close(hdev);
2789         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2790                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2791                                    HCI_AUTO_OFF_TIMEOUT);
2792         }
2793
2794         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2795                 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2796                         mgmt_index_added(hdev);
2797         }
2798 }
2799
2800 static void hci_power_off(struct work_struct *work)
2801 {
2802         struct hci_dev *hdev = container_of(work, struct hci_dev,
2803                                             power_off.work);
2804
2805         BT_DBG("%s", hdev->name);
2806
2807         hci_dev_do_close(hdev);
2808 }
2809
2810 static void hci_discov_off(struct work_struct *work)
2811 {
2812         struct hci_dev *hdev;
2813
2814         hdev = container_of(work, struct hci_dev, discov_off.work);
2815
2816         BT_DBG("%s", hdev->name);
2817
2818         mgmt_discoverable_timeout(hdev);
2819 }
2820
2821 void hci_uuids_clear(struct hci_dev *hdev)
2822 {
2823         struct bt_uuid *uuid, *tmp;
2824
2825         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2826                 list_del(&uuid->list);
2827                 kfree(uuid);
2828         }
2829 }
2830
2831 void hci_link_keys_clear(struct hci_dev *hdev)
2832 {
2833         struct list_head *p, *n;
2834
2835         list_for_each_safe(p, n, &hdev->link_keys) {
2836                 struct link_key *key;
2837
2838                 key = list_entry(p, struct link_key, list);
2839
2840                 list_del(p);
2841                 kfree(key);
2842         }
2843 }
2844
2845 void hci_smp_ltks_clear(struct hci_dev *hdev)
2846 {
2847         struct smp_ltk *k, *tmp;
2848
2849         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2850                 list_del(&k->list);
2851                 kfree(k);
2852         }
2853 }
2854
2855 void hci_smp_irks_clear(struct hci_dev *hdev)
2856 {
2857         struct smp_irk *k, *tmp;
2858
2859         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2860                 list_del(&k->list);
2861                 kfree(k);
2862         }
2863 }
2864
2865 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2866 {
2867         struct link_key *k;
2868
2869         list_for_each_entry(k, &hdev->link_keys, list)
2870                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2871                         return k;
2872
2873         return NULL;
2874 }
2875
2876 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2877                                u8 key_type, u8 old_key_type)
2878 {
2879         /* Legacy key */
2880         if (key_type < 0x03)
2881                 return true;
2882
2883         /* Debug keys are insecure so don't store them persistently */
2884         if (key_type == HCI_LK_DEBUG_COMBINATION)
2885                 return false;
2886
2887         /* Changed combination key and there's no previous one */
2888         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2889                 return false;
2890
2891         /* Security mode 3 case */
2892         if (!conn)
2893                 return true;
2894
2895         /* Neither the local nor the remote side requested no-bonding */
2896         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2897                 return true;
2898
2899         /* Local side had dedicated bonding as requirement */
2900         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2901                 return true;
2902
2903         /* Remote side had dedicated bonding as requirement */
2904         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2905                 return true;
2906
2907         /* If none of the above criteria match, then don't store the key
2908          * persistently */
2909         return false;
2910 }
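
/* Editor's note, worked examples of the rules above (link key type and
 * authentication requirement values per the Bluetooth HCI encodings):
 *
 *   key_type 0x00 (Combination), any connection    -> stored (legacy)
 *   key_type 0x03 (Debug Combination)              -> never stored
 *   key_type 0x06 (Changed Combination), old 0xff  -> not stored
 *   conn == NULL (security mode 3)                 -> stored
 *   auth_type and remote_auth both > 0x01          -> stored
 */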
2911
2912 static bool ltk_type_master(u8 type)
2913 {
2914         return (type == SMP_LTK);
2915 }
2916
2917 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2918                              bool master)
2919 {
2920         struct smp_ltk *k;
2921
2922         list_for_each_entry(k, &hdev->long_term_keys, list) {
2923                 if (k->ediv != ediv || k->rand != rand)
2924                         continue;
2925
2926                 if (ltk_type_master(k->type) != master)
2927                         continue;
2928
2929                 return k;
2930         }
2931
2932         return NULL;
2933 }
2934
2935 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2936                                      u8 addr_type, bool master)
2937 {
2938         struct smp_ltk *k;
2939
2940         list_for_each_entry(k, &hdev->long_term_keys, list)
2941                 if (addr_type == k->bdaddr_type &&
2942                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2943                     ltk_type_master(k->type) == master)
2944                         return k;
2945
2946         return NULL;
2947 }
2948
2949 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2950 {
2951         struct smp_irk *irk;
2952
2953         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2954                 if (!bacmp(&irk->rpa, rpa))
2955                         return irk;
2956         }
2957
2958         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2959                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2960                         bacpy(&irk->rpa, rpa);
2961                         return irk;
2962                 }
2963         }
2964
2965         return NULL;
2966 }
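
/* Editor's note: the lookup above is two-pass on purpose. The first
 * loop is a cheap compare against the last RPA each IRK was matched
 * with; only on a miss does the second loop run the AES-based
 * smp_irk_matches() check (an RPA embeds a 24-bit hash of its 24-bit
 * random part, computed with the IRK) and then caches the resolved
 * RPA in irk->rpa for the next lookup.
 */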
2967
2968 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2969                                      u8 addr_type)
2970 {
2971         struct smp_irk *irk;
2972
2973         /* Identity Address must be public or static random */
2974         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2975                 return NULL;
2976
2977         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978                 if (addr_type == irk->addr_type &&
2979                     bacmp(bdaddr, &irk->bdaddr) == 0)
2980                         return irk;
2981         }
2982
2983         return NULL;
2984 }
2985
2986 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2987                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2988                                   u8 pin_len, bool *persistent)
2989 {
2990         struct link_key *key, *old_key;
2991         u8 old_key_type;
2992
2993         old_key = hci_find_link_key(hdev, bdaddr);
2994         if (old_key) {
2995                 old_key_type = old_key->type;
2996                 key = old_key;
2997         } else {
2998                 old_key_type = conn ? conn->key_type : 0xff;
2999                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3000                 if (!key)
3001                         return NULL;
3002                 list_add(&key->list, &hdev->link_keys);
3003         }
3004
3005         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3006
3007         /* Some buggy controller combinations generate a changed
3008          * combination key for legacy pairing even when there's no
3009          * previous key */
3010         if (type == HCI_LK_CHANGED_COMBINATION &&
3011             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3012                 type = HCI_LK_COMBINATION;
3013                 if (conn)
3014                         conn->key_type = type;
3015         }
3016
3017         bacpy(&key->bdaddr, bdaddr);
3018         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3019         key->pin_len = pin_len;
3020
3021         if (type == HCI_LK_CHANGED_COMBINATION)
3022                 key->type = old_key_type;
3023         else
3024                 key->type = type;
3025
3026         if (persistent)
3027                 *persistent = hci_persistent_key(hdev, conn, type,
3028                                                  old_key_type);
3029
3030         return key;
3031 }
3032
3033 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3034                             u8 addr_type, u8 type, u8 authenticated,
3035                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3036 {
3037         struct smp_ltk *key, *old_key;
3038         bool master = ltk_type_master(type);
3039
3040         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3041         if (old_key)
3042                 key = old_key;
3043         else {
3044                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3045                 if (!key)
3046                         return NULL;
3047                 list_add(&key->list, &hdev->long_term_keys);
3048         }
3049
3050         bacpy(&key->bdaddr, bdaddr);
3051         key->bdaddr_type = addr_type;
3052         memcpy(key->val, tk, sizeof(key->val));
3053         key->authenticated = authenticated;
3054         key->ediv = ediv;
3055         key->rand = rand;
3056         key->enc_size = enc_size;
3057         key->type = type;
3058
3059         return key;
3060 }
3061
3062 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3063                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3064 {
3065         struct smp_irk *irk;
3066
3067         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3068         if (!irk) {
3069                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3070                 if (!irk)
3071                         return NULL;
3072
3073                 bacpy(&irk->bdaddr, bdaddr);
3074                 irk->addr_type = addr_type;
3075
3076                 list_add(&irk->list, &hdev->identity_resolving_keys);
3077         }
3078
3079         memcpy(irk->val, val, 16);
3080         bacpy(&irk->rpa, rpa);
3081
3082         return irk;
3083 }
3084
3085 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3086 {
3087         struct link_key *key;
3088
3089         key = hci_find_link_key(hdev, bdaddr);
3090         if (!key)
3091                 return -ENOENT;
3092
3093         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3094
3095         list_del(&key->list);
3096         kfree(key);
3097
3098         return 0;
3099 }
3100
3101 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3102 {
3103         struct smp_ltk *k, *tmp;
3104         int removed = 0;
3105
3106         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3107                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3108                         continue;
3109
3110                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3111
3112                 list_del(&k->list);
3113                 kfree(k);
3114                 removed++;
3115         }
3116
3117         return removed ? 0 : -ENOENT;
3118 }
3119
3120 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3121 {
3122         struct smp_irk *k, *tmp;
3123
3124         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3125                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3126                         continue;
3127
3128                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3129
3130                 list_del(&k->list);
3131                 kfree(k);
3132         }
3133 }
3134
3135 /* HCI command timer function */
3136 static void hci_cmd_timeout(struct work_struct *work)
3137 {
3138         struct hci_dev *hdev = container_of(work, struct hci_dev,
3139                                             cmd_timer.work);
3140
3141         if (hdev->sent_cmd) {
3142                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3143                 u16 opcode = __le16_to_cpu(sent->opcode);
3144
3145                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3146         } else {
3147                 BT_ERR("%s command tx timeout", hdev->name);
3148         }
3149
3150         atomic_set(&hdev->cmd_cnt, 1);
3151         queue_work(hdev->workqueue, &hdev->cmd_work);
3152 }
3153
3154 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3155                                           bdaddr_t *bdaddr)
3156 {
3157         struct oob_data *data;
3158
3159         list_for_each_entry(data, &hdev->remote_oob_data, list)
3160                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3161                         return data;
3162
3163         return NULL;
3164 }
3165
3166 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3167 {
3168         struct oob_data *data;
3169
3170         data = hci_find_remote_oob_data(hdev, bdaddr);
3171         if (!data)
3172                 return -ENOENT;
3173
3174         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3175
3176         list_del(&data->list);
3177         kfree(data);
3178
3179         return 0;
3180 }
3181
3182 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3183 {
3184         struct oob_data *data, *n;
3185
3186         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3187                 list_del(&data->list);
3188                 kfree(data);
3189         }
3190 }
3191
3192 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3193                             u8 *hash, u8 *randomizer)
3194 {
3195         struct oob_data *data;
3196
3197         data = hci_find_remote_oob_data(hdev, bdaddr);
3198         if (!data) {
3199                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3200                 if (!data)
3201                         return -ENOMEM;
3202
3203                 bacpy(&data->bdaddr, bdaddr);
3204                 list_add(&data->list, &hdev->remote_oob_data);
3205         }
3206
3207         memcpy(data->hash192, hash, sizeof(data->hash192));
3208         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3209
3210         memset(data->hash256, 0, sizeof(data->hash256));
3211         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3212
3213         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3214
3215         return 0;
3216 }
3217
3218 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3219                                 u8 *hash192, u8 *randomizer192,
3220                                 u8 *hash256, u8 *randomizer256)
3221 {
3222         struct oob_data *data;
3223
3224         data = hci_find_remote_oob_data(hdev, bdaddr);
3225         if (!data) {
3226                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3227                 if (!data)
3228                         return -ENOMEM;
3229
3230                 bacpy(&data->bdaddr, bdaddr);
3231                 list_add(&data->list, &hdev->remote_oob_data);
3232         }
3233
3234         memcpy(data->hash192, hash192, sizeof(data->hash192));
3235         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3236
3237         memcpy(data->hash256, hash256, sizeof(data->hash256));
3238         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3239
3240         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3241
3242         return 0;
3243 }
3244
3245 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3246                                          bdaddr_t *bdaddr, u8 type)
3247 {
3248         struct bdaddr_list *b;
3249
3250         list_for_each_entry(b, &hdev->blacklist, list) {
3251                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3252                         return b;
3253         }
3254
3255         return NULL;
3256 }
3257
3258 static void hci_blacklist_clear(struct hci_dev *hdev)
3259 {
3260         struct list_head *p, *n;
3261
3262         list_for_each_safe(p, n, &hdev->blacklist) {
3263                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3264
3265                 list_del(p);
3266                 kfree(b);
3267         }
3268 }
3269
3270 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3271 {
3272         struct bdaddr_list *entry;
3273
3274         if (!bacmp(bdaddr, BDADDR_ANY))
3275                 return -EBADF;
3276
3277         if (hci_blacklist_lookup(hdev, bdaddr, type))
3278                 return -EEXIST;
3279
3280         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3281         if (!entry)
3282                 return -ENOMEM;
3283
3284         bacpy(&entry->bdaddr, bdaddr);
3285         entry->bdaddr_type = type;
3286
3287         list_add(&entry->list, &hdev->blacklist);
3288
3289         return mgmt_device_blocked(hdev, bdaddr, type);
3290 }
3291
3292 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3293 {
3294         struct bdaddr_list *entry;
3295
3296         if (!bacmp(bdaddr, BDADDR_ANY)) {
3297                 hci_blacklist_clear(hdev);
3298                 return 0;
3299         }
3300
3301         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3302         if (!entry)
3303                 return -ENOENT;
3304
3305         list_del(&entry->list);
3306         kfree(entry);
3307
3308         return mgmt_device_unblocked(hdev, bdaddr, type);
3309 }
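
/* Usage sketch (hypothetical caller, under hdev->lock like the other
 * list helpers in this file):
 *
 *        err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *        ...
 *        err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 *
 * Note that passing BDADDR_ANY to hci_blacklist_del() flushes the whole
 * blacklist rather than removing a single entry.
 */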
3310
3311 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3312                                           bdaddr_t *bdaddr, u8 type)
3313 {
3314         struct bdaddr_list *b;
3315
3316         list_for_each_entry(b, &hdev->le_white_list, list) {
3317                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3318                         return b;
3319         }
3320
3321         return NULL;
3322 }
3323
3324 void hci_white_list_clear(struct hci_dev *hdev)
3325 {
3326         struct list_head *p, *n;
3327
3328         list_for_each_safe(p, n, &hdev->le_white_list) {
3329                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3330
3331                 list_del(p);
3332                 kfree(b);
3333         }
3334 }
3335
3336 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3337 {
3338         struct bdaddr_list *entry;
3339
3340         if (!bacmp(bdaddr, BDADDR_ANY))
3341                 return -EBADF;
3342
3343         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3344         if (!entry)
3345                 return -ENOMEM;
3346
3347         bacpy(&entry->bdaddr, bdaddr);
3348         entry->bdaddr_type = type;
3349
3350         list_add(&entry->list, &hdev->le_white_list);
3351
3352         return 0;
3353 }
3354
3355 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3356 {
3357         struct bdaddr_list *entry;
3358
3359         if (!bacmp(bdaddr, BDADDR_ANY))
3360                 return -EBADF;
3361
3362         entry = hci_white_list_lookup(hdev, bdaddr, type);
3363         if (!entry)
3364                 return -ENOENT;
3365
3366         list_del(&entry->list);
3367         kfree(entry);
3368
3369         return 0;
3370 }
3371
3372 /* This function requires the caller holds hdev->lock */
3373 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3374                                                bdaddr_t *addr, u8 addr_type)
3375 {
3376         struct hci_conn_params *params;
3377
3378         list_for_each_entry(params, &hdev->le_conn_params, list) {
3379                 if (bacmp(&params->addr, addr) == 0 &&
3380                     params->addr_type == addr_type) {
3381                         return params;
3382                 }
3383         }
3384
3385         return NULL;
3386 }
3387
3388 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3389 {
3390         struct hci_conn *conn;
3391
3392         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3393         if (!conn)
3394                 return false;
3395
3396         if (conn->dst_type != type)
3397                 return false;
3398
3399         if (conn->state != BT_CONNECTED)
3400                 return false;
3401
3402         return true;
3403 }
3404
3405 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3406 {
3407         if (addr_type == ADDR_LE_DEV_PUBLIC)
3408                 return true;
3409
3410         /* Check for Random Static address type */
3411         if ((addr->b[5] & 0xc0) == 0xc0)
3412                 return true;
3413
3414         return false;
3415 }
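
/* Worked example: a random static address is defined by the two most
 * significant bits of its most significant byte being set, i.e. the
 * printed address starts with an octet in the C0..FF range. Since
 * bdaddr_t stores the address little-endian, that most significant byte
 * is b[5], hence the 0xc0 mask above.
 */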
3416
3417 /* This function requires the caller holds hdev->lock */
3418 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3419                                             bdaddr_t *addr, u8 addr_type)
3420 {
3421         struct bdaddr_list *entry;
3422
3423         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3424                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3425                     entry->bdaddr_type == addr_type)
3426                         return entry;
3427         }
3428
3429         return NULL;
3430 }
3431
3432 /* This function requires the caller holds hdev->lock */
3433 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3434 {
3435         struct bdaddr_list *entry;
3436
3437         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3438         if (entry)
3439                 goto done;
3440
3441         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3442         if (!entry) {
3443                 BT_ERR("Out of memory");
3444                 return;
3445         }
3446
3447         bacpy(&entry->bdaddr, addr);
3448         entry->bdaddr_type = addr_type;
3449
3450         list_add(&entry->list, &hdev->pend_le_conns);
3451
3452         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3453
3454 done:
3455         hci_update_background_scan(hdev);
3456 }
3457
3458 /* This function requires the caller holds hdev->lock */
3459 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3460 {
3461         struct bdaddr_list *entry;
3462
3463         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3464         if (!entry)
3465                 goto done;
3466
3467         list_del(&entry->list);
3468         kfree(entry);
3469
3470         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3471
3472 done:
3473         hci_update_background_scan(hdev);
3474 }
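
/* Both the add and del paths above deliberately funnel into
 * hci_update_background_scan(), so the controller's passive scan state
 * always tracks the current set of pending LE connections.
 */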
3475
3476 /* This function requires the caller holds hdev->lock */
3477 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3478 {
3479         struct bdaddr_list *entry, *tmp;
3480
3481         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3482                 list_del(&entry->list);
3483                 kfree(entry);
3484         }
3485
3486         BT_DBG("All LE pending connections cleared");
3487 }
3488
3489 /* This function requires the caller holds hdev->lock */
3490 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491                         u8 auto_connect, u16 conn_min_interval,
3492                         u16 conn_max_interval)
3493 {
3494         struct hci_conn_params *params;
3495
3496         if (!is_identity_address(addr, addr_type))
3497                 return -EINVAL;
3498
3499         params = hci_conn_params_lookup(hdev, addr, addr_type);
3500         if (params)
3501                 goto update;
3502
3503         params = kzalloc(sizeof(*params), GFP_KERNEL);
3504         if (!params) {
3505                 BT_ERR("Out of memory");
3506                 return -ENOMEM;
3507         }
3508
3509         bacpy(&params->addr, addr);
3510         params->addr_type = addr_type;
3511
3512         list_add(&params->list, &hdev->le_conn_params);
3513
3514 update:
3515         params->conn_min_interval = conn_min_interval;
3516         params->conn_max_interval = conn_max_interval;
3517         params->auto_connect = auto_connect;
3518
3519         switch (auto_connect) {
3520         case HCI_AUTO_CONN_DISABLED:
3521         case HCI_AUTO_CONN_LINK_LOSS:
3522                 hci_pend_le_conn_del(hdev, addr, addr_type);
3523                 break;
3524         case HCI_AUTO_CONN_ALWAYS:
3525                 if (!is_connected(hdev, addr, addr_type))
3526                         hci_pend_le_conn_add(hdev, addr, addr_type);
3527                 break;
3528         }
3529
3530         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3531                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3532                conn_min_interval, conn_max_interval);
3533
3534         return 0;
3535 }
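
/* Usage sketch (hypothetical values; the caller must hold hdev->lock):
 * request automatic reconnection to a known LE device, using the default
 * connection interval range seen elsewhere in this file.
 *
 *        err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *                                  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 */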
3536
3537 /* This function requires the caller holds hdev->lock */
3538 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3539 {
3540         struct hci_conn_params *params;
3541
3542         params = hci_conn_params_lookup(hdev, addr, addr_type);
3543         if (!params)
3544                 return;
3545
3546         hci_pend_le_conn_del(hdev, addr, addr_type);
3547
3548         list_del(&params->list);
3549         kfree(params);
3550
3551         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552 }
3553
3554 /* This function requires the caller holds hdev->lock */
3555 void hci_conn_params_clear(struct hci_dev *hdev)
3556 {
3557         struct hci_conn_params *params, *tmp;
3558
3559         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3560                 list_del(&params->list);
3561                 kfree(params);
3562         }
3563
3564         BT_DBG("All LE connection parameters were removed");
3565 }
3566
3567 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3568 {
3569         if (status) {
3570                 BT_ERR("Failed to start inquiry: status %d", status);
3571
3572                 hci_dev_lock(hdev);
3573                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3574                 hci_dev_unlock(hdev);
3575                 return;
3576         }
3577 }
3578
3579 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3580 {
3581         /* General inquiry access code (GIAC) */
3582         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3583         struct hci_request req;
3584         struct hci_cp_inquiry cp;
3585         int err;
3586
3587         if (status) {
3588                 BT_ERR("Failed to disable LE scanning: status %d", status);
3589                 return;
3590         }
3591
3592         switch (hdev->discovery.type) {
3593         case DISCOV_TYPE_LE:
3594                 hci_dev_lock(hdev);
3595                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3596                 hci_dev_unlock(hdev);
3597                 break;
3598
3599         case DISCOV_TYPE_INTERLEAVED:
3600                 hci_req_init(&req, hdev);
3601
3602                 memset(&cp, 0, sizeof(cp));
3603                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3604                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3605                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3606
3607                 hci_dev_lock(hdev);
3608
3609                 hci_inquiry_cache_flush(hdev);
3610
3611                 err = hci_req_run(&req, inquiry_complete);
3612                 if (err) {
3613                         BT_ERR("Inquiry request failed: err %d", err);
3614                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3615                 }
3616
3617                 hci_dev_unlock(hdev);
3618                 break;
3619         }
3620 }
3621
3622 static void le_scan_disable_work(struct work_struct *work)
3623 {
3624         struct hci_dev *hdev = container_of(work, struct hci_dev,
3625                                             le_scan_disable.work);
3626         struct hci_request req;
3627         int err;
3628
3629         BT_DBG("%s", hdev->name);
3630
3631         hci_req_init(&req, hdev);
3632
3633         hci_req_add_le_scan_disable(&req);
3634
3635         err = hci_req_run(&req, le_scan_disable_work_complete);
3636         if (err)
3637                 BT_ERR("Disable LE scanning request failed: err %d", err);
3638 }
3639
3640 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3641 {
3642         struct hci_dev *hdev = req->hdev;
3643
3644         /* If we're advertising or initiating an LE connection we can't
3645          * go ahead and change the random address at this time. This is
3646          * because the eventual initiator address used for the
3647          * subsequently created connection will be undefined (some
3648          * controllers use the new address and others the one we had
3649          * when the operation started).
3650          *
3651          * In this kind of scenario, skip the update and let the random
3652          * address be updated at the next cycle.
3653          */
3654         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3655             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3656                 BT_DBG("Deferring random address update");
3657                 return;
3658         }
3659
3660         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3661 }
3662
3663 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3664                               u8 *own_addr_type)
3665 {
3666         struct hci_dev *hdev = req->hdev;
3667         int err;
3668
3669         /* If privacy is enabled, use a resolvable private address. If
3670          * the current RPA has expired, or something other than the
3671          * current RPA is in use, generate a new one.
3672          */
3673         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3674                 int to;
3675
3676                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3677
3678                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3679                     !bacmp(&hdev->random_addr, &hdev->rpa))
3680                         return 0;
3681
3682                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3683                 if (err < 0) {
3684                         BT_ERR("%s failed to generate new RPA", hdev->name);
3685                         return err;
3686                 }
3687
3688                 set_random_addr(req, &hdev->rpa);
3689
3690                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3691                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3692
3693                 return 0;
3694         }
3695
3696         /* In case of required privacy without a resolvable private address,
3697          * use an unresolvable private address. This is useful for active
3698          * scanning and non-connectable advertising.
3699          */
3700         if (require_privacy) {
3701                 bdaddr_t urpa;
3702
3703                 get_random_bytes(&urpa, 6);
3704                 urpa.b[5] &= 0x3f;      /* Clear the two most significant bits */
3705
3706                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3707                 set_random_addr(req, &urpa);
3708                 return 0;
3709         }
3710
3711         /* If forcing the static address is in use, or there is no public
3712          * address, use the static address as the random address (but
3713          * skip the HCI command if the current random address is already
3714          * the static one).
3715          */
3716         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3717             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3718                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3719                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3720                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3721                                     &hdev->static_addr);
3722                 return 0;
3723         }
3724
3725         /* Neither privacy nor static address is being used so use a
3726          * public address.
3727          */
3728         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3729
3730         return 0;
3731 }
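
/* Sketch of the expected calling pattern, based on the request API used
 * throughout this file (the callback name is hypothetical): decide the
 * own address type while building an LE request, then run the request.
 *
 *        struct hci_request req;
 *        u8 own_addr_type;
 *        int err;
 *
 *        hci_req_init(&req, hdev);
 *        err = hci_update_random_address(&req, true, &own_addr_type);
 *        if (!err) {
 *                ... use own_addr_type in the scan/adv parameters ...
 *                hci_req_run(&req, my_complete);
 *        }
 */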
3732
3733 /* Copy the Identity Address of the controller.
3734  *
3735  * If the controller has a public BD_ADDR, then by default use that one.
3736  * If this is an LE-only controller without a public address, default to
3737  * the static random address.
3738  *
3739  * For debugging purposes it is possible to force controllers with a
3740  * public address to use the static random address instead.
3741  */
3742 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3743                                u8 *bdaddr_type)
3744 {
3745         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3746             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3747                 bacpy(bdaddr, &hdev->static_addr);
3748                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3749         } else {
3750                 bacpy(bdaddr, &hdev->bdaddr);
3751                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3752         }
3753 }
3754
3755 /* Alloc HCI device */
3756 struct hci_dev *hci_alloc_dev(void)
3757 {
3758         struct hci_dev *hdev;
3759
3760         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3761         if (!hdev)
3762                 return NULL;
3763
3764         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3765         hdev->esco_type = (ESCO_HV1);
3766         hdev->link_mode = (HCI_LM_ACCEPT);
3767         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3768         hdev->io_capability = 0x03;     /* No Input No Output */
3769         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3770         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3771
3772         hdev->sniff_max_interval = 800;
3773         hdev->sniff_min_interval = 80;
3774
3775         hdev->le_adv_channel_map = 0x07;
3776         hdev->le_scan_interval = 0x0060;
3777         hdev->le_scan_window = 0x0030;
3778         hdev->le_conn_min_interval = 0x0028;
3779         hdev->le_conn_max_interval = 0x0038;
3780
3781         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3782         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3783         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3784         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3785
3786         mutex_init(&hdev->lock);
3787         mutex_init(&hdev->req_lock);
3788
3789         INIT_LIST_HEAD(&hdev->mgmt_pending);
3790         INIT_LIST_HEAD(&hdev->blacklist);
3791         INIT_LIST_HEAD(&hdev->uuids);
3792         INIT_LIST_HEAD(&hdev->link_keys);
3793         INIT_LIST_HEAD(&hdev->long_term_keys);
3794         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3795         INIT_LIST_HEAD(&hdev->remote_oob_data);
3796         INIT_LIST_HEAD(&hdev->le_white_list);
3797         INIT_LIST_HEAD(&hdev->le_conn_params);
3798         INIT_LIST_HEAD(&hdev->pend_le_conns);
3799         INIT_LIST_HEAD(&hdev->conn_hash.list);
3800
3801         INIT_WORK(&hdev->rx_work, hci_rx_work);
3802         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3803         INIT_WORK(&hdev->tx_work, hci_tx_work);
3804         INIT_WORK(&hdev->power_on, hci_power_on);
3805
3806         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3807         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3808         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3809
3810         skb_queue_head_init(&hdev->rx_q);
3811         skb_queue_head_init(&hdev->cmd_q);
3812         skb_queue_head_init(&hdev->raw_q);
3813
3814         init_waitqueue_head(&hdev->req_wait_q);
3815
3816         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3817
3818         hci_init_sysfs(hdev);
3819         discovery_init(hdev);
3820
3821         return hdev;
3822 }
3823 EXPORT_SYMBOL(hci_alloc_dev);
3824
3825 /* Free HCI device */
3826 void hci_free_dev(struct hci_dev *hdev)
3827 {
3828         /* Will be freed via the device release callback */
3829         put_device(&hdev->dev);
3830 }
3831 EXPORT_SYMBOL(hci_free_dev);
3832
3833 /* Register HCI device */
3834 int hci_register_dev(struct hci_dev *hdev)
3835 {
3836         int id, error;
3837
3838         if (!hdev->open || !hdev->close)
3839                 return -EINVAL;
3840
3841         /* Do not allow HCI_AMP devices to register at index 0,
3842          * so the index can be used as the AMP controller ID.
3843          */
3844         switch (hdev->dev_type) {
3845         case HCI_BREDR:
3846                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3847                 break;
3848         case HCI_AMP:
3849                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3850                 break;
3851         default:
3852                 return -EINVAL;
3853         }
3854
3855         if (id < 0)
3856                 return id;
3857
3858         sprintf(hdev->name, "hci%d", id);
3859         hdev->id = id;
3860
3861         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3862
3863         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3864                                           WQ_MEM_RECLAIM, 1, hdev->name);
3865         if (!hdev->workqueue) {
3866                 error = -ENOMEM;
3867                 goto err;
3868         }
3869
3870         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871                                               WQ_MEM_RECLAIM, 1, hdev->name);
3872         if (!hdev->req_workqueue) {
3873                 destroy_workqueue(hdev->workqueue);
3874                 error = -ENOMEM;
3875                 goto err;
3876         }
3877
3878         if (!IS_ERR_OR_NULL(bt_debugfs))
3879                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3880
3881         dev_set_name(&hdev->dev, "%s", hdev->name);
3882
3883         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3884                                                CRYPTO_ALG_ASYNC);
3885         if (IS_ERR(hdev->tfm_aes)) {
3886                 BT_ERR("Unable to create crypto context");
3887                 error = PTR_ERR(hdev->tfm_aes);
3888                 hdev->tfm_aes = NULL;
3889                 goto err_wqueue;
3890         }
3891
3892         error = device_add(&hdev->dev);
3893         if (error < 0)
3894                 goto err_tfm;
3895
3896         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3897                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3898                                     hdev);
3899         if (hdev->rfkill) {
3900                 if (rfkill_register(hdev->rfkill) < 0) {
3901                         rfkill_destroy(hdev->rfkill);
3902                         hdev->rfkill = NULL;
3903                 }
3904         }
3905
3906         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3907                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3908
3909         set_bit(HCI_SETUP, &hdev->dev_flags);
3910         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3911
3912         if (hdev->dev_type == HCI_BREDR) {
3913                 /* Assume BR/EDR support until proven otherwise (such as
3914                  * through reading supported features during init).
3915                  */
3916                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3917         }
3918
3919         write_lock(&hci_dev_list_lock);
3920         list_add(&hdev->list, &hci_dev_list);
3921         write_unlock(&hci_dev_list_lock);
3922
3923         /* Devices that are marked for raw-only usage need to set
3924          * the HCI_RAW flag to indicate that only the user channel is
3925          * supported.
3926          */
3927         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3928                 set_bit(HCI_RAW, &hdev->flags);
3929
3930         hci_notify(hdev, HCI_DEV_REG);
3931         hci_dev_hold(hdev);
3932
3933         queue_work(hdev->req_workqueue, &hdev->power_on);
3934
3935         return id;
3936
3937 err_tfm:
3938         crypto_free_blkcipher(hdev->tfm_aes);
3939 err_wqueue:
3940         destroy_workqueue(hdev->workqueue);
3941         destroy_workqueue(hdev->req_workqueue);
3942 err:
3943         ida_simple_remove(&hci_index_ida, hdev->id);
3944
3945         return error;
3946 }
3947 EXPORT_SYMBOL(hci_register_dev);
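
/* Minimal driver-side sketch (hypothetical transport driver): wire up
 * the mandatory open/close callbacks plus send, then register. Exact
 * bus and quirk setup depends on the transport.
 *
 *        static int my_open(struct hci_dev *hdev) { return 0; }
 *        static int my_close(struct hci_dev *hdev) { return 0; }
 *
 *        static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
 *        {
 *                ... push skb out over the transport ...
 *                kfree_skb(skb);
 *                return 0;
 *        }
 *
 *        hdev = hci_alloc_dev();
 *        if (!hdev)
 *                return -ENOMEM;
 *        hdev->bus = HCI_VIRTUAL;
 *        hdev->open = my_open;
 *        hdev->close = my_close;
 *        hdev->send = my_send;
 *        err = hci_register_dev(hdev);
 *        if (err < 0)
 *                hci_free_dev(hdev);
 */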
3948
3949 /* Unregister HCI device */
3950 void hci_unregister_dev(struct hci_dev *hdev)
3951 {
3952         int i, id;
3953
3954         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3955
3956         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3957
3958         id = hdev->id;
3959
3960         write_lock(&hci_dev_list_lock);
3961         list_del(&hdev->list);
3962         write_unlock(&hci_dev_list_lock);
3963
3964         hci_dev_do_close(hdev);
3965
3966         for (i = 0; i < NUM_REASSEMBLY; i++)
3967                 kfree_skb(hdev->reassembly[i]);
3968
3969         cancel_work_sync(&hdev->power_on);
3970
3971         if (!test_bit(HCI_INIT, &hdev->flags) &&
3972             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3973             !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
3974                 hci_dev_lock(hdev);
3975                 mgmt_index_removed(hdev);
3976                 hci_dev_unlock(hdev);
3977         }
3978
3979         /* mgmt_index_removed should take care of emptying the
3980          * pending list */
3981         BUG_ON(!list_empty(&hdev->mgmt_pending));
3982
3983         hci_notify(hdev, HCI_DEV_UNREG);
3984
3985         if (hdev->rfkill) {
3986                 rfkill_unregister(hdev->rfkill);
3987                 rfkill_destroy(hdev->rfkill);
3988         }
3989
3990         if (hdev->tfm_aes)
3991                 crypto_free_blkcipher(hdev->tfm_aes);
3992
3993         device_del(&hdev->dev);
3994
3995         debugfs_remove_recursive(hdev->debugfs);
3996
3997         destroy_workqueue(hdev->workqueue);
3998         destroy_workqueue(hdev->req_workqueue);
3999
4000         hci_dev_lock(hdev);
4001         hci_blacklist_clear(hdev);
4002         hci_uuids_clear(hdev);
4003         hci_link_keys_clear(hdev);
4004         hci_smp_ltks_clear(hdev);
4005         hci_smp_irks_clear(hdev);
4006         hci_remote_oob_data_clear(hdev);
4007         hci_white_list_clear(hdev);
4008         hci_conn_params_clear(hdev);
4009         hci_pend_le_conns_clear(hdev);
4010         hci_dev_unlock(hdev);
4011
4012         hci_dev_put(hdev);
4013
4014         ida_simple_remove(&hci_index_ida, id);
4015 }
4016 EXPORT_SYMBOL(hci_unregister_dev);
4017
4018 /* Suspend HCI device */
4019 int hci_suspend_dev(struct hci_dev *hdev)
4020 {
4021         hci_notify(hdev, HCI_DEV_SUSPEND);
4022         return 0;
4023 }
4024 EXPORT_SYMBOL(hci_suspend_dev);
4025
4026 /* Resume HCI device */
4027 int hci_resume_dev(struct hci_dev *hdev)
4028 {
4029         hci_notify(hdev, HCI_DEV_RESUME);
4030         return 0;
4031 }
4032 EXPORT_SYMBOL(hci_resume_dev);
4033
4034 /* Receive frame from HCI drivers */
4035 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4036 {
4037         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4038                       && !test_bit(HCI_INIT, &hdev->flags))) {
4039                 kfree_skb(skb);
4040                 return -ENXIO;
4041         }
4042
4043         /* Incoming skb */
4044         bt_cb(skb)->incoming = 1;
4045
4046         /* Time stamp */
4047         __net_timestamp(skb);
4048
4049         skb_queue_tail(&hdev->rx_q, skb);
4050         queue_work(hdev->workqueue, &hdev->rx_work);
4051
4052         return 0;
4053 }
4054 EXPORT_SYMBOL(hci_recv_frame);
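
/* Driver-side sketch (buffer and length assumed to come from the
 * transport): wrap the received bytes in an skb, tag the packet type,
 * and hand the complete frame to the core.
 *
 *        skb = bt_skb_alloc(count, GFP_ATOMIC);
 *        if (!skb)
 *                return -ENOMEM;
 *        memcpy(skb_put(skb, count), data, count);
 *        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *        err = hci_recv_frame(hdev, skb);
 */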
4055
4056 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4057                           int count, __u8 index)
4058 {
4059         int len = 0;
4060         int hlen = 0;
4061         int remain = count;
4062         struct sk_buff *skb;
4063         struct bt_skb_cb *scb;
4064
4065         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4066             index >= NUM_REASSEMBLY)
4067                 return -EILSEQ;
4068
4069         skb = hdev->reassembly[index];
4070
4071         if (!skb) {
4072                 switch (type) {
4073                 case HCI_ACLDATA_PKT:
4074                         len = HCI_MAX_FRAME_SIZE;
4075                         hlen = HCI_ACL_HDR_SIZE;
4076                         break;
4077                 case HCI_EVENT_PKT:
4078                         len = HCI_MAX_EVENT_SIZE;
4079                         hlen = HCI_EVENT_HDR_SIZE;
4080                         break;
4081                 case HCI_SCODATA_PKT:
4082                         len = HCI_MAX_SCO_SIZE;
4083                         hlen = HCI_SCO_HDR_SIZE;
4084                         break;
4085                 }
4086
4087                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4088                 if (!skb)
4089                         return -ENOMEM;
4090
4091                 scb = (void *) skb->cb;
4092                 scb->expect = hlen;
4093                 scb->pkt_type = type;
4094
4095                 hdev->reassembly[index] = skb;
4096         }
4097
4098         while (count) {
4099                 scb = (void *) skb->cb;
4100                 len = min_t(uint, scb->expect, count);
4101
4102                 memcpy(skb_put(skb, len), data, len);
4103
4104                 count -= len;
4105                 data += len;
4106                 scb->expect -= len;
4107                 remain = count;
4108
4109                 switch (type) {
4110                 case HCI_EVENT_PKT:
4111                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4112                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4113                                 scb->expect = h->plen;
4114
4115                                 if (skb_tailroom(skb) < scb->expect) {
4116                                         kfree_skb(skb);
4117                                         hdev->reassembly[index] = NULL;
4118                                         return -ENOMEM;
4119                                 }
4120                         }
4121                         break;
4122
4123                 case HCI_ACLDATA_PKT:
4124                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4125                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4126                                 scb->expect = __le16_to_cpu(h->dlen);
4127
4128                                 if (skb_tailroom(skb) < scb->expect) {
4129                                         kfree_skb(skb);
4130                                         hdev->reassembly[index] = NULL;
4131                                         return -ENOMEM;
4132                                 }
4133                         }
4134                         break;
4135
4136                 case HCI_SCODATA_PKT:
4137                         if (skb->len == HCI_SCO_HDR_SIZE) {
4138                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4139                                 scb->expect = h->dlen;
4140
4141                                 if (skb_tailroom(skb) < scb->expect) {
4142                                         kfree_skb(skb);
4143                                         hdev->reassembly[index] = NULL;
4144                                         return -ENOMEM;
4145                                 }
4146                         }
4147                         break;
4148                 }
4149
4150                 if (scb->expect == 0) {
4151                         /* Complete frame */
4152
4153                         bt_cb(skb)->pkt_type = type;
4154                         hci_recv_frame(hdev, skb);
4155
4156                         hdev->reassembly[index] = NULL;
4157                         return remain;
4158                 }
4159         }
4160
4161         return remain;
4162 }
4163
4164 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4165 {
4166         int rem = 0;
4167
4168         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4169                 return -EILSEQ;
4170
4171         while (count) {
4172                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4173                 if (rem < 0)
4174                         return rem;
4175
4176                 data += (count - rem);
4177                 count = rem;
4178         }
4179
4180         return rem;
4181 }
4182 EXPORT_SYMBOL(hci_recv_fragment);
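
/* Driver-side sketch: unlike hci_recv_frame(), the data here may arrive
 * in arbitrary chunks; the state in hdev->reassembly[] tracks partial
 * headers and payloads across calls.
 *
 *        err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *        if (err < 0)
 *                BT_ERR("Frame reassembly failed (%d)", err);
 */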
4183
4184 #define STREAM_REASSEMBLY 0
4185
4186 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4187 {
4188         int type;
4189         int rem = 0;
4190
4191         while (count) {
4192                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4193
4194                 if (!skb) {
4195                         struct { char type; } *pkt;
4196
4197                         /* Start of the frame */
4198                         pkt = data;
4199                         type = pkt->type;
4200
4201                         data++;
4202                         count--;
4203                 } else
4204                         type = bt_cb(skb)->pkt_type;
4205
4206                 rem = hci_reassembly(hdev, type, data, count,
4207                                      STREAM_REASSEMBLY);
4208                 if (rem < 0)
4209                         return rem;
4210
4211                 data += (count - rem);
4212                 count = rem;
4213         }
4214
4215         return rem;
4216 }
4217 EXPORT_SYMBOL(hci_recv_stream_fragment);
4218
4219 /* ---- Interface to upper protocols ---- */
4220
4221 int hci_register_cb(struct hci_cb *cb)
4222 {
4223         BT_DBG("%p name %s", cb, cb->name);
4224
4225         write_lock(&hci_cb_list_lock);
4226         list_add(&cb->list, &hci_cb_list);
4227         write_unlock(&hci_cb_list_lock);
4228
4229         return 0;
4230 }
4231 EXPORT_SYMBOL(hci_register_cb);
4232
4233 int hci_unregister_cb(struct hci_cb *cb)
4234 {
4235         BT_DBG("%p name %s", cb, cb->name);
4236
4237         write_lock(&hci_cb_list_lock);
4238         list_del(&cb->list);
4239         write_unlock(&hci_cb_list_lock);
4240
4241         return 0;
4242 }
4243 EXPORT_SYMBOL(hci_unregister_cb);
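
/* Sketch of an upper-protocol hook (names hypothetical), following the
 * pattern that L2CAP and SCO use for their confirmation callbacks:
 *
 *        static void my_security_cfm(struct hci_conn *conn, __u8 status,
 *                                    __u8 encrypt)
 *        {
 *                ...
 *        }
 *
 *        static struct hci_cb my_cb = {
 *                .name           = "my_proto",
 *                .security_cfm   = my_security_cfm,
 *        };
 *
 *        hci_register_cb(&my_cb);
 *        ...
 *        hci_unregister_cb(&my_cb);
 */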
4244
4245 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4246 {
4247         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4248
4249         /* Time stamp */
4250         __net_timestamp(skb);
4251
4252         /* Send copy to monitor */
4253         hci_send_to_monitor(hdev, skb);
4254
4255         if (atomic_read(&hdev->promisc)) {
4256                 /* Send copy to the sockets */
4257                 hci_send_to_sock(hdev, skb);
4258         }
4259
4260         /* Get rid of skb owner, prior to sending to the driver. */
4261         skb_orphan(skb);
4262
4263         if (hdev->send(hdev, skb) < 0)
4264                 BT_ERR("%s sending frame failed", hdev->name);
4265 }
4266
4267 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4268 {
4269         skb_queue_head_init(&req->cmd_q);
4270         req->hdev = hdev;
4271         req->err = 0;
4272 }
4273
4274 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4275 {
4276         struct hci_dev *hdev = req->hdev;
4277         struct sk_buff *skb;
4278         unsigned long flags;
4279
4280         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4281
4282         /* If an error occurred during request building, remove all HCI
4283          * commands queued on the HCI request queue.
4284          */
4285         if (req->err) {
4286                 skb_queue_purge(&req->cmd_q);
4287                 return req->err;
4288         }
4289
4290         /* Do not allow empty requests */
4291         if (skb_queue_empty(&req->cmd_q))
4292                 return -ENODATA;
4293
4294         skb = skb_peek_tail(&req->cmd_q);
4295         bt_cb(skb)->req.complete = complete;
4296
4297         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4298         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4299         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4300
4301         queue_work(hdev->workqueue, &hdev->cmd_work);
4302
4303         return 0;
4304 }
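
/* Sketch of the request API as used elsewhere in this file: queue one or
 * more commands, then submit them with a completion callback that is
 * invoked with the status of the last command (callback name assumed):
 *
 *        static void my_complete(struct hci_dev *hdev, u8 status) { ... }
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *        err = hci_req_run(&req, my_complete);
 */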
4305
4306 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4307                                        u32 plen, const void *param)
4308 {
4309         int len = HCI_COMMAND_HDR_SIZE + plen;
4310         struct hci_command_hdr *hdr;
4311         struct sk_buff *skb;
4312
4313         skb = bt_skb_alloc(len, GFP_ATOMIC);
4314         if (!skb)
4315                 return NULL;
4316
4317         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4318         hdr->opcode = cpu_to_le16(opcode);
4319         hdr->plen   = plen;
4320
4321         if (plen)
4322                 memcpy(skb_put(skb, plen), param, plen);
4323
4324         BT_DBG("skb len %d", skb->len);
4325
4326         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4327
4328         return skb;
4329 }
4330
4331 /* Send HCI command */
4332 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4333                  const void *param)
4334 {
4335         struct sk_buff *skb;
4336
4337         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4338
4339         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4340         if (!skb) {
4341                 BT_ERR("%s no memory for command", hdev->name);
4342                 return -ENOMEM;
4343         }
4344
4345         /* Stand-alone HCI commands must be flagged as
4346          * single-command requests.
4347          */
4348         bt_cb(skb)->req.start = true;
4349
4350         skb_queue_tail(&hdev->cmd_q, skb);
4351         queue_work(hdev->workqueue, &hdev->cmd_work);
4352
4353         return 0;
4354 }
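
/* Example: a stand-alone command without parameters, such as a
 * controller reset, is a single call:
 *
 *        err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */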
4355
4356 /* Queue a command to an asynchronous HCI request */
4357 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4358                     const void *param, u8 event)
4359 {
4360         struct hci_dev *hdev = req->hdev;
4361         struct sk_buff *skb;
4362
4363         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4364
4365         /* If an error occurred during request building, there is no point in
4366          * queueing the HCI command. We can simply return.
4367          */
4368         if (req->err)
4369                 return;
4370
4371         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4372         if (!skb) {
4373                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4374                        hdev->name, opcode);
4375                 req->err = -ENOMEM;
4376                 return;
4377         }
4378
4379         if (skb_queue_empty(&req->cmd_q))
4380                 bt_cb(skb)->req.start = true;
4381
4382         bt_cb(skb)->req.event = event;
4383
4384         skb_queue_tail(&req->cmd_q, skb);
4385 }
4386
4387 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4388                  const void *param)
4389 {
4390         hci_req_add_ev(req, opcode, plen, param, 0);
4391 }
4392
4393 /* Get data from the previously sent command */
4394 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4395 {
4396         struct hci_command_hdr *hdr;
4397
4398         if (!hdev->sent_cmd)
4399                 return NULL;
4400
4401         hdr = (void *) hdev->sent_cmd->data;
4402
4403         if (hdr->opcode != cpu_to_le16(opcode))
4404                 return NULL;
4405
4406         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4407
4408         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4409 }
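
/* Typical event-handler usage (pattern from hci_event.c): recover the
 * parameters of the command that a Command Complete event answers.
 *
 *        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *        if (!sent)
 *                return;
 */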
4410
4411 /* Send ACL data */
4412 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4413 {
4414         struct hci_acl_hdr *hdr;
4415         int len = skb->len;
4416
4417         skb_push(skb, HCI_ACL_HDR_SIZE);
4418         skb_reset_transport_header(skb);
4419         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4420         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4421         hdr->dlen   = cpu_to_le16(len);
4422 }
4423
4424 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4425                           struct sk_buff *skb, __u16 flags)
4426 {
4427         struct hci_conn *conn = chan->conn;
4428         struct hci_dev *hdev = conn->hdev;
4429         struct sk_buff *list;
4430
4431         skb->len = skb_headlen(skb);
4432         skb->data_len = 0;
4433
4434         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4435
4436         switch (hdev->dev_type) {
4437         case HCI_BREDR:
4438                 hci_add_acl_hdr(skb, conn->handle, flags);
4439                 break;
4440         case HCI_AMP:
4441                 hci_add_acl_hdr(skb, chan->handle, flags);
4442                 break;
4443         default:
4444                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4445                 return;
4446         }
4447
4448         list = skb_shinfo(skb)->frag_list;
4449         if (!list) {
4450                 /* Non-fragmented */
4451                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4452
4453                 skb_queue_tail(queue, skb);
4454         } else {
4455                 /* Fragmented */
4456                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4457
4458                 skb_shinfo(skb)->frag_list = NULL;
4459
4460                 /* Queue all fragments atomically */
4461                 spin_lock(&queue->lock);
4462
4463                 __skb_queue_tail(queue, skb);
4464
4465                 flags &= ~ACL_START;
4466                 flags |= ACL_CONT;
4467                 do {
4468                         skb = list; list = list->next;
4469
4470                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4471                         hci_add_acl_hdr(skb, conn->handle, flags);
4472
4473                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4474
4475                         __skb_queue_tail(queue, skb);
4476                 } while (list);
4477
4478                 spin_unlock(&queue->lock);
4479         }
4480 }
4481
4482 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4483 {
4484         struct hci_dev *hdev = chan->conn->hdev;
4485
4486         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4487
4488         hci_queue_acl(chan, &chan->data_q, skb, flags);
4489
4490         queue_work(hdev->workqueue, &hdev->tx_work);
4491 }
4492
4493 /* Send SCO data */
4494 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4495 {
4496         struct hci_dev *hdev = conn->hdev;
4497         struct hci_sco_hdr hdr;
4498
4499         BT_DBG("%s len %d", hdev->name, skb->len);
4500
4501         hdr.handle = cpu_to_le16(conn->handle);
4502         hdr.dlen   = skb->len;
4503
4504         skb_push(skb, HCI_SCO_HDR_SIZE);
4505         skb_reset_transport_header(skb);
4506         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4507
4508         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4509
4510         skb_queue_tail(&conn->data_q, skb);
4511         queue_work(hdev->workqueue, &hdev->tx_work);
4512 }
4513
4514 /* ---- HCI TX task (outgoing data) ---- */
4515
4516 /* HCI Connection scheduler */
4517 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4518                                      int *quote)
4519 {
4520         struct hci_conn_hash *h = &hdev->conn_hash;
4521         struct hci_conn *conn = NULL, *c;
4522         unsigned int num = 0, min = ~0;
4523
4524         /* We don't have to lock the device here. Connections are always
4525          * added and removed with the TX task disabled. */
4526
4527         rcu_read_lock();
4528
4529         list_for_each_entry_rcu(c, &h->list, list) {
4530                 if (c->type != type || skb_queue_empty(&c->data_q))
4531                         continue;
4532
4533                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4534                         continue;
4535
4536                 num++;
4537
4538                 if (c->sent < min) {
4539                         min  = c->sent;
4540                         conn = c;
4541                 }
4542
4543                 if (hci_conn_num(hdev, type) == num)
4544                         break;
4545         }
4546
4547         rcu_read_unlock();
4548
4549         if (conn) {
4550                 int cnt, q;
4551
4552                 switch (conn->type) {
4553                 case ACL_LINK:
4554                         cnt = hdev->acl_cnt;
4555                         break;
4556                 case SCO_LINK:
4557                 case ESCO_LINK:
4558                         cnt = hdev->sco_cnt;
4559                         break;
4560                 case LE_LINK:
4561                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4562                         break;
4563                 default:
4564                         cnt = 0;
4565                         BT_ERR("Unknown link type");
4566                 }
4567
4568                 q = cnt / num;
4569                 *quote = q ? q : 1;
4570         } else
4571                 *quote = 0;
4572
4573         BT_DBG("conn %p quote %d", conn, *quote);
4574         return conn;
4575 }
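
/* Worked example: with hdev->acl_cnt == 8 and three ACL connections
 * holding queued data, the connection with the fewest in-flight packets
 * is picked and given a quote of 8 / 3 = 2 packets; the quote is at
 * least 1 whenever a suitable connection is found.
 */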
4576
4577 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4578 {
4579         struct hci_conn_hash *h = &hdev->conn_hash;
4580         struct hci_conn *c;
4581
4582         BT_ERR("%s link tx timeout", hdev->name);
4583
4584         rcu_read_lock();
4585
4586         /* Kill stalled connections */
4587         list_for_each_entry_rcu(c, &h->list, list) {
4588                 if (c->type == type && c->sent) {
4589                         BT_ERR("%s killing stalled connection %pMR",
4590                                hdev->name, &c->dst);
4591                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4592                 }
4593         }
4594
4595         rcu_read_unlock();
4596 }
4597
4598 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4599                                       int *quote)
4600 {
4601         struct hci_conn_hash *h = &hdev->conn_hash;
4602         struct hci_chan *chan = NULL;
4603         unsigned int num = 0, min = ~0, cur_prio = 0;
4604         struct hci_conn *conn;
4605         int cnt, q, conn_num = 0;
4606
4607         BT_DBG("%s", hdev->name);
4608
4609         rcu_read_lock();
4610
4611         list_for_each_entry_rcu(conn, &h->list, list) {
4612                 struct hci_chan *tmp;
4613
4614                 if (conn->type != type)
4615                         continue;
4616
4617                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4618                         continue;
4619
4620                 conn_num++;
4621
4622                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4623                         struct sk_buff *skb;
4624
4625                         if (skb_queue_empty(&tmp->data_q))
4626                                 continue;
4627
4628                         skb = skb_peek(&tmp->data_q);
4629                         if (skb->priority < cur_prio)
4630                                 continue;
4631
4632                         if (skb->priority > cur_prio) {
4633                                 num = 0;
4634                                 min = ~0;
4635                                 cur_prio = skb->priority;
4636                         }
4637
4638                         num++;
4639
4640                         if (conn->sent < min) {
4641                                 min  = conn->sent;
4642                                 chan = tmp;
4643                         }
4644                 }
4645
4646                 if (hci_conn_num(hdev, type) == conn_num)
4647                         break;
4648         }
4649
4650         rcu_read_unlock();
4651
4652         if (!chan)
4653                 return NULL;
4654
4655         switch (chan->conn->type) {
4656         case ACL_LINK:
4657                 cnt = hdev->acl_cnt;
4658                 break;
4659         case AMP_LINK:
4660                 cnt = hdev->block_cnt;
4661                 break;
4662         case SCO_LINK:
4663         case ESCO_LINK:
4664                 cnt = hdev->sco_cnt;
4665                 break;
4666         case LE_LINK:
4667                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4668                 break;
4669         default:
4670                 cnt = 0;
4671                 BT_ERR("Unknown link type");
4672         }
4673
4674         q = cnt / num;
4675         *quote = q ? q : 1;
4676         BT_DBG("chan %p quote %d", chan, *quote);
4677         return chan;
4678 }
4679
4680 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4681 {
4682         struct hci_conn_hash *h = &hdev->conn_hash;
4683         struct hci_conn *conn;
4684         int num = 0;
4685
4686         BT_DBG("%s", hdev->name);
4687
4688         rcu_read_lock();
4689
4690         list_for_each_entry_rcu(conn, &h->list, list) {
4691                 struct hci_chan *chan;
4692
4693                 if (conn->type != type)
4694                         continue;
4695
4696                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4697                         continue;
4698
4699                 num++;
4700
4701                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4702                         struct sk_buff *skb;
4703
4704                         if (chan->sent) {
4705                                 chan->sent = 0;
4706                                 continue;
4707                         }
4708
4709                         if (skb_queue_empty(&chan->data_q))
4710                                 continue;
4711
4712                         skb = skb_peek(&chan->data_q);
4713                         if (skb->priority >= HCI_PRIO_MAX - 1)
4714                                 continue;
4715
4716                         skb->priority = HCI_PRIO_MAX - 1;
4717
4718                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4719                                skb->priority);
4720                 }
4721
4722                 if (hci_conn_num(hdev, type) == num)
4723                         break;
4724         }
4725
4726         rcu_read_unlock();
4727
4728 }
4729
4730 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4731 {
4732         /* Calculate the number of blocks used by this packet */
4733         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4734 }
4735
4736 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4737 {
4738         if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4739                 /* The ACL tx timeout must be longer than the maximum
4740                  * link supervision timeout (40.9 seconds). */
4741                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4742                                        HCI_ACL_TX_TIMEOUT))
4743                         hci_link_tx_to(hdev, ACL_LINK);
4744         }
4745 }
4746
4747 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4748 {
4749         unsigned int cnt = hdev->acl_cnt;
4750         struct hci_chan *chan;
4751         struct sk_buff *skb;
4752         int quote;
4753
4754         __check_timeout(hdev, cnt);
4755
4756         while (hdev->acl_cnt &&
4757                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4758                 u32 priority = (skb_peek(&chan->data_q))->priority;
4759                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4760                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4761                                skb->len, skb->priority);
4762
4763                         /* Stop if priority has changed */
4764                         if (skb->priority < priority)
4765                                 break;
4766
4767                         skb = skb_dequeue(&chan->data_q);
4768
4769                         hci_conn_enter_active_mode(chan->conn,
4770                                                    bt_cb(skb)->force_active);
4771
4772                         hci_send_frame(hdev, skb);
4773                         hdev->acl_last_tx = jiffies;
4774
4775                         hdev->acl_cnt--;
4776                         chan->sent++;
4777                         chan->conn->sent++;
4778                 }
4779         }
4780
4781         if (cnt != hdev->acl_cnt)
4782                 hci_prio_recalculate(hdev, ACL_LINK);
4783 }
4784
4785 static void hci_sched_acl_blk(struct hci_dev *hdev)
4786 {
4787         unsigned int cnt = hdev->block_cnt;
4788         struct hci_chan *chan;
4789         struct sk_buff *skb;
4790         int quote;
4791         u8 type;
4792
4793         __check_timeout(hdev, cnt);
4794
4795         BT_DBG("%s", hdev->name);
4796
4797         if (hdev->dev_type == HCI_AMP)
4798                 type = AMP_LINK;
4799         else
4800                 type = ACL_LINK;
4801
4802         while (hdev->block_cnt > 0 &&
4803                (chan = hci_chan_sent(hdev, type, &quote))) {
4804                 u32 priority = (skb_peek(&chan->data_q))->priority;
4805                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4806                         int blocks;
4807
4808                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4809                                skb->len, skb->priority);
4810
4811                         /* Stop if priority has changed */
4812                         if (skb->priority < priority)
4813                                 break;
4814
4815                         skb = skb_dequeue(&chan->data_q);
4816
4817                         blocks = __get_blocks(hdev, skb);
4818                         if (blocks > hdev->block_cnt)
4819                                 return;
4820
4821                         hci_conn_enter_active_mode(chan->conn,
4822                                                    bt_cb(skb)->force_active);
4823
4824                         hci_send_frame(hdev, skb);
4825                         hdev->acl_last_tx = jiffies;
4826
4827                         hdev->block_cnt -= blocks;
4828                         quote -= blocks;
4829
4830                         chan->sent += blocks;
4831                         chan->conn->sent += blocks;
4832                 }
4833         }
4834
4835         if (cnt != hdev->block_cnt)
4836                 hci_prio_recalculate(hdev, type);
4837 }
4838
4839 static void hci_sched_acl(struct hci_dev *hdev)
4840 {
4841         BT_DBG("%s", hdev->name);
4842
4843         /* Nothing to schedule if a BR/EDR controller has no ACL links */
4844         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4845                 return;
4846
4847         /* Nothing to schedule if an AMP controller has no AMP links */
4848         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4849                 return;
4850
4851         switch (hdev->flow_ctl_mode) {
4852         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4853                 hci_sched_acl_pkt(hdev);
4854                 break;
4855
4856         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4857                 hci_sched_acl_blk(hdev);
4858                 break;
4859         }
4860 }
4861
4862 /* Schedule SCO */
4863 static void hci_sched_sco(struct hci_dev *hdev)
4864 {
4865         struct hci_conn *conn;
4866         struct sk_buff *skb;
4867         int quote;
4868
4869         BT_DBG("%s", hdev->name);
4870
4871         if (!hci_conn_num(hdev, SCO_LINK))
4872                 return;
4873
4874         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4875                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4876                         BT_DBG("skb %p len %d", skb, skb->len);
4877                         hci_send_frame(hdev, skb);
4878
4879                         conn->sent++;
4880                         if (conn->sent == ~0)
4881                                 conn->sent = 0;
4882                 }
4883         }
4884 }
4885
4886 static void hci_sched_esco(struct hci_dev *hdev)
4887 {
4888         struct hci_conn *conn;
4889         struct sk_buff *skb;
4890         int quote;
4891
4892         BT_DBG("%s", hdev->name);
4893
4894         if (!hci_conn_num(hdev, ESCO_LINK))
4895                 return;
4896
4897         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4898                                                      &quote))) {
4899                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4900                         BT_DBG("skb %p len %d", skb, skb->len);
4901                         hci_send_frame(hdev, skb);
4902
4903                         conn->sent++;
4904                         if (conn->sent == ~0)
4905                                 conn->sent = 0;
4906                 }
4907         }
4908 }
4909
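/* hci_sched_sco() and hci_sched_esco() above are intentionally identical
 * in structure and both draw on the shared hdev->sco_cnt quota; the
 * (conn->sent == ~0) test merely wraps the per-connection counter before
 * it overflows.
 */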
4910 static void hci_sched_le(struct hci_dev *hdev)
4911 {
4912         struct hci_chan *chan;
4913         struct sk_buff *skb;
4914         int quote, cnt, tmp;
4915
4916         BT_DBG("%s", hdev->name);
4917
4918         if (!hci_conn_num(hdev, LE_LINK))
4919                 return;
4920
4921         if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4922                 /* LE tx timeout must be longer than maximum
4923                  * link supervision timeout (40.9 seconds) */
4924                 if (!hdev->le_cnt && hdev->le_pkts &&
4925                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4926                         hci_link_tx_to(hdev, LE_LINK);
4927         }
4928
4929         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4930         tmp = cnt;
4931         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4932                 u32 priority = (skb_peek(&chan->data_q))->priority;
4933                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4934                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4935                                skb->len, skb->priority);
4936
4937                         /* Stop if priority has changed */
4938                         if (skb->priority < priority)
4939                                 break;
4940
4941                         skb = skb_dequeue(&chan->data_q);
4942
4943                         hci_send_frame(hdev, skb);
4944                         hdev->le_last_tx = jiffies;
4945
4946                         cnt--;
4947                         chan->sent++;
4948                         chan->conn->sent++;
4949                 }
4950         }
4951
4952         if (hdev->le_pkts)
4953                 hdev->le_cnt = cnt;
4954         else
4955                 hdev->acl_cnt = cnt;
4956
4957         if (cnt != tmp)
4958                 hci_prio_recalculate(hdev, LE_LINK);
4959 }
4960
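/* Controllers without dedicated LE buffers advertise zero LE packets
 * (hdev->le_pkts == 0) and share the BR/EDR ACL buffer pool, which is why
 * hci_sched_le() starts from hdev->acl_cnt in that case and writes the
 * leftover credit back to whichever counter it borrowed from.
 */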
4961 static void hci_tx_work(struct work_struct *work)
4962 {
4963         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4964         struct sk_buff *skb;
4965
4966         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4967                hdev->sco_cnt, hdev->le_cnt);
4968
4969         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4970                 /* Schedule queues and send stuff to HCI driver */
4971                 hci_sched_acl(hdev);
4972                 hci_sched_sco(hdev);
4973                 hci_sched_esco(hdev);
4974                 hci_sched_le(hdev);
4975         }
4976
4977         /* Send next queued raw (unknown type) packet */
4978         while ((skb = skb_dequeue(&hdev->raw_q)))
4979                 hci_send_frame(hdev, skb);
4980 }
4981
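/* Frames on hdev->raw_q (e.g. those queued by raw HCI sockets) bypass the
 * per-link schedulers and controller flow control entirely; they are
 * drained last, and still drained when HCI_USER_CHANNEL suppresses the
 * schedulers above.
 */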
4982 /* ----- HCI RX task (incoming data processing) ----- */
4983
4984 /* ACL data packet */
4985 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4986 {
4987         struct hci_acl_hdr *hdr = (void *) skb->data;
4988         struct hci_conn *conn;
4989         __u16 handle, flags;
4990
4991         skb_pull(skb, HCI_ACL_HDR_SIZE);
4992
4993         handle = __le16_to_cpu(hdr->handle);
4994         flags  = hci_flags(handle);
4995         handle = hci_handle(handle);
4996
4997         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4998                handle, flags);
4999
5000         hdev->stat.acl_rx++;
5001
5002         hci_dev_lock(hdev);
5003         conn = hci_conn_hash_lookup_handle(hdev, handle);
5004         hci_dev_unlock(hdev);
5005
5006         if (conn) {
5007                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5008
5009                 /* Send to upper protocol */
5010                 l2cap_recv_acldata(conn, skb, flags);
5011                 return;
5012         } else {
5013                 BT_ERR("%s ACL packet for unknown connection handle %d",
5014                        hdev->name, handle);
5015         }
5016
5017         kfree_skb(skb);
5018 }
5019
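/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the Packet Boundary and Broadcast flags, which is
 * what the hci_handle()/hci_flags() split above extracts. Roughly:
 *
 *	handle = field & 0x0fff;	12-bit connection handle
 *	flags  = field >> 12;		2-bit PB flag + 2-bit BC flag
 */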
5020 /* SCO data packet */
5021 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5022 {
5023         struct hci_sco_hdr *hdr = (void *) skb->data;
5024         struct hci_conn *conn;
5025         __u16 handle;
5026
5027         skb_pull(skb, HCI_SCO_HDR_SIZE);
5028
5029         handle = __le16_to_cpu(hdr->handle);
5030
5031         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5032
5033         hdev->stat.sco_rx++;
5034
5035         hci_dev_lock(hdev);
5036         conn = hci_conn_hash_lookup_handle(hdev, handle);
5037         hci_dev_unlock(hdev);
5038
5039         if (conn) {
5040                 /* Send to upper protocol */
5041                 sco_recv_scodata(conn, skb);
5042                 return;
5043         } else {
5044                 BT_ERR("%s SCO packet for unknown connection handle %d",
5045                        hdev->name, handle);
5046         }
5047
5048         kfree_skb(skb);
5049 }
5050
5051 static bool hci_req_is_complete(struct hci_dev *hdev)
5052 {
5053         struct sk_buff *skb;
5054
5055         skb = skb_peek(&hdev->cmd_q);
5056         if (!skb)
5057                 return true;
5058
5059         return bt_cb(skb)->req.start;
5060 }
5061
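/* Requests are framed in the command queue by the req.start marker: the
 * first command of each request has it set, its followers do not. For a
 * cmd_q of, say,
 *
 *	[ A(start) B C ][ D(start) E ]
 *
 * the current request is complete once the head of the queue is a
 * start-marked command (or the queue is empty), which is exactly the
 * check hci_req_is_complete() performs.
 */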
5062 static void hci_resend_last(struct hci_dev *hdev)
5063 {
5064         struct hci_command_hdr *sent;
5065         struct sk_buff *skb;
5066         u16 opcode;
5067
5068         if (!hdev->sent_cmd)
5069                 return;
5070
5071         sent = (void *) hdev->sent_cmd->data;
5072         opcode = __le16_to_cpu(sent->opcode);
5073         if (opcode == HCI_OP_RESET)
5074                 return;
5075
5076         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5077         if (!skb)
5078                 return;
5079
5080         skb_queue_head(&hdev->cmd_q, skb);
5081         queue_work(hdev->workqueue, &hdev->cmd_work);
5082 }
5083
5084 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5085 {
5086         hci_req_complete_t req_complete = NULL;
5087         struct sk_buff *skb;
5088         unsigned long flags;
5089
5090         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5091
5092         /* If the completed command doesn't match the last one that was
5093          * sent, we need to do special handling of it.
5094          */
5095         if (!hci_sent_cmd_data(hdev, opcode)) {
5096                 /* Some CSR based controllers generate a spontaneous
5097                  * reset complete event during init and any pending
5098                  * command will never be completed. In such a case we
5099                  * need to resend whatever was the last sent
5100                  * command.
5101                  */
5102                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5103                         hci_resend_last(hdev);
5104
5105                 return;
5106         }
5107
5108         /* If the command succeeded and there are still more commands in
5109          * this request, the request is not yet complete.
5110          */
5111         if (!status && !hci_req_is_complete(hdev))
5112                 return;
5113
5114         /* If this was the last command in a request, the complete
5115          * callback would be found in hdev->sent_cmd instead of the
5116          * command queue (hdev->cmd_q).
5117          */
5118         if (hdev->sent_cmd) {
5119                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5120
5121                 if (req_complete) {
5122                         /* We must set the complete callback to NULL to
5123                          * avoid calling the callback more than once if
5124                          * this function gets called again.
5125                          */
5126                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5127
5128                         goto call_complete;
5129                 }
5130         }
5131
5132         /* Remove all pending commands belonging to this request */
5133         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5134         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5135                 if (bt_cb(skb)->req.start) {
5136                         __skb_queue_head(&hdev->cmd_q, skb);
5137                         break;
5138                 }
5139
5140                 req_complete = bt_cb(skb)->req.complete;
5141                 kfree_skb(skb);
5142         }
5143         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5144
5145 call_complete:
5146         if (req_complete)
5147                 req_complete(hdev, status);
5148 }
5149
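/* In short: a non-zero status aborts the whole request, so every queued
 * command up to (but not including) the next start-marked skb is dropped
 * and the request's complete callback runs once with that status; with
 * status zero the callback runs only after the final command of the
 * request has completed.
 */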
5150 static void hci_rx_work(struct work_struct *work)
5151 {
5152         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5153         struct sk_buff *skb;
5154
5155         BT_DBG("%s", hdev->name);
5156
5157         while ((skb = skb_dequeue(&hdev->rx_q))) {
5158                 /* Send copy to monitor */
5159                 hci_send_to_monitor(hdev, skb);
5160
5161                 if (atomic_read(&hdev->promisc)) {
5162                         /* Send copy to the sockets */
5163                         hci_send_to_sock(hdev, skb);
5164                 }
5165
5166                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5167                         kfree_skb(skb);
5168                         continue;
5169                 }
5170
5171                 if (test_bit(HCI_INIT, &hdev->flags)) {
5172                         /* Don't process data packets in this state. */
5173                         switch (bt_cb(skb)->pkt_type) {
5174                         case HCI_ACLDATA_PKT:
5175                         case HCI_SCODATA_PKT:
5176                                 kfree_skb(skb);
5177                                 continue;
5178                         }
5179                 }
5180
5181                 /* Process frame */
5182                 switch (bt_cb(skb)->pkt_type) {
5183                 case HCI_EVENT_PKT:
5184                         BT_DBG("%s Event packet", hdev->name);
5185                         hci_event_packet(hdev, skb);
5186                         break;
5187
5188                 case HCI_ACLDATA_PKT:
5189                         BT_DBG("%s ACL data packet", hdev->name);
5190                         hci_acldata_packet(hdev, skb);
5191                         break;
5192
5193                 case HCI_SCODATA_PKT:
5194                         BT_DBG("%s SCO data packet", hdev->name);
5195                         hci_scodata_packet(hdev, skb);
5196                         break;
5197
5198                 default:
5199                         kfree_skb(skb);
5200                         break;
5201                 }
5202         }
5203 }
5204
5205 static void hci_cmd_work(struct work_struct *work)
5206 {
5207         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5208         struct sk_buff *skb;
5209
5210         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5212
5213         /* Send queued commands */
5214         if (atomic_read(&hdev->cmd_cnt)) {
5215                 skb = skb_dequeue(&hdev->cmd_q);
5216                 if (!skb)
5217                         return;
5218
5219                 kfree_skb(hdev->sent_cmd);
5220
5221                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5222                 if (hdev->sent_cmd) {
5223                         atomic_dec(&hdev->cmd_cnt);
5224                         hci_send_frame(hdev, skb);
5225                         if (test_bit(HCI_RESET, &hdev->flags))
5226                                 cancel_delayed_work(&hdev->cmd_timer);
5227                         else
5228                                 schedule_delayed_work(&hdev->cmd_timer,
5229                                                       HCI_CMD_TIMEOUT);
5230                 } else {
5231                         skb_queue_head(&hdev->cmd_q, skb);
5232                         queue_work(hdev->workqueue, &hdev->cmd_work);
5233                 }
5234         }
5235 }
5236
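/* hdev->cmd_cnt is the HCI command flow-control window: it is consumed
 * here for each command handed to the driver and replenished when the
 * event handlers process Command Complete/Command Status events (see
 * hci_event.c). The clone kept in hdev->sent_cmd is what
 * hci_req_cmd_complete() later matches completions against.
 */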
5237 void hci_req_add_le_scan_disable(struct hci_request *req)
5238 {
5239         struct hci_cp_le_set_scan_enable cp;
5240
5241         memset(&cp, 0, sizeof(cp));
5242         cp.enable = LE_SCAN_DISABLE;
5243         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5244 }
5245
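/* A minimal usage sketch for the request helpers in this area, mirroring
 * what hci_update_background_scan() does below (the completion callback
 * name is hypothetical):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, scan_disable_complete);
 */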
5246 void hci_req_add_le_passive_scan(struct hci_request *req)
5247 {
5248         struct hci_cp_le_set_scan_param param_cp;
5249         struct hci_cp_le_set_scan_enable enable_cp;
5250         struct hci_dev *hdev = req->hdev;
5251         u8 own_addr_type;
5252
5253         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
5254          * during passive scanning. Not using an unresolvable address
5255          * here is important so that peer devices using direct
5256          * advertising with our address will be correctly reported
5257          * by the controller.
5258          */
5259         if (hci_update_random_address(req, false, &own_addr_type))
5260                 return;
5261
5262         memset(&param_cp, 0, sizeof(param_cp));
5263         param_cp.type = LE_SCAN_PASSIVE;
5264         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5265         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5266         param_cp.own_address_type = own_addr_type;
5267         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5268                     &param_cp);
5269
5270         memset(&enable_cp, 0, sizeof(enable_cp));
5271         enable_cp.enable = LE_SCAN_ENABLE;
5272         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5273         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5274                     &enable_cp);
5275 }
5276
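/* le_scan_interval and le_scan_window are in units of 0.625 ms and the
 * window may not exceed the interval. For example, an interval of 0x0060
 * (60 ms) with a window of 0x0030 (30 ms) has the controller listening
 * for 30 ms out of every 60 ms while passive scanning is enabled.
 */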
5277 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5278 {
5279         if (status)
5280                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5281                        status);
5282 }
5283
5284 /* This function controls the background scanning based on hdev->pend_le_conns
5285  * list. If there are pending LE connections we start the background
5286  * scanning, otherwise we stop it.
5287  *
5288  * This function requires that the caller holds hdev->lock.
5289  */
5290 void hci_update_background_scan(struct hci_dev *hdev)
5291 {
5292         struct hci_request req;
5293         struct hci_conn *conn;
5294         int err;
5295
5296         hci_req_init(&req, hdev);
5297
5298         if (list_empty(&hdev->pend_le_conns)) {
5299                 /* If there are no pending LE connections, we should stop
5300                  * the background scanning.
5301                  */
5302
5303                 /* If controller is not scanning we are done. */
5304                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5305                         return;
5306
5307                 hci_req_add_le_scan_disable(&req);
5308
5309                 BT_DBG("%s stopping background scanning", hdev->name);
5310         } else {
5311                 /* If there is at least one pending LE connection, we should
5312                  * keep the background scan running.
5313                  */
5314
5315                 /* If controller is connecting, we should not start scanning
5316                  * since some controllers are not able to scan and connect at
5317                  * the same time.
5318                  */
5319                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5320                 if (conn)
5321                         return;
5322
5323                 /* If controller is currently scanning, we stop it to ensure we
5324                  * don't miss any advertising (due to duplicates filter).
5325                  */
5326                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5327                         hci_req_add_le_scan_disable(&req);
5328
5329                 hci_req_add_le_passive_scan(&req);
5330
5331                 BT_DBG("%s starting background scanning", hdev->name);
5332         }
5333
5334         err = hci_req_run(&req, update_background_scan_complete);
5335         if (err)
5336                 BT_ERR("Failed to run HCI request: err %d", err);
5337 }
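/* A sketch of the expected calling pattern, given the locking rule in the
 * comment above (the pend_le_conns update itself is elided):
 *
 *	hci_dev_lock(hdev);
 *	... add or remove an entry on hdev->pend_le_conns ...
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */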