Bluetooth: Pass full hci_dev struct to mgmt callbacks
[cascardo/linux.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO   2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
/* Register a notifier to receive HCI device events (HCI_DEV_UP, etc.).
 * Thin wrapper around the atomic notifier chain; callable from any context.
 */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
84
/* Remove a notifier previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
89
/* Broadcast @event for @hdev to all registered HCI notifiers.
 * The hdev pointer is passed as the notifier payload.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
94
95 /* ---- HCI requests ---- */
96
/* Complete a pending synchronous request on @hdev.
 * @cmd:    opcode of the HCI command whose completion arrived
 * @result: HCI status byte reported by the controller
 *
 * Stores the result and wakes the sleeper in __hci_request(). During the
 * init phase only the completion of the last-issued init command counts;
 * completions of earlier init commands are ignored.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		/* Publish the result before flipping the status the waiter
		 * tests, then wake it up. */
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116         BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118         if (hdev->req_status == HCI_REQ_PEND) {
119                 hdev->req_result = err;
120                 hdev->req_status = HCI_REQ_CANCELED;
121                 wake_up_interruptible(&hdev->req_wait_q);
122         }
123 }
124
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127                                         unsigned long opt, __u32 timeout)
128 {
129         DECLARE_WAITQUEUE(wait, current);
130         int err = 0;
131
132         BT_DBG("%s start", hdev->name);
133
134         hdev->req_status = HCI_REQ_PEND;
135
136         add_wait_queue(&hdev->req_wait_q, &wait);
137         set_current_state(TASK_INTERRUPTIBLE);
138
139         req(hdev, opt);
140         schedule_timeout(timeout);
141
142         remove_wait_queue(&hdev->req_wait_q, &wait);
143
144         if (signal_pending(current))
145                 return -EINTR;
146
147         switch (hdev->req_status) {
148         case HCI_REQ_DONE:
149                 err = -bt_to_errno(hdev->req_result);
150                 break;
151
152         case HCI_REQ_CANCELED:
153                 err = -hdev->req_result;
154                 break;
155
156         default:
157                 err = -ETIMEDOUT;
158                 break;
159         }
160
161         hdev->req_status = hdev->req_result = 0;
162
163         BT_DBG("%s end: err %d", hdev->name, err);
164
165         return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169                                         unsigned long opt, __u32 timeout)
170 {
171         int ret;
172
173         if (!test_bit(HCI_UP, &hdev->flags))
174                 return -ENETDOWN;
175
176         /* Serialize all requests */
177         hci_req_lock(hdev);
178         ret = __hci_request(hdev, req, opt, timeout);
179         hci_req_unlock(hdev);
180
181         return ret;
182 }
183
/* Request callback: issue an HCI_Reset to the controller.
 * HCI_RESET is set before sending so the completion path knows a reset
 * is in flight; @opt is unused.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Request callback: run the standard BR/EDR controller bring-up sequence.
 * Queues driver-specific init packets first, then the mandatory HCI
 * discovery commands, then optional tuning. All commands are fired
 * asynchronously; __hci_request() waits for the last completion.
 * @opt is unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any packets the driver queued on
	 * driver_init and push them onto the command queue first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset, unless the driver declared the quirk that its hardware
	 * must not (or need not) be reset. */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all wildcard) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* Request callback: LE-specific bring-up, run after hci_init_req()
 * on controllers whose host supports LE. @opt is unused.
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
277
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280         __u8 scan = opt;
281
282         BT_DBG("%s %x", hdev->name, scan);
283
284         /* Inquiry and Page scans */
285         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290         __u8 auth = opt;
291
292         BT_DBG("%s %x", hdev->name, auth);
293
294         /* Authentication */
295         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300         __u8 encrypt = opt;
301
302         BT_DBG("%s %x", hdev->name, encrypt);
303
304         /* Encryption */
305         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310         __le16 policy = cpu_to_le16(opt);
311
312         BT_DBG("%s %x", hdev->name, policy);
313
314         /* Default link policy */
315         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317
/* Get HCI device by index.
 * Device is held on return: the caller owns a reference and must drop
 * it with hci_dev_put(). Returns NULL for a negative or unknown index.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* The read lock keeps the list stable while we take the reference */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
339
340 /* ---- Inquiry support ---- */
341 static void inquiry_cache_flush(struct hci_dev *hdev)
342 {
343         struct inquiry_cache *cache = &hdev->inq_cache;
344         struct inquiry_entry *next  = cache->list, *e;
345
346         BT_DBG("cache %p", cache);
347
348         cache->list = NULL;
349         while ((e = next)) {
350                 next = e->next;
351                 kfree(e);
352         }
353 }
354
355 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356 {
357         struct inquiry_cache *cache = &hdev->inq_cache;
358         struct inquiry_entry *e;
359
360         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362         for (e = cache->list; e; e = e->next)
363                 if (!bacmp(&e->data.bdaddr, bdaddr))
364                         break;
365         return e;
366 }
367
/* Insert or refresh an inquiry result in the device's inquiry cache.
 * Existing entries for the same bdaddr are overwritten in place; new
 * entries are pushed at the list head. Timestamps on both the entry and
 * the cache are refreshed. Caller must hold the device lock; allocation
 * is GFP_ATOMIC because this runs from event processing.
 */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
390
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep; caller must hold the device lock and supply a buffer of at
 * least num * sizeof(struct inquiry_info) bytes.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
412
/* Request callback: start an inquiry.
 * @opt is a pointer to struct hci_inquiry_req smuggled through the
 * unsigned long request argument. Does nothing if an inquiry is
 * already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry: LAP is 3 bytes, length in 1.28s units */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
429
/* HCIINQUIRY ioctl backend: run an inquiry (or serve it from cache) and
 * copy the results to user space.
 * @arg: user pointer to a struct hci_inquiry_req header followed by
 *       space for the result array.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A fresh inquiry is needed when the cache is stale or empty, or
	 * when the caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units; budget ~2s of wall time per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated header (with the real num_rsp), then the
	 * result array immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
495
496 /* ---- HCI ioctl helpers ---- */
497
/* Bring HCI device @dev up: open the transport, run the HCI init
 * sequence (unless the device is raw) and set HCI_UP.
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY or -EIO on
 * failure, or an error from the init requests. On init failure the
 * transport is closed again and all queues/tasklets are cleaned up.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* One command credit until the controller reports its
		 * real buffer counts. HCI_INIT gates hci_req_complete(). */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Extra reference held for as long as the device is up */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP, mgmt is told via mgmt_index_added later */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev, 1);
	} else {
		/* Init failed, cleanup: kill tasklets, drop queued traffic
		 * and close the transport again. */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Take @hdev down: cancel pending work, flush all state, reset the
 * controller (non-raw devices) and close the transport. Safe to call on
 * an already-down device (returns 0 after stopping the command timer).
 * Drops the reference taken in hci_dev_open().
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Abort any in-flight synchronous request before serializing */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	/* Stop a pending discoverable-timeout countdown */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work_sync(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Balance the hci_dev_hold() done when the device came up */
	hci_dev_put(hdev);
	return 0;
}
655
656 int hci_dev_close(__u16 dev)
657 {
658         struct hci_dev *hdev;
659         int err;
660
661         hdev = hci_dev_get(dev);
662         if (!hdev)
663                 return -ENODEV;
664         err = hci_dev_do_close(hdev);
665         hci_dev_put(hdev);
666         return err;
667 }
668
/* HCIDEVRESET ioctl backend: flush all queues and connection state of
 * device @dev and issue an HCI_Reset (non-raw devices only). The TX
 * tasklet is disabled for the duration so no traffic races the reset.
 * Succeeds trivially (0) when the device is down.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore one command credit and zero the flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
709
710 int hci_dev_reset_stat(__u16 dev)
711 {
712         struct hci_dev *hdev;
713         int ret = 0;
714
715         hdev = hci_dev_get(dev);
716         if (!hdev)
717                 return -ENODEV;
718
719         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
720
721         hci_dev_put(hdev);
722
723         return ret;
724 }
725
/* Dispatcher for the HCISET* device-configuration ioctls.
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: user pointer to a struct hci_dev_req (dev_id + dev_opt)
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	/* The remaining settings are host-side only: no HCI command needed */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs: low half = packet count, high half = MTU */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
800
801 int hci_get_dev_list(void __user *arg)
802 {
803         struct hci_dev *hdev;
804         struct hci_dev_list_req *dl;
805         struct hci_dev_req *dr;
806         int n = 0, size, err;
807         __u16 dev_num;
808
809         if (get_user(dev_num, (__u16 __user *) arg))
810                 return -EFAULT;
811
812         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
813                 return -EINVAL;
814
815         size = sizeof(*dl) + dev_num * sizeof(*dr);
816
817         dl = kzalloc(size, GFP_KERNEL);
818         if (!dl)
819                 return -ENOMEM;
820
821         dr = dl->dev_req;
822
823         read_lock_bh(&hci_dev_list_lock);
824         list_for_each_entry(hdev, &hci_dev_list, list) {
825                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
826                         cancel_delayed_work_sync(&hdev->power_off);
827
828                 if (!test_bit(HCI_MGMT, &hdev->flags))
829                         set_bit(HCI_PAIRABLE, &hdev->flags);
830
831                 (dr + n)->dev_id  = hdev->id;
832                 (dr + n)->dev_opt = hdev->flags;
833
834                 if (++n >= dev_num)
835                         break;
836         }
837         read_unlock_bh(&hci_dev_list_lock);
838
839         dl->dev_num = n;
840         size = sizeof(*dl) + n * sizeof(*dr);
841
842         err = copy_to_user(arg, dl, size);
843         kfree(dl);
844
845         return err ? -EFAULT : 0;
846 }
847
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for the
 * device named by di.dev_id and copy it back to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A legacy ioctl user is touching the device: keep it powered
	 * (no lock held here, so the sleeping sync cancel is fine) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high one */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
889
890 /* ---- Interface to HCI drivers ---- */
891
892 static int hci_rfkill_set_block(void *data, bool blocked)
893 {
894         struct hci_dev *hdev = data;
895
896         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
897
898         if (!blocked)
899                 return 0;
900
901         hci_dev_do_close(hdev);
902
903         return 0;
904 }
905
/* rfkill operations table registered per HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
909
910 /* Alloc HCI device */
/* Alloc HCI device.
 * Zero-allocates a struct hci_dev, sets up its sysfs representation and
 * the driver_init queue. Returns NULL on allocation failure. The caller
 * (a transport driver) completes setup and calls hci_register_dev();
 * freeing goes through hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
925
926 /* Free HCI device */
/* Free HCI device.
 * Drops the embedded device reference; the actual kfree happens in the
 * device release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
935
/* Deferred power-on work: bring the device up, arm the auto-off timer
 * if nobody claims the device, and tell mgmt about a newly set-up
 * controller once HCI_SETUP completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless something keeps the
	 * device in use within AUTO_OFF_TIMEOUT ms */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
952
/* Deferred power-off work: the auto-off timer fired without anyone
 * claiming the device, so take it back down.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The timer has served its purpose; clear the flag first so the
	 * close path does not try to cancel this very work item */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
964
/* Deferred discoverable-timeout work: the discoverable window expired,
 * so drop back to page-scan only (no inquiry scan) and clear the
 * recorded timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
982
983 int hci_uuids_clear(struct hci_dev *hdev)
984 {
985         struct list_head *p, *n;
986
987         list_for_each_safe(p, n, &hdev->uuids) {
988                 struct bt_uuid *uuid;
989
990                 uuid = list_entry(p, struct bt_uuid, list);
991
992                 list_del(p);
993                 kfree(uuid);
994         }
995
996         return 0;
997 }
998
999 int hci_link_keys_clear(struct hci_dev *hdev)
1000 {
1001         struct list_head *p, *n;
1002
1003         list_for_each_safe(p, n, &hdev->link_keys) {
1004                 struct link_key *key;
1005
1006                 key = list_entry(p, struct link_key, list);
1007
1008                 list_del(p);
1009                 kfree(key);
1010         }
1011
1012         return 0;
1013 }
1014
1015 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1016 {
1017         struct link_key *k;
1018
1019         list_for_each_entry(k, &hdev->link_keys, list)
1020                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1021                         return k;
1022
1023         return NULL;
1024 }
1025
/* Decide whether a newly created link key should be stored
 * persistently (returns 1) or only for the duration of the current
 * connection (returns 0). @conn may be NULL (key delivered without a
 * connection object, security mode 3). The checks are order
 * dependent. @hdev is currently unused but kept for a uniform
 * signature.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1061
/* Look up the SMP Long Term Key matching the given encrypted
 * diversifier (ediv) and random value. LTKs are stored in the regular
 * link-key list with type HCI_LK_SMP_LTK and a struct key_master_id
 * in their variable-length data area. Returns NULL when no matching
 * key is stored.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Sanity check: LTK entries carry a key_master_id payload */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1084
1085 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1086                                         bdaddr_t *bdaddr, u8 type)
1087 {
1088         struct link_key *k;
1089
1090         list_for_each_entry(k, &hdev->link_keys, list)
1091                 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1092                         return k;
1093
1094         return NULL;
1095 }
1096 EXPORT_SYMBOL(hci_find_link_key_type);
1097
/* Store (or update) the link key for @bdaddr and, when @new_key is
 * set, notify the management interface. Keys that do not qualify as
 * persistent (see hci_persistent_key) are reported and then dropped
 * again so they never outlive the connection. Returns 0 on success
 * or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported to mgmt, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1152
/* Store (or update) an SMP Long Term Key for @bdaddr. The ediv/rand
 * master identification is kept in the entry's variable-length data
 * area so hci_find_ltk() can match it later. Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate room for the trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): old_key_type is passed where mgmt_new_link_key()
	 * takes its persistent/store-hint argument in hci_add_link_key();
	 * confirm this is intended against the mgmt API. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1190
1191 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1192 {
1193         struct link_key *key;
1194
1195         key = hci_find_link_key(hdev, bdaddr);
1196         if (!key)
1197                 return -ENOENT;
1198
1199         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1200
1201         list_del(&key->list);
1202         kfree(key);
1203
1204         return 0;
1205 }
1206
/* HCI command timer function: fires when the controller failed to
 * acknowledge a sent command in time. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Pretend the lost command completed so the command queue is not
	 * stalled forever, then re-run the command transmit task. */
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1216
1217 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1218                                                         bdaddr_t *bdaddr)
1219 {
1220         struct oob_data *data;
1221
1222         list_for_each_entry(data, &hdev->remote_oob_data, list)
1223                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1224                         return data;
1225
1226         return NULL;
1227 }
1228
1229 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1230 {
1231         struct oob_data *data;
1232
1233         data = hci_find_remote_oob_data(hdev, bdaddr);
1234         if (!data)
1235                 return -ENOENT;
1236
1237         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1238
1239         list_del(&data->list);
1240         kfree(data);
1241
1242         return 0;
1243 }
1244
1245 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1246 {
1247         struct oob_data *data, *n;
1248
1249         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1250                 list_del(&data->list);
1251                 kfree(data);
1252         }
1253
1254         return 0;
1255 }
1256
/* Store (or refresh) the remote OOB hash/randomizer pair for @bdaddr,
 * as received over an out-of-band channel for secure simple pairing.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* plain kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1280
1281 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1282                                                 bdaddr_t *bdaddr)
1283 {
1284         struct bdaddr_list *b;
1285
1286         list_for_each_entry(b, &hdev->blacklist, list)
1287                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1288                         return b;
1289
1290         return NULL;
1291 }
1292
1293 int hci_blacklist_clear(struct hci_dev *hdev)
1294 {
1295         struct list_head *p, *n;
1296
1297         list_for_each_safe(p, n, &hdev->blacklist) {
1298                 struct bdaddr_list *b;
1299
1300                 b = list_entry(p, struct bdaddr_list, list);
1301
1302                 list_del(p);
1303                 kfree(b);
1304         }
1305
1306         return 0;
1307 }
1308
/* Add @bdaddr to the connection reject list and notify mgmt.
 * Returns -EBADF for the BDADDR_ANY wildcard, -EEXIST for duplicates,
 * -ENOMEM on allocation failure, otherwise the result of the mgmt
 * notification. Caller must hold the device lock.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1329
1330 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1331 {
1332         struct bdaddr_list *entry;
1333
1334         if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1335                 return hci_blacklist_clear(hdev);
1336         }
1337
1338         entry = hci_blacklist_lookup(hdev, bdaddr);
1339         if (!entry) {
1340                 return -ENOENT;
1341         }
1342
1343         list_del(&entry->list);
1344         kfree(entry);
1345
1346         return mgmt_device_unblocked(hdev, bdaddr);
1347 }
1348
/* adv_timer callback: expire the whole LE advertising cache. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1359
1360 int hci_adv_entries_clear(struct hci_dev *hdev)
1361 {
1362         struct adv_entry *entry, *tmp;
1363
1364         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1365                 list_del(&entry->list);
1366                 kfree(entry);
1367         }
1368
1369         BT_DBG("%s adv cache cleared", hdev->name);
1370
1371         return 0;
1372 }
1373
1374 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1375 {
1376         struct adv_entry *entry;
1377
1378         list_for_each_entry(entry, &hdev->adv_entries, list)
1379                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1380                         return entry;
1381
1382         return NULL;
1383 }
1384
1385 static inline int is_connectable_adv(u8 evt_type)
1386 {
1387         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1388                 return 1;
1389
1390         return 0;
1391 }
1392
/* Cache the sender of a connectable LE advertising report so that a
 * later connect request can recognize the advertiser. Returns 0 when
 * added or already cached, -EINVAL for non-connectable report types,
 * or -ENOMEM.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1420
/* Register HCI device. Assigns the first free hciX index (index 0 is
 * reserved for BR/EDR controllers), initializes all per-device state,
 * creates sysfs/rfkill/workqueue resources and schedules asynchronous
 * power-on. Returns the assigned id or a negative error.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A transport driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for power on/off work */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal: continue without */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on asynchronously; HCI_AUTO_OFF powers the device back
	 * down unless userspace takes over within the timeout. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the device-list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1544
/* Unregister HCI device: remove it from the global list, shut it
 * down, notify mgmt/listeners and free all per-device state. The
 * teardown order mirrors hci_register_dev in reverse.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* mgmt was only told about devices that finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device lists under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the initial reference taken in hci_register_dev */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1589
/* Suspend HCI device. Only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1597
/* Resume HCI device. Only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1605
/* Receive frame from HCI drivers. Takes ownership of @skb (freed on
 * error) and queues it for the rx tasklet. skb->dev must point at the
 * originating hci_dev. Returns 0 or -ENXIO when the device is neither
 * up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1629
/* Incrementally reassemble one HCI frame of @type from a driver byte
 * stream. Partial data is accumulated in hdev->reassembly[index]: the
 * packet header is collected first, then the payload length announced
 * by that header. Completed frames are delivered via hci_recv_frame().
 * Returns the number of input bytes NOT consumed, or a negative error.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the largest buffer this
		 * packet type may need and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the payload length from it
		 * and verify it fits in the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1738
/* Receive type-tagged fragments from HCI drivers and feed them into
 * the per-type reassembly buffer until whole frames can be delivered.
 * Returns the number of unconsumed bytes (0 when all data was taken)
 * or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* reassembly slot index is derived from the packet type */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1758
#define STREAM_REASSEMBLY 0

/* Receive an untyped byte stream from a driver (e.g. UART H4 style
 * transports) where the first byte of each frame carries the packet
 * type indicator. Returns the number of unconsumed bytes or a
 * negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1793
1794 /* ---- Interface to upper protocols ---- */
1795
1796 /* Register/Unregister protocols.
1797  * hci_task_lock is used to ensure that no tasks are running. */
/* Register an upper-layer protocol in its fixed hci_proto slot.
 * Returns 0, -EINVAL for an out-of-range id, or -EEXIST when the slot
 * is already taken.
 */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* hci_task_lock ensures the rx/tx tasks never observe a
	 * half-registered protocol */
	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1819
/* Remove an upper-layer protocol from its hci_proto slot.
 * Returns 0, -EINVAL for an out-of-range id, or -ENOENT when the
 * slot is already empty.
 */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* hci_task_lock ensures no rx/tx task is using the slot while
	 * it is being cleared */
	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1841
/* Add a callback set to the global hci_cb_list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1853
/* Remove a callback set from the global hci_cb_list. Always returns
 * 0; @cb must have been registered with hci_register_cb. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1865
/* Hand one outgoing frame to the transport driver. The skb is
 * consumed in all cases; skb->dev must identify the target hci_dev.
 * With promiscuous (raw socket) listeners present, a timestamped copy
 * is delivered to them first. Returns the driver's send result or
 * -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1889
/* Send HCI command: build a command packet (header + @plen bytes of
 * @param) and queue it on cmd_q for the command tasklet. Returns 0 or
 * -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so replies can
	 * be matched by the init state machine */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Actual transmission happens asynchronously in the cmd task */
	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1925
/* Get data from the previously sent command: returns a pointer to the
 * parameter payload of the last transmitted command, or NULL when no
 * command was sent or its opcode does not match @opcode.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Parameter payload follows the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1943
/* Send ACL data */

/* Prepend an ACL header to @skb. The handle field packs the
 * connection handle together with the PB/BC @flags. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1956
/* Queue an ACL frame (plus any fragments hanging off its frag_list)
 * for transmission on @queue. Fragments after the first get the
 * ACL_CONT flag, and the whole sequence is queued under the queue
 * lock so the scheduler never sees a partially queued frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
1997
1998 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1999 {
2000         struct hci_conn *conn = chan->conn;
2001         struct hci_dev *hdev = conn->hdev;
2002
2003         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2004
2005         skb->dev = (void *) hdev;
2006         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2007         hci_add_acl_hdr(skb, conn->handle, flags);
2008
2009         hci_queue_acl(conn, &chan->data_q, skb, flags);
2010
2011         tasklet_schedule(&hdev->tx_task);
2012 }
2013 EXPORT_SYMBOL(hci_send_acl);
2014
2015 /* Send SCO data */
2016 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2017 {
2018         struct hci_dev *hdev = conn->hdev;
2019         struct hci_sco_hdr hdr;
2020
2021         BT_DBG("%s len %d", hdev->name, skb->len);
2022
2023         hdr.handle = cpu_to_le16(conn->handle);
2024         hdr.dlen   = skb->len;
2025
2026         skb_push(skb, HCI_SCO_HDR_SIZE);
2027         skb_reset_transport_header(skb);
2028         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2029
2030         skb->dev = (void *) hdev;
2031         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2032
2033         skb_queue_tail(&conn->data_q, skb);
2034         tasklet_schedule(&hdev->tx_task);
2035 }
2036 EXPORT_SYMBOL(hci_send_sco);
2037
2038 /* ---- HCI TX task (outgoing data) ---- */
2039
2040 /* HCI Connection scheduler */
/* Connection scheduler helper: of all connections of the given link
 * type that have data queued and are usable, pick the one with the
 * fewest packets in flight (c->sent) and compute its fair share of the
 * controller's free buffers into *quote (at least 1; 0 when nothing is
 * schedulable).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* NOTE(review): min starts at ~0 (int -1); this relies on
		 * c->sent being unsigned so the comparison promotes min to
		 * UINT_MAX — confirm against struct hci_conn. */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE pool
			 * (le_mtu == 0) share the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of free buffers, but always allow at least
		 * one packet so the connection makes progress */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2094
2095 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2096 {
2097         struct hci_conn_hash *h = &hdev->conn_hash;
2098         struct hci_conn *c;
2099
2100         BT_ERR("%s link tx timeout", hdev->name);
2101
2102         /* Kill stalled connections */
2103         list_for_each_entry(c, &h->list, list) {
2104                 if (c->type == type && c->sent) {
2105                         BT_ERR("%s killing stalled connection %s",
2106                                 hdev->name, batostr(&c->dst));
2107                         hci_acl_disconn(c, 0x13);
2108                 }
2109         }
2110 }
2111
/* Channel scheduler helper: pick the next hci_chan to service for the
 * given link type.  Only channels whose head skb carries the highest
 * priority seen so far are eligible; among those, the channel on the
 * least busy connection (lowest conn->sent) wins.  *quote receives a
 * fair share of the controller's free buffers (at least 1).
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * fairness scan at that new level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* NOTE(review): relies on conn->sent being unsigned
			 * so min == ~0 compares as UINT_MAX — confirm
			 * against struct hci_conn. */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE pool (le_mtu == 0) means LE links
		 * share the ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of free buffers, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2189
/* Anti-starvation pass, run after a TX round that sent something:
 * channels that transmitted get their per-round counter cleared, while
 * the head skb of every channel that sent nothing is promoted to
 * HCI_PRIO_MAX - 1 so it wins the next hci_chan_sent() scan.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset its
			 * counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion level */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2236
/* Schedule queued ACL data: detect stalled links, then drain channels
 * in priority order, spending at most the controller's free ACL buffer
 * count (hdev->acl_cnt).
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting buffer count so we can tell below
	 * whether anything was sent this round */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything went out, promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2286
2287 /* Schedule SCO */
2288 static inline void hci_sched_sco(struct hci_dev *hdev)
2289 {
2290         struct hci_conn *conn;
2291         struct sk_buff *skb;
2292         int quote;
2293
2294         BT_DBG("%s", hdev->name);
2295
2296         if (!hci_conn_num(hdev, SCO_LINK))
2297                 return;
2298
2299         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2300                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2301                         BT_DBG("skb %p len %d", skb, skb->len);
2302                         hci_send_frame(skb);
2303
2304                         conn->sent++;
2305                         if (conn->sent == ~0)
2306                                 conn->sent = 0;
2307                 }
2308         }
2309 }
2310
2311 static inline void hci_sched_esco(struct hci_dev *hdev)
2312 {
2313         struct hci_conn *conn;
2314         struct sk_buff *skb;
2315         int quote;
2316
2317         BT_DBG("%s", hdev->name);
2318
2319         if (!hci_conn_num(hdev, ESCO_LINK))
2320                 return;
2321
2322         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2323                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2324                         BT_DBG("skb %p len %d", skb, skb->len);
2325                         hci_send_frame(skb);
2326
2327                         conn->sent++;
2328                         if (conn->sent == ~0)
2329                                 conn->sent = 0;
2330                 }
2331         }
2332 }
2333
/* Schedule queued LE data.  Mirrors hci_sched_acl(), except the buffer
 * accounting depends on whether the controller has a dedicated LE pool
 * (le_pkts != 0); otherwise LE traffic consumes ACL buffers.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Draw from whichever pool LE traffic uses on this controller */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything went out, promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2384
/* TX tasklet (outgoing data): under hci_task_lock, run the per-link-type
 * schedulers in turn and finally flush any raw (unknown type) packets
 * straight to the driver.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2411
2412 /* ----- HCI RX task (incoming data processing) ----- */
2413
2414 /* ACL data packet */
2415 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2416 {
2417         struct hci_acl_hdr *hdr = (void *) skb->data;
2418         struct hci_conn *conn;
2419         __u16 handle, flags;
2420
2421         skb_pull(skb, HCI_ACL_HDR_SIZE);
2422
2423         handle = __le16_to_cpu(hdr->handle);
2424         flags  = hci_flags(handle);
2425         handle = hci_handle(handle);
2426
2427         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2428
2429         hdev->stat.acl_rx++;
2430
2431         hci_dev_lock(hdev);
2432         conn = hci_conn_hash_lookup_handle(hdev, handle);
2433         hci_dev_unlock(hdev);
2434
2435         if (conn) {
2436                 register struct hci_proto *hp;
2437
2438                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2439
2440                 /* Send to upper protocol */
2441                 hp = hci_proto[HCI_PROTO_L2CAP];
2442                 if (hp && hp->recv_acldata) {
2443                         hp->recv_acldata(conn, skb, flags);
2444                         return;
2445                 }
2446         } else {
2447                 BT_ERR("%s ACL packet for unknown connection handle %d",
2448                         hdev->name, handle);
2449         }
2450
2451         kfree_skb(skb);
2452 }
2453
2454 /* SCO data packet */
2455 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2456 {
2457         struct hci_sco_hdr *hdr = (void *) skb->data;
2458         struct hci_conn *conn;
2459         __u16 handle;
2460
2461         skb_pull(skb, HCI_SCO_HDR_SIZE);
2462
2463         handle = __le16_to_cpu(hdr->handle);
2464
2465         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2466
2467         hdev->stat.sco_rx++;
2468
2469         hci_dev_lock(hdev);
2470         conn = hci_conn_hash_lookup_handle(hdev, handle);
2471         hci_dev_unlock(hdev);
2472
2473         if (conn) {
2474                 register struct hci_proto *hp;
2475
2476                 /* Send to upper protocol */
2477                 hp = hci_proto[HCI_PROTO_SCO];
2478                 if (hp && hp->recv_scodata) {
2479                         hp->recv_scodata(conn, skb);
2480                         return;
2481                 }
2482         } else {
2483                 BT_ERR("%s SCO packet for unknown connection handle %d",
2484                         hdev->name, handle);
2485         }
2486
2487         kfree_skb(skb);
2488 }
2489
/* RX tasklet: drain hdev->rx_q, copying frames to listening sockets in
 * promiscuous mode, discarding everything in RAW mode and data packets
 * during INIT, then dispatching each frame by its packet type.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw device: userspace handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to finish init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2544
/* CMD tasklet: if the controller has command credit (cmd_cnt), send the
 * next queued HCI command, keeping a clone in hdev->sent_cmd so the
 * completion path can match the opcode later (see hci_sent_cmd_data).
 * The command timer is armed to catch unresponsive controllers, except
 * during reset where it is cancelled instead.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command, if still held */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry on
			 * the next tasklet run */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2575
2576 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2577 {
2578         /* General inquiry access code (GIAC) */
2579         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2580         struct hci_cp_inquiry cp;
2581
2582         BT_DBG("%s", hdev->name);
2583
2584         if (test_bit(HCI_INQUIRY, &hdev->flags))
2585                 return -EINPROGRESS;
2586
2587         memset(&cp, 0, sizeof(cp));
2588         memcpy(&cp.lap, lap, sizeof(cp.lap));
2589         cp.length  = length;
2590
2591         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2592 }
2593
2594 int hci_cancel_inquiry(struct hci_dev *hdev)
2595 {
2596         BT_DBG("%s", hdev->name);
2597
2598         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2599                 return -EPERM;
2600
2601         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2602 }