Bluetooth: Add address type to device blacklist table
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
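
/* Usage sketch (illustrative only, not part of this file): issue a
 * synchronous command and consume the returned Command Complete skb.
 * The opcode and timeout chosen here are assumptions for the example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	// skb->data now holds the command's return parameters
 *	kfree_skb(skb);
 */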

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
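
/* Usage sketch (illustrative only, not part of this file): callers pass a
 * request-builder callback which queues one or more commands, in the same
 * shape as hci_scan_req() further down. example_req and the chosen opt
 * value are hypothetical.
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */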

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev))
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);

        if (lmp_le_capable(hdev))
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

        return 0;
}
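
/* Usage sketch (illustrative only): the entries created above appear under
 * debugfs, typically mounted at /sys/kernel/debug, e.g. for hci0:
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/static_address
 *	# cat /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 */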

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
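
/* Usage sketch (illustrative only): every successful hci_dev_get() must be
 * balanced by hci_dev_put(), as the ioctl helpers below do. The index 0 is
 * an arbitrary example.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	// ... use hdev ...
 *	hci_dev_put(hdev);
 */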

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
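
/* Userspace usage sketch (illustrative only, not kernel code): hci_inquiry()
 * backs the HCIINQUIRY ioctl on an HCI socket. A caller typically places the
 * request header and the response buffer back to back; the GIAC LAP 0x9e8b33
 * is stored little-endian. Field values here are example assumptions.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.num_rsp = 255;
 *	buf.ir.length  = 8;
 *	buf.ir.lap[0]  = 0x33;
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */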

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}
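
/* Userspace usage sketch (illustrative only, not kernel code): hci_dev_open()
 * backs the HCIDEVUP ioctl on an HCI socket; hci_dev_close() below backs
 * HCIDEVDOWN the same way.
 *
 *	ioctl(hci_sock_fd, HCIDEVUP, 0);	// bring up hci0
 */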

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
1440         flush_work(&hdev->cmd_work);
1441
1442         /* Drop queues */
1443         skb_queue_purge(&hdev->rx_q);
1444         skb_queue_purge(&hdev->cmd_q);
1445         skb_queue_purge(&hdev->raw_q);
1446
1447         /* Drop last sent command */
1448         if (hdev->sent_cmd) {
1449                 del_timer_sync(&hdev->cmd_timer);
1450                 kfree_skb(hdev->sent_cmd);
1451                 hdev->sent_cmd = NULL;
1452         }
1453
1454         kfree_skb(hdev->recv_evt);
1455         hdev->recv_evt = NULL;
1456
1457         /* After this point our queues are empty
1458          * and no tasks are scheduled. */
1459         hdev->close(hdev);
1460
1461         /* Clear flags */
1462         hdev->flags = 0;
1463         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1464
1465         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1466                 if (hdev->dev_type == HCI_BREDR) {
1467                         hci_dev_lock(hdev);
1468                         mgmt_powered(hdev, 0);
1469                         hci_dev_unlock(hdev);
1470                 }
1471         }
1472
1473         /* Controller radio is available but is currently powered down */
1474         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1475
1476         memset(hdev->eir, 0, sizeof(hdev->eir));
1477         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1478
1479         hci_req_unlock(hdev);
1480
1481         hci_dev_put(hdev);
1482         return 0;
1483 }
1484
1485 int hci_dev_close(__u16 dev)
1486 {
1487         struct hci_dev *hdev;
1488         int err;
1489
1490         hdev = hci_dev_get(dev);
1491         if (!hdev)
1492                 return -ENODEV;
1493
1494         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1495                 err = -EBUSY;
1496                 goto done;
1497         }
1498
1499         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1500                 cancel_delayed_work(&hdev->power_off);
1501
1502         err = hci_dev_do_close(hdev);
1503
1504 done:
1505         hci_dev_put(hdev);
1506         return err;
1507 }
1508
1509 int hci_dev_reset(__u16 dev)
1510 {
1511         struct hci_dev *hdev;
1512         int ret = 0;
1513
1514         hdev = hci_dev_get(dev);
1515         if (!hdev)
1516                 return -ENODEV;
1517
1518         hci_req_lock(hdev);
1519
1520         if (!test_bit(HCI_UP, &hdev->flags)) {
1521                 ret = -ENETDOWN;
1522                 goto done;
1523         }
1524
1525         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1526                 ret = -EBUSY;
1527                 goto done;
1528         }
1529
1530         /* Drop queues */
1531         skb_queue_purge(&hdev->rx_q);
1532         skb_queue_purge(&hdev->cmd_q);
1533
1534         hci_dev_lock(hdev);
1535         hci_inquiry_cache_flush(hdev);
1536         hci_conn_hash_flush(hdev);
1537         hci_dev_unlock(hdev);
1538
1539         if (hdev->flush)
1540                 hdev->flush(hdev);
1541
1542         atomic_set(&hdev->cmd_cnt, 1);
1543         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1544
1545         if (!test_bit(HCI_RAW, &hdev->flags))
1546                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1547
1548 done:
1549         hci_req_unlock(hdev);
1550         hci_dev_put(hdev);
1551         return ret;
1552 }
1553
1554 int hci_dev_reset_stat(__u16 dev)
1555 {
1556         struct hci_dev *hdev;
1557         int ret = 0;
1558
1559         hdev = hci_dev_get(dev);
1560         if (!hdev)
1561                 return -ENODEV;
1562
1563         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1564                 ret = -EBUSY;
1565                 goto done;
1566         }
1567
1568         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1569
1570 done:
1571         hci_dev_put(hdev);
1572         return ret;
1573 }
1574
1575 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1576 {
1577         struct hci_dev *hdev;
1578         struct hci_dev_req dr;
1579         int err = 0;
1580
1581         if (copy_from_user(&dr, arg, sizeof(dr)))
1582                 return -EFAULT;
1583
1584         hdev = hci_dev_get(dr.dev_id);
1585         if (!hdev)
1586                 return -ENODEV;
1587
1588         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1589                 err = -EBUSY;
1590                 goto done;
1591         }
1592
1593         if (hdev->dev_type != HCI_BREDR) {
1594                 err = -EOPNOTSUPP;
1595                 goto done;
1596         }
1597
1598         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1599                 err = -EOPNOTSUPP;
1600                 goto done;
1601         }
1602
1603         switch (cmd) {
1604         case HCISETAUTH:
1605                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1606                                    HCI_INIT_TIMEOUT);
1607                 break;
1608
1609         case HCISETENCRYPT:
1610                 if (!lmp_encrypt_capable(hdev)) {
1611                         err = -EOPNOTSUPP;
1612                         break;
1613                 }
1614
1615                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1616                         /* Auth must be enabled first */
1617                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1618                                            HCI_INIT_TIMEOUT);
1619                         if (err)
1620                                 break;
1621                 }
1622
1623                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1624                                    HCI_INIT_TIMEOUT);
1625                 break;
1626
1627         case HCISETSCAN:
1628                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1629                                    HCI_INIT_TIMEOUT);
1630                 break;
1631
1632         case HCISETLINKPOL:
1633                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1634                                    HCI_INIT_TIMEOUT);
1635                 break;
1636
1637         case HCISETLINKMODE:
1638                 hdev->link_mode = ((__u16) dr.dev_opt) &
1639                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1640                 break;
1641
1642         case HCISETPTYPE:
1643                 hdev->pkt_type = (__u16) dr.dev_opt;
1644                 break;
1645
1646         case HCISETACLMTU:
1647                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1648                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1649                 break;
1650
1651         case HCISETSCOMTU:
1652                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1653                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1654                 break;
1655
1656         default:
1657                 err = -EINVAL;
1658                 break;
1659         }
1660
1661 done:
1662         hci_dev_put(hdev);
1663         return err;
1664 }
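
/* The HCISETACLMTU/HCISETSCOMTU handlers above read dev_opt as two
 * consecutive 16-bit words in host memory order: word 0 is the packet
 * count and word 1 the MTU.  An illustrative userspace sketch, assuming
 * a little-endian host and an already-open raw HCI socket sk (both
 * assumptions, not taken from this file):
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;			(device hci0)
 *	dr.dev_opt = (1021 << 16) | 8;	(acl_mtu = 1021, acl_pkts = 8)
 *	if (ioctl(sk, HCISETACLMTU, &dr) < 0)
 *		perror("HCISETACLMTU");
 */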
1665
1666 int hci_get_dev_list(void __user *arg)
1667 {
1668         struct hci_dev *hdev;
1669         struct hci_dev_list_req *dl;
1670         struct hci_dev_req *dr;
1671         int n = 0, size, err;
1672         __u16 dev_num;
1673
1674         if (get_user(dev_num, (__u16 __user *) arg))
1675                 return -EFAULT;
1676
1677         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1678                 return -EINVAL;
1679
1680         size = sizeof(*dl) + dev_num * sizeof(*dr);
1681
1682         dl = kzalloc(size, GFP_KERNEL);
1683         if (!dl)
1684                 return -ENOMEM;
1685
1686         dr = dl->dev_req;
1687
1688         read_lock(&hci_dev_list_lock);
1689         list_for_each_entry(hdev, &hci_dev_list, list) {
1690                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1691                         cancel_delayed_work(&hdev->power_off);
1692
1693                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1694                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1695
1696                 (dr + n)->dev_id  = hdev->id;
1697                 (dr + n)->dev_opt = hdev->flags;
1698
1699                 if (++n >= dev_num)
1700                         break;
1701         }
1702         read_unlock(&hci_dev_list_lock);
1703
1704         dl->dev_num = n;
1705         size = sizeof(*dl) + n * sizeof(*dr);
1706
1707         err = copy_to_user(arg, dl, size);
1708         kfree(dl);
1709
1710         return err ? -EFAULT : 0;
1711 }
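
/* Matching userspace sketch for the HCIGETDEVLIST ioctl served above
 * (illustrative only; the 16-entry buffer size is an arbitrary choice,
 * not something this file mandates):
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	if (!dl)
 *		return -ENOMEM;
 *	dl->dev_num = 16;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 *	free(dl);
 */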
1712
1713 int hci_get_dev_info(void __user *arg)
1714 {
1715         struct hci_dev *hdev;
1716         struct hci_dev_info di;
1717         int err = 0;
1718
1719         if (copy_from_user(&di, arg, sizeof(di)))
1720                 return -EFAULT;
1721
1722         hdev = hci_dev_get(di.dev_id);
1723         if (!hdev)
1724                 return -ENODEV;
1725
1726         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1727                 cancel_delayed_work_sync(&hdev->power_off);
1728
1729         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1730                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1731
1732         strcpy(di.name, hdev->name);
1733         di.bdaddr   = hdev->bdaddr;
1734         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1735         di.flags    = hdev->flags;
1736         di.pkt_type = hdev->pkt_type;
1737         if (lmp_bredr_capable(hdev)) {
1738                 di.acl_mtu  = hdev->acl_mtu;
1739                 di.acl_pkts = hdev->acl_pkts;
1740                 di.sco_mtu  = hdev->sco_mtu;
1741                 di.sco_pkts = hdev->sco_pkts;
1742         } else {
1743                 di.acl_mtu  = hdev->le_mtu;
1744                 di.acl_pkts = hdev->le_pkts;
1745                 di.sco_mtu  = 0;
1746                 di.sco_pkts = 0;
1747         }
1748         di.link_policy = hdev->link_policy;
1749         di.link_mode   = hdev->link_mode;
1750
1751         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1752         memcpy(&di.features, &hdev->features, sizeof(di.features));
1753
1754         if (copy_to_user(arg, &di, sizeof(di)))
1755                 err = -EFAULT;
1756
1757         hci_dev_put(hdev);
1758
1759         return err;
1760 }
1761
1762 /* ---- Interface to HCI drivers ---- */
1763
1764 static int hci_rfkill_set_block(void *data, bool blocked)
1765 {
1766         struct hci_dev *hdev = data;
1767
1768         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1769
1770         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1771                 return -EBUSY;
1772
1773         if (blocked) {
1774                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1775                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1776                         hci_dev_do_close(hdev);
1777         } else {
1778                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1779         }
1780
1781         return 0;
1782 }
1783
1784 static const struct rfkill_ops hci_rfkill_ops = {
1785         .set_block = hci_rfkill_set_block,
1786 };
1787
1788 static void hci_power_on(struct work_struct *work)
1789 {
1790         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1791         int err;
1792
1793         BT_DBG("%s", hdev->name);
1794
1795         err = hci_dev_do_open(hdev);
1796         if (err < 0) {
1797                 mgmt_set_powered_failed(hdev, err);
1798                 return;
1799         }
1800
1801         /* During the HCI setup phase, a few error conditions are
1802          * ignored and they need to be checked now. If they are still
1803          * valid, it is important to turn the device back off.
1804          */
1805         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1806             (hdev->dev_type == HCI_BREDR &&
1807              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1808              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1809                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1810                 hci_dev_do_close(hdev);
1811         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1812                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1813                                    HCI_AUTO_OFF_TIMEOUT);
1814         }
1815
1816         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1817                 mgmt_index_added(hdev);
1818 }
1819
1820 static void hci_power_off(struct work_struct *work)
1821 {
1822         struct hci_dev *hdev = container_of(work, struct hci_dev,
1823                                             power_off.work);
1824
1825         BT_DBG("%s", hdev->name);
1826
1827         hci_dev_do_close(hdev);
1828 }
1829
1830 static void hci_discov_off(struct work_struct *work)
1831 {
1832         struct hci_dev *hdev;
1833
1834         hdev = container_of(work, struct hci_dev, discov_off.work);
1835
1836         BT_DBG("%s", hdev->name);
1837
1838         mgmt_discoverable_timeout(hdev);
1839 }
1840
1841 int hci_uuids_clear(struct hci_dev *hdev)
1842 {
1843         struct bt_uuid *uuid, *tmp;
1844
1845         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1846                 list_del(&uuid->list);
1847                 kfree(uuid);
1848         }
1849
1850         return 0;
1851 }
1852
1853 int hci_link_keys_clear(struct hci_dev *hdev)
1854 {
1855         struct list_head *p, *n;
1856
1857         list_for_each_safe(p, n, &hdev->link_keys) {
1858                 struct link_key *key;
1859
1860                 key = list_entry(p, struct link_key, list);
1861
1862                 list_del(p);
1863                 kfree(key);
1864         }
1865
1866         return 0;
1867 }
1868
1869 int hci_smp_ltks_clear(struct hci_dev *hdev)
1870 {
1871         struct smp_ltk *k, *tmp;
1872
1873         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1874                 list_del(&k->list);
1875                 kfree(k);
1876         }
1877
1878         return 0;
1879 }
1880
1881 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1882 {
1883         struct link_key *k;
1884
1885         list_for_each_entry(k, &hdev->link_keys, list)
1886                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1887                         return k;
1888
1889         return NULL;
1890 }
1891
1892 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1893                                u8 key_type, u8 old_key_type)
1894 {
1895         /* Legacy key */
1896         if (key_type < 0x03)
1897                 return true;
1898
1899         /* Debug keys are insecure so don't store them persistently */
1900         if (key_type == HCI_LK_DEBUG_COMBINATION)
1901                 return false;
1902
1903         /* Changed combination key and there's no previous one */
1904         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1905                 return false;
1906
1907         /* Security mode 3 case */
1908         if (!conn)
1909                 return true;
1910
1911         /* Neither the local nor the remote side requested no-bonding */
1912         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1913                 return true;
1914
1915         /* Local side had dedicated bonding as requirement */
1916         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1917                 return true;
1918
1919         /* Remote side had dedicated bonding as requirement */
1920         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1921                 return true;
1922
1923         /* If none of the above criteria match, then don't store the key
1924          * persistently */
1925         return false;
1926 }
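
/* Worked example of the checks above: a legacy Combination Key (0x01)
 * is always stored, since any key_type below 0x03 passes the first
 * test.  An Unauthenticated Combination Key (0x04) negotiated while
 * both sides advertised auth_type 0x00 (no bonding) fails every test
 * and is treated as non-persistent.
 */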
1927
1928 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1929 {
1930         struct smp_ltk *k;
1931
1932         list_for_each_entry(k, &hdev->long_term_keys, list) {
1933                 if (k->ediv != ediv ||
1934                     memcmp(rand, k->rand, sizeof(k->rand)))
1935                         continue;
1936
1937                 return k;
1938         }
1939
1940         return NULL;
1941 }
1942
1943 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1944                                      u8 addr_type)
1945 {
1946         struct smp_ltk *k;
1947
1948         list_for_each_entry(k, &hdev->long_term_keys, list)
1949                 if (addr_type == k->bdaddr_type &&
1950                     bacmp(bdaddr, &k->bdaddr) == 0)
1951                         return k;
1952
1953         return NULL;
1954 }
1955
1956 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1957                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1958 {
1959         struct link_key *key, *old_key;
1960         u8 old_key_type;
1961         bool persistent;
1962
1963         old_key = hci_find_link_key(hdev, bdaddr);
1964         if (old_key) {
1965                 old_key_type = old_key->type;
1966                 key = old_key;
1967         } else {
1968                 old_key_type = conn ? conn->key_type : 0xff;
1969                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1970                 if (!key)
1971                         return -ENOMEM;
1972                 list_add(&key->list, &hdev->link_keys);
1973         }
1974
1975         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1976
1977         /* Some buggy controller combinations generate a changed
1978          * combination key for legacy pairing even when there's no
1979          * previous key */
1980         if (type == HCI_LK_CHANGED_COMBINATION &&
1981             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1982                 type = HCI_LK_COMBINATION;
1983                 if (conn)
1984                         conn->key_type = type;
1985         }
1986
1987         bacpy(&key->bdaddr, bdaddr);
1988         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1989         key->pin_len = pin_len;
1990
1991         if (type == HCI_LK_CHANGED_COMBINATION)
1992                 key->type = old_key_type;
1993         else
1994                 key->type = type;
1995
1996         if (!new_key)
1997                 return 0;
1998
1999         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2000
2001         mgmt_new_link_key(hdev, key, persistent);
2002
2003         if (conn)
2004                 conn->flush_key = !persistent;
2005
2006         return 0;
2007 }
2008
2009 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2010                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2011                 ediv, u8 rand[8])
2012 {
2013         struct smp_ltk *key, *old_key;
2014
2015         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2016                 return 0;
2017
2018         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2019         if (old_key)
2020                 key = old_key;
2021         else {
2022                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2023                 if (!key)
2024                         return -ENOMEM;
2025                 list_add(&key->list, &hdev->long_term_keys);
2026         }
2027
2028         bacpy(&key->bdaddr, bdaddr);
2029         key->bdaddr_type = addr_type;
2030         memcpy(key->val, tk, sizeof(key->val));
2031         key->authenticated = authenticated;
2032         key->ediv = ediv;
2033         key->enc_size = enc_size;
2034         key->type = type;
2035         memcpy(key->rand, rand, sizeof(key->rand));
2036
2037         if (!new_key)
2038                 return 0;
2039
2040         if (type & HCI_SMP_LTK)
2041                 mgmt_new_ltk(hdev, key, 1);
2042
2043         return 0;
2044 }
2045
2046 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2047 {
2048         struct link_key *key;
2049
2050         key = hci_find_link_key(hdev, bdaddr);
2051         if (!key)
2052                 return -ENOENT;
2053
2054         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2055
2056         list_del(&key->list);
2057         kfree(key);
2058
2059         return 0;
2060 }
2061
2062 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2063 {
2064         struct smp_ltk *k, *tmp;
2065
2066         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2067                 if (bacmp(bdaddr, &k->bdaddr))
2068                         continue;
2069
2070                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2071
2072                 list_del(&k->list);
2073                 kfree(k);
2074         }
2075
2076         return 0;
2077 }
2078
2079 /* HCI command timer function */
2080 static void hci_cmd_timeout(unsigned long arg)
2081 {
2082         struct hci_dev *hdev = (void *) arg;
2083
2084         if (hdev->sent_cmd) {
2085                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2086                 u16 opcode = __le16_to_cpu(sent->opcode);
2087
2088                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2089         } else {
2090                 BT_ERR("%s command tx timeout", hdev->name);
2091         }
2092
2093         atomic_set(&hdev->cmd_cnt, 1);
2094         queue_work(hdev->workqueue, &hdev->cmd_work);
2095 }
2096
2097 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2098                                           bdaddr_t *bdaddr)
2099 {
2100         struct oob_data *data;
2101
2102         list_for_each_entry(data, &hdev->remote_oob_data, list)
2103                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2104                         return data;
2105
2106         return NULL;
2107 }
2108
2109 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2110 {
2111         struct oob_data *data;
2112
2113         data = hci_find_remote_oob_data(hdev, bdaddr);
2114         if (!data)
2115                 return -ENOENT;
2116
2117         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2118
2119         list_del(&data->list);
2120         kfree(data);
2121
2122         return 0;
2123 }
2124
2125 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2126 {
2127         struct oob_data *data, *n;
2128
2129         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2130                 list_del(&data->list);
2131                 kfree(data);
2132         }
2133
2134         return 0;
2135 }
2136
2137 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2138                             u8 *randomizer)
2139 {
2140         struct oob_data *data;
2141
2142         data = hci_find_remote_oob_data(hdev, bdaddr);
2143
2144         if (!data) {
2145                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2146                 if (!data)
2147                         return -ENOMEM;
2148
2149                 bacpy(&data->bdaddr, bdaddr);
2150                 list_add(&data->list, &hdev->remote_oob_data);
2151         }
2152
2153         memcpy(data->hash, hash, sizeof(data->hash));
2154         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2155
2156         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2157
2158         return 0;
2159 }
2160
2161 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2162                                          bdaddr_t *bdaddr, u8 type)
2163 {
2164         struct bdaddr_list *b;
2165
2166         list_for_each_entry(b, &hdev->blacklist, list) {
2167                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2168                         return b;
2169         }
2170
2171         return NULL;
2172 }
2173
2174 int hci_blacklist_clear(struct hci_dev *hdev)
2175 {
2176         struct list_head *p, *n;
2177
2178         list_for_each_safe(p, n, &hdev->blacklist) {
2179                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2180
2181                 list_del(p);
2182                 kfree(b);
2183         }
2184
2185         return 0;
2186 }
2187
2188 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2189 {
2190         struct bdaddr_list *entry;
2191
2192         if (!bacmp(bdaddr, BDADDR_ANY))
2193                 return -EBADF;
2194
2195         if (hci_blacklist_lookup(hdev, bdaddr, type))
2196                 return -EEXIST;
2197
2198         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2199         if (!entry)
2200                 return -ENOMEM;
2201
2202         bacpy(&entry->bdaddr, bdaddr);
2203         entry->bdaddr_type = type;
2204
2205         list_add(&entry->list, &hdev->blacklist);
2206
2207         return mgmt_device_blocked(hdev, bdaddr, type);
2208 }
2209
2210 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2211 {
2212         struct bdaddr_list *entry;
2213
2214         if (!bacmp(bdaddr, BDADDR_ANY))
2215                 return hci_blacklist_clear(hdev);
2216
2217         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2218         if (!entry)
2219                 return -ENOENT;
2220
2221         list_del(&entry->list);
2222         kfree(entry);
2223
2224         return mgmt_device_unblocked(hdev, bdaddr, type);
2225 }
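
/* The blacklist helpers above are keyed on both address and address
 * type, so the same public address can be blocked for BR/EDR while
 * remaining usable over LE.  A minimal sketch of the expected mgmt-side
 * usage, with peer_bdaddr assumed to hold a valid address (illustrative,
 * not taken from this file):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &peer_bdaddr, BDADDR_LE_PUBLIC);
 *	hci_dev_unlock(hdev);
 */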
2226
2227 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2228 {
2229         if (status) {
2230                 BT_ERR("Failed to start inquiry: status %d", status);
2231
2232                 hci_dev_lock(hdev);
2233                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2234                 hci_dev_unlock(hdev);
2235                 return;
2236         }
2237 }
2238
2239 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2240 {
2241         /* General inquiry access code (GIAC) */
2242         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2243         struct hci_request req;
2244         struct hci_cp_inquiry cp;
2245         int err;
2246
2247         if (status) {
2248                 BT_ERR("Failed to disable LE scanning: status %d", status);
2249                 return;
2250         }
2251
2252         switch (hdev->discovery.type) {
2253         case DISCOV_TYPE_LE:
2254                 hci_dev_lock(hdev);
2255                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2256                 hci_dev_unlock(hdev);
2257                 break;
2258
2259         case DISCOV_TYPE_INTERLEAVED:
2260                 hci_req_init(&req, hdev);
2261
2262                 memset(&cp, 0, sizeof(cp));
2263                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2264                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2265                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2266
2267                 hci_dev_lock(hdev);
2268
2269                 hci_inquiry_cache_flush(hdev);
2270
2271                 err = hci_req_run(&req, inquiry_complete);
2272                 if (err) {
2273                         BT_ERR("Inquiry request failed: err %d", err);
2274                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2275                 }
2276
2277                 hci_dev_unlock(hdev);
2278                 break;
2279         }
2280 }
2281
2282 static void le_scan_disable_work(struct work_struct *work)
2283 {
2284         struct hci_dev *hdev = container_of(work, struct hci_dev,
2285                                             le_scan_disable.work);
2286         struct hci_cp_le_set_scan_enable cp;
2287         struct hci_request req;
2288         int err;
2289
2290         BT_DBG("%s", hdev->name);
2291
2292         hci_req_init(&req, hdev);
2293
2294         memset(&cp, 0, sizeof(cp));
2295         cp.enable = LE_SCAN_DISABLE;
2296         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2297
2298         err = hci_req_run(&req, le_scan_disable_work_complete);
2299         if (err)
2300                 BT_ERR("Disable LE scanning request failed: err %d", err);
2301 }
2302
2303 /* Alloc HCI device */
2304 struct hci_dev *hci_alloc_dev(void)
2305 {
2306         struct hci_dev *hdev;
2307
2308         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2309         if (!hdev)
2310                 return NULL;
2311
2312         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2313         hdev->esco_type = (ESCO_HV1);
2314         hdev->link_mode = (HCI_LM_ACCEPT);
2315         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2316         hdev->io_capability = 0x03;     /* No Input No Output */
2317         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2318         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2319
2320         hdev->sniff_max_interval = 800; /* 500 ms (0.625 ms slots) */
2321         hdev->sniff_min_interval = 80;  /* 50 ms */
2322
2323         hdev->le_scan_interval = 0x0060;        /* 60 ms */
2324         hdev->le_scan_window = 0x0030;          /* 30 ms */
2325
2326         mutex_init(&hdev->lock);
2327         mutex_init(&hdev->req_lock);
2328
2329         INIT_LIST_HEAD(&hdev->mgmt_pending);
2330         INIT_LIST_HEAD(&hdev->blacklist);
2331         INIT_LIST_HEAD(&hdev->uuids);
2332         INIT_LIST_HEAD(&hdev->link_keys);
2333         INIT_LIST_HEAD(&hdev->long_term_keys);
2334         INIT_LIST_HEAD(&hdev->remote_oob_data);
2335         INIT_LIST_HEAD(&hdev->conn_hash.list);
2336
2337         INIT_WORK(&hdev->rx_work, hci_rx_work);
2338         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2339         INIT_WORK(&hdev->tx_work, hci_tx_work);
2340         INIT_WORK(&hdev->power_on, hci_power_on);
2341
2342         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2343         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2344         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2345
2346         skb_queue_head_init(&hdev->rx_q);
2347         skb_queue_head_init(&hdev->cmd_q);
2348         skb_queue_head_init(&hdev->raw_q);
2349
2350         init_waitqueue_head(&hdev->req_wait_q);
2351
2352         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2353
2354         hci_init_sysfs(hdev);
2355         discovery_init(hdev);
2356
2357         return hdev;
2358 }
2359 EXPORT_SYMBOL(hci_alloc_dev);
2360
2361 /* Free HCI device */
2362 void hci_free_dev(struct hci_dev *hdev)
2363 {
2364         /* Will be freed via the device release callback */
2365         put_device(&hdev->dev);
2366 }
2367 EXPORT_SYMBOL(hci_free_dev);
2368
2369 /* Register HCI device */
2370 int hci_register_dev(struct hci_dev *hdev)
2371 {
2372         int id, error;
2373
2374         if (!hdev->open || !hdev->close)
2375                 return -EINVAL;
2376
2377         /* Do not allow HCI_AMP devices to register at index 0,
2378          * so the index can be used as the AMP controller ID.
2379          */
2380         switch (hdev->dev_type) {
2381         case HCI_BREDR:
2382                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2383                 break;
2384         case HCI_AMP:
2385                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2386                 break;
2387         default:
2388                 return -EINVAL;
2389         }
2390
2391         if (id < 0)
2392                 return id;
2393
2394         sprintf(hdev->name, "hci%d", id);
2395         hdev->id = id;
2396
2397         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2398
2399         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2400                                           WQ_MEM_RECLAIM, 1, hdev->name);
2401         if (!hdev->workqueue) {
2402                 error = -ENOMEM;
2403                 goto err;
2404         }
2405
2406         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2407                                               WQ_MEM_RECLAIM, 1, hdev->name);
2408         if (!hdev->req_workqueue) {
2409                 destroy_workqueue(hdev->workqueue);
2410                 error = -ENOMEM;
2411                 goto err;
2412         }
2413
2414         error = hci_add_sysfs(hdev);
2415         if (error < 0)
2416                 goto err_wqueue;
2417
2418         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2419                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2420                                     hdev);
2421         if (hdev->rfkill) {
2422                 if (rfkill_register(hdev->rfkill) < 0) {
2423                         rfkill_destroy(hdev->rfkill);
2424                         hdev->rfkill = NULL;
2425                 }
2426         }
2427
2428         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2429                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2430
2431         set_bit(HCI_SETUP, &hdev->dev_flags);
2432         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2433
2434         if (hdev->dev_type == HCI_BREDR) {
2435                 /* Assume BR/EDR support until proven otherwise (such as
2436                  * through reading supported features during init).
2437                  */
2438                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2439         }
2440
2441         write_lock(&hci_dev_list_lock);
2442         list_add(&hdev->list, &hci_dev_list);
2443         write_unlock(&hci_dev_list_lock);
2444
2445         hci_notify(hdev, HCI_DEV_REG);
2446         hci_dev_hold(hdev);
2447
2448         queue_work(hdev->req_workqueue, &hdev->power_on);
2449
2450         return id;
2451
2452 err_wqueue:
2453         destroy_workqueue(hdev->workqueue);
2454         destroy_workqueue(hdev->req_workqueue);
2455 err:
2456         ida_simple_remove(&hci_index_ida, hdev->id);
2457
2458         return error;
2459 }
2460 EXPORT_SYMBOL(hci_register_dev);
2461
2462 /* Unregister HCI device */
2463 void hci_unregister_dev(struct hci_dev *hdev)
2464 {
2465         int i, id;
2466
2467         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2468
2469         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2470
2471         id = hdev->id;
2472
2473         write_lock(&hci_dev_list_lock);
2474         list_del(&hdev->list);
2475         write_unlock(&hci_dev_list_lock);
2476
2477         hci_dev_do_close(hdev);
2478
2479         for (i = 0; i < NUM_REASSEMBLY; i++)
2480                 kfree_skb(hdev->reassembly[i]);
2481
2482         cancel_work_sync(&hdev->power_on);
2483
2484         if (!test_bit(HCI_INIT, &hdev->flags) &&
2485             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2486                 hci_dev_lock(hdev);
2487                 mgmt_index_removed(hdev);
2488                 hci_dev_unlock(hdev);
2489         }
2490
2491         /* mgmt_index_removed should take care of emptying the
2492          * pending list */
2493         BUG_ON(!list_empty(&hdev->mgmt_pending));
2494
2495         hci_notify(hdev, HCI_DEV_UNREG);
2496
2497         if (hdev->rfkill) {
2498                 rfkill_unregister(hdev->rfkill);
2499                 rfkill_destroy(hdev->rfkill);
2500         }
2501
2502         hci_del_sysfs(hdev);
2503
2504         destroy_workqueue(hdev->workqueue);
2505         destroy_workqueue(hdev->req_workqueue);
2506
2507         hci_dev_lock(hdev);
2508         hci_blacklist_clear(hdev);
2509         hci_uuids_clear(hdev);
2510         hci_link_keys_clear(hdev);
2511         hci_smp_ltks_clear(hdev);
2512         hci_remote_oob_data_clear(hdev);
2513         hci_dev_unlock(hdev);
2514
2515         hci_dev_put(hdev);
2516
2517         ida_simple_remove(&hci_index_ida, id);
2518 }
2519 EXPORT_SYMBOL(hci_unregister_dev);
2520
2521 /* Suspend HCI device */
2522 int hci_suspend_dev(struct hci_dev *hdev)
2523 {
2524         hci_notify(hdev, HCI_DEV_SUSPEND);
2525         return 0;
2526 }
2527 EXPORT_SYMBOL(hci_suspend_dev);
2528
2529 /* Resume HCI device */
2530 int hci_resume_dev(struct hci_dev *hdev)
2531 {
2532         hci_notify(hdev, HCI_DEV_RESUME);
2533         return 0;
2534 }
2535 EXPORT_SYMBOL(hci_resume_dev);
2536
2537 /* Receive frame from HCI drivers */
2538 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2539 {
2540         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2541                       !test_bit(HCI_INIT, &hdev->flags))) {
2542                 kfree_skb(skb);
2543                 return -ENXIO;
2544         }
2545
2546         /* Incoming skb */
2547         bt_cb(skb)->incoming = 1;
2548
2549         /* Time stamp */
2550         __net_timestamp(skb);
2551
2552         skb_queue_tail(&hdev->rx_q, skb);
2553         queue_work(hdev->workqueue, &hdev->rx_work);
2554
2555         return 0;
2556 }
2557 EXPORT_SYMBOL(hci_recv_frame);
2558
2559 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2560                           int count, __u8 index)
2561 {
2562         int len = 0;
2563         int hlen = 0;
2564         int remain = count;
2565         struct sk_buff *skb;
2566         struct bt_skb_cb *scb;
2567
2568         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2569             index >= NUM_REASSEMBLY)
2570                 return -EILSEQ;
2571
2572         skb = hdev->reassembly[index];
2573
2574         if (!skb) {
2575                 switch (type) {
2576                 case HCI_ACLDATA_PKT:
2577                         len = HCI_MAX_FRAME_SIZE;
2578                         hlen = HCI_ACL_HDR_SIZE;
2579                         break;
2580                 case HCI_EVENT_PKT:
2581                         len = HCI_MAX_EVENT_SIZE;
2582                         hlen = HCI_EVENT_HDR_SIZE;
2583                         break;
2584                 case HCI_SCODATA_PKT:
2585                         len = HCI_MAX_SCO_SIZE;
2586                         hlen = HCI_SCO_HDR_SIZE;
2587                         break;
2588                 }
2589
2590                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2591                 if (!skb)
2592                         return -ENOMEM;
2593
2594                 scb = (void *) skb->cb;
2595                 scb->expect = hlen;
2596                 scb->pkt_type = type;
2597
2598                 hdev->reassembly[index] = skb;
2599         }
2600
2601         while (count) {
2602                 scb = (void *) skb->cb;
2603                 len = min_t(uint, scb->expect, count);
2604
2605                 memcpy(skb_put(skb, len), data, len);
2606
2607                 count -= len;
2608                 data += len;
2609                 scb->expect -= len;
2610                 remain = count;
2611
2612                 switch (type) {
2613                 case HCI_EVENT_PKT:
2614                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2615                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2616                                 scb->expect = h->plen;
2617
2618                                 if (skb_tailroom(skb) < scb->expect) {
2619                                         kfree_skb(skb);
2620                                         hdev->reassembly[index] = NULL;
2621                                         return -ENOMEM;
2622                                 }
2623                         }
2624                         break;
2625
2626                 case HCI_ACLDATA_PKT:
2627                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2628                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2629                                 scb->expect = __le16_to_cpu(h->dlen);
2630
2631                                 if (skb_tailroom(skb) < scb->expect) {
2632                                         kfree_skb(skb);
2633                                         hdev->reassembly[index] = NULL;
2634                                         return -ENOMEM;
2635                                 }
2636                         }
2637                         break;
2638
2639                 case HCI_SCODATA_PKT:
2640                         if (skb->len == HCI_SCO_HDR_SIZE) {
2641                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2642                                 scb->expect = h->dlen;
2643
2644                                 if (skb_tailroom(skb) < scb->expect) {
2645                                         kfree_skb(skb);
2646                                         hdev->reassembly[index] = NULL;
2647                                         return -ENOMEM;
2648                                 }
2649                         }
2650                         break;
2651                 }
2652
2653                 if (scb->expect == 0) {
2654                         /* Complete frame */
2655
2656                         bt_cb(skb)->pkt_type = type;
2657                         hci_recv_frame(hdev, skb);
2658
2659                         hdev->reassembly[index] = NULL;
2660                         return remain;
2661                 }
2662         }
2663
2664         return remain;
2665 }
2666
2667 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2668 {
2669         int rem = 0;
2670
2671         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2672                 return -EILSEQ;
2673
2674         while (count) {
2675                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2676                 if (rem < 0)
2677                         return rem;
2678
2679                 data += (count - rem);
2680                 count = rem;
2681         }
2682
2683         return rem;
2684 }
2685 EXPORT_SYMBOL(hci_recv_fragment);
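
/* A driver with a packetized transport can hand arbitrarily sized
 * chunks to the reassembly machinery above; minimal sketch, assuming
 * buf/len come from the driver's receive path and carry event packets
 * (illustrative only):
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Event reassembly failed (%d)", err);
 */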
2686
2687 #define STREAM_REASSEMBLY 0
2688
2689 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2690 {
2691         int type;
2692         int rem = 0;
2693
2694         while (count) {
2695                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2696
2697                 if (!skb) {
2698                         struct { char type; } *pkt;
2699
2700                         /* Start of the frame */
2701                         pkt = data;
2702                         type = pkt->type;
2703
2704                         data++;
2705                         count--;
2706                 } else
2707                         type = bt_cb(skb)->pkt_type;
2708
2709                 rem = hci_reassembly(hdev, type, data, count,
2710                                      STREAM_REASSEMBLY);
2711                 if (rem < 0)
2712                         return rem;
2713
2714                 data += (count - rem);
2715                 count = rem;
2716         }
2717
2718         return rem;
2719 }
2720 EXPORT_SYMBOL(hci_recv_stream_fragment);
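
/* The stream variant above expects H4-style framing, where the first
 * byte of every frame is the packet type indicator, so a UART driver
 * can pass its receive buffer through unmodified (sketch; buf/len are
 * assumed to come from the line discipline):
 *
 *	rem = hci_recv_stream_fragment(hdev, buf, len);
 *	if (rem < 0)
 *		BT_ERR("Stream reassembly failed (%d)", rem);
 */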
2721
2722 /* ---- Interface to upper protocols ---- */
2723
2724 int hci_register_cb(struct hci_cb *cb)
2725 {
2726         BT_DBG("%p name %s", cb, cb->name);
2727
2728         write_lock(&hci_cb_list_lock);
2729         list_add(&cb->list, &hci_cb_list);
2730         write_unlock(&hci_cb_list_lock);
2731
2732         return 0;
2733 }
2734 EXPORT_SYMBOL(hci_register_cb);
2735
2736 int hci_unregister_cb(struct hci_cb *cb)
2737 {
2738         BT_DBG("%p name %s", cb, cb->name);
2739
2740         write_lock(&hci_cb_list_lock);
2741         list_del(&cb->list);
2742         write_unlock(&hci_cb_list_lock);
2743
2744         return 0;
2745 }
2746 EXPORT_SYMBOL(hci_unregister_cb);
2747
2748 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2749 {
2750         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2751
2752         /* Time stamp */
2753         __net_timestamp(skb);
2754
2755         /* Send copy to monitor */
2756         hci_send_to_monitor(hdev, skb);
2757
2758         if (atomic_read(&hdev->promisc)) {
2759                 /* Send copy to the sockets */
2760                 hci_send_to_sock(hdev, skb);
2761         }
2762
2763         /* Get rid of skb owner, prior to sending to the driver. */
2764         skb_orphan(skb);
2765
2766         if (hdev->send(hdev, skb) < 0)
2767                 BT_ERR("%s sending frame failed", hdev->name);
2768 }
2769
2770 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2771 {
2772         skb_queue_head_init(&req->cmd_q);
2773         req->hdev = hdev;
2774         req->err = 0;
2775 }
2776
2777 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2778 {
2779         struct hci_dev *hdev = req->hdev;
2780         struct sk_buff *skb;
2781         unsigned long flags;
2782
2783         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2784
2785         /* If an error occurred during request building, remove all HCI
2786          * commands queued on the HCI request queue.
2787          */
2788         if (req->err) {
2789                 skb_queue_purge(&req->cmd_q);
2790                 return req->err;
2791         }
2792
2793         /* Do not allow empty requests */
2794         if (skb_queue_empty(&req->cmd_q))
2795                 return -ENODATA;
2796
2797         skb = skb_peek_tail(&req->cmd_q);
2798         bt_cb(skb)->req.complete = complete;
2799
2800         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2801         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2802         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2803
2804         queue_work(hdev->workqueue, &hdev->cmd_work);
2805
2806         return 0;
2807 }
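
/* The usual calling pattern for the request API above, sketched with a
 * hypothetical completion handler my_complete (le_scan_disable_work
 * earlier in this file is a real instance):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */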
2808
2809 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2810                                        u32 plen, const void *param)
2811 {
2812         int len = HCI_COMMAND_HDR_SIZE + plen;
2813         struct hci_command_hdr *hdr;
2814         struct sk_buff *skb;
2815
2816         skb = bt_skb_alloc(len, GFP_ATOMIC);
2817         if (!skb)
2818                 return NULL;
2819
2820         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2821         hdr->opcode = cpu_to_le16(opcode);
2822         hdr->plen   = plen;
2823
2824         if (plen)
2825                 memcpy(skb_put(skb, plen), param, plen);
2826
2827         BT_DBG("skb len %d", skb->len);
2828
2829         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2830
2831         return skb;
2832 }
2833
2834 /* Send HCI command */
2835 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2836                  const void *param)
2837 {
2838         struct sk_buff *skb;
2839
2840         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2841
2842         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2843         if (!skb) {
2844                 BT_ERR("%s no memory for command", hdev->name);
2845                 return -ENOMEM;
2846         }
2847
2848         /* Stand-alone HCI commands must be flagged as
2849          * single-command requests.
2850          */
2851         bt_cb(skb)->req.start = true;
2852
2853         skb_queue_tail(&hdev->cmd_q, skb);
2854         queue_work(hdev->workqueue, &hdev->cmd_work);
2855
2856         return 0;
2857 }
2858
2859 /* Queue a command to an asynchronous HCI request */
2860 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2861                     const void *param, u8 event)
2862 {
2863         struct hci_dev *hdev = req->hdev;
2864         struct sk_buff *skb;
2865
2866         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2867
2868         /* If an error occurred during request building, there is no point in
2869          * queueing the HCI command. We can simply return.
2870          */
2871         if (req->err)
2872                 return;
2873
2874         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2875         if (!skb) {
2876                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2877                        hdev->name, opcode);
2878                 req->err = -ENOMEM;
2879                 return;
2880         }
2881
2882         if (skb_queue_empty(&req->cmd_q))
2883                 bt_cb(skb)->req.start = true;
2884
2885         bt_cb(skb)->req.event = event;
2886
2887         skb_queue_tail(&req->cmd_q, skb);
2888 }
2889
2890 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2891                  const void *param)
2892 {
2893         hci_req_add_ev(req, opcode, plen, param, 0);
2894 }
2895
2896 /* Get data from the previously sent command */
2897 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2898 {
2899         struct hci_command_hdr *hdr;
2900
2901         if (!hdev->sent_cmd)
2902                 return NULL;
2903
2904         hdr = (void *) hdev->sent_cmd->data;
2905
2906         if (hdr->opcode != cpu_to_le16(opcode))
2907                 return NULL;
2908
2909         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2910
2911         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2912 }
2913
2914 /* Send ACL data */
2915 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2916 {
2917         struct hci_acl_hdr *hdr;
2918         int len = skb->len;
2919
2920         skb_push(skb, HCI_ACL_HDR_SIZE);
2921         skb_reset_transport_header(skb);
2922         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2923         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2924         hdr->dlen   = cpu_to_le16(len);
2925 }
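
/* hci_handle_pack() puts the 12-bit connection handle in the low bits
 * and the packet boundary/broadcast flags in bits 12-15.  Worked
 * example: handle 0x002a with ACL_START (0x02) gives hdr->handle
 * 0x202a, stored little endian on the wire.
 */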
2926
2927 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2928                           struct sk_buff *skb, __u16 flags)
2929 {
2930         struct hci_conn *conn = chan->conn;
2931         struct hci_dev *hdev = conn->hdev;
2932         struct sk_buff *list;
2933
2934         skb->len = skb_headlen(skb);
2935         skb->data_len = 0;
2936
2937         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2938
2939         switch (hdev->dev_type) {
2940         case HCI_BREDR:
2941                 hci_add_acl_hdr(skb, conn->handle, flags);
2942                 break;
2943         case HCI_AMP:
2944                 hci_add_acl_hdr(skb, chan->handle, flags);
2945                 break;
2946         default:
2947                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2948                 return;
2949         }
2950
2951         list = skb_shinfo(skb)->frag_list;
2952         if (!list) {
2953                 /* Non-fragmented */
2954                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2955
2956                 skb_queue_tail(queue, skb);
2957         } else {
2958                 /* Fragmented */
2959                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2960
2961                 skb_shinfo(skb)->frag_list = NULL;
2962
2963                 /* Queue all fragments atomically */
2964                 spin_lock(&queue->lock);
2965
2966                 __skb_queue_tail(queue, skb);
2967
2968                 flags &= ~ACL_START;
2969                 flags |= ACL_CONT;
2970                 do {
2971                         skb = list; list = list->next;
2972
2973                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2974                         hci_add_acl_hdr(skb, conn->handle, flags);
2975
2976                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2977
2978                         __skb_queue_tail(queue, skb);
2979                 } while (list);
2980
2981                 spin_unlock(&queue->lock);
2982         }
2983 }
2984
2985 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2986 {
2987         struct hci_dev *hdev = chan->conn->hdev;
2988
2989         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2990
2991         hci_queue_acl(chan, &chan->data_q, skb, flags);
2992
2993         queue_work(hdev->workqueue, &hdev->tx_work);
2994 }
2995
2996 /* Send SCO data */
2997 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2998 {
2999         struct hci_dev *hdev = conn->hdev;
3000         struct hci_sco_hdr hdr;
3001
3002         BT_DBG("%s len %d", hdev->name, skb->len);
3003
3004         hdr.handle = cpu_to_le16(conn->handle);
3005         hdr.dlen   = skb->len;
3006
3007         skb_push(skb, HCI_SCO_HDR_SIZE);
3008         skb_reset_transport_header(skb);
3009         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3010
3011         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3012
3013         skb_queue_tail(&conn->data_q, skb);
3014         queue_work(hdev->workqueue, &hdev->tx_work);
3015 }
3016
3017 /* ---- HCI TX task (outgoing data) ---- */
3018
3019 /* HCI Connection scheduler */
3020 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3021                                      int *quote)
3022 {
3023         struct hci_conn_hash *h = &hdev->conn_hash;
3024         struct hci_conn *conn = NULL, *c;
3025         unsigned int num = 0, min = ~0;
3026
3027         /* We don't have to lock device here. Connections are always
3028          * added and removed with TX task disabled. */
3029
3030         rcu_read_lock();
3031
3032         list_for_each_entry_rcu(c, &h->list, list) {
3033                 if (c->type != type || skb_queue_empty(&c->data_q))
3034                         continue;
3035
3036                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3037                         continue;
3038
3039                 num++;
3040
3041                 if (c->sent < min) {
3042                         min  = c->sent;
3043                         conn = c;
3044                 }
3045
3046                 if (hci_conn_num(hdev, type) == num)
3047                         break;
3048         }
3049
3050         rcu_read_unlock();
3051
3052         if (conn) {
3053                 int cnt, q;
3054
3055                 switch (conn->type) {
3056                 case ACL_LINK:
3057                         cnt = hdev->acl_cnt;
3058                         break;
3059                 case SCO_LINK:
3060                 case ESCO_LINK:
3061                         cnt = hdev->sco_cnt;
3062                         break;
3063                 case LE_LINK:
3064                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3065                         break;
3066                 default:
3067                         cnt = 0;
3068                         BT_ERR("Unknown link type");
3069                 }
3070
3071                 q = cnt / num;
3072                 *quote = q ? q : 1;
3073         } else
3074                 *quote = 0;
3075
3076         BT_DBG("conn %p quote %d", conn, *quote);
3077         return conn;
3078 }
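
/* Quota arithmetic for the scheduler above: with hdev->sco_cnt == 8 and
 * three SCO connections holding queued data, the least-busy connection
 * is granted a quote of 8 / 3 == 2 packets; a zero quotient is rounded
 * up to 1 so the selected connection always makes progress.
 */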
3079
3080 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3081 {
3082         struct hci_conn_hash *h = &hdev->conn_hash;
3083         struct hci_conn *c;
3084
3085         BT_ERR("%s link tx timeout", hdev->name);
3086
3087         rcu_read_lock();
3088
3089         /* Kill stalled connections */
3090         list_for_each_entry_rcu(c, &h->list, list) {
3091                 if (c->type == type && c->sent) {
3092                         BT_ERR("%s killing stalled connection %pMR",
3093                                hdev->name, &c->dst);
3094                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3095                 }
3096         }
3097
3098         rcu_read_unlock();
3099 }
3100
3101 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3102                                       int *quote)
3103 {
3104         struct hci_conn_hash *h = &hdev->conn_hash;
3105         struct hci_chan *chan = NULL;
3106         unsigned int num = 0, min = ~0, cur_prio = 0;
3107         struct hci_conn *conn;
3108         int cnt, q, conn_num = 0;
3109
3110         BT_DBG("%s", hdev->name);
3111
3112         rcu_read_lock();
3113
3114         list_for_each_entry_rcu(conn, &h->list, list) {
3115                 struct hci_chan *tmp;
3116
3117                 if (conn->type != type)
3118                         continue;
3119
3120                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3121                         continue;
3122
3123                 conn_num++;
3124
3125                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3126                         struct sk_buff *skb;
3127
3128                         if (skb_queue_empty(&tmp->data_q))
3129                                 continue;
3130
3131                         skb = skb_peek(&tmp->data_q);
3132                         if (skb->priority < cur_prio)
3133                                 continue;
3134
3135                         if (skb->priority > cur_prio) {
3136                                 num = 0;
3137                                 min = ~0;
3138                                 cur_prio = skb->priority;
3139                         }
3140
3141                         num++;
3142
3143                         if (conn->sent < min) {
3144                                 min  = conn->sent;
3145                                 chan = tmp;
3146                         }
3147                 }
3148
3149                 if (hci_conn_num(hdev, type) == conn_num)
3150                         break;
3151         }
3152
3153         rcu_read_unlock();
3154
3155         if (!chan)
3156                 return NULL;
3157
3158         switch (chan->conn->type) {
3159         case ACL_LINK:
3160                 cnt = hdev->acl_cnt;
3161                 break;
3162         case AMP_LINK:
3163                 cnt = hdev->block_cnt;
3164                 break;
3165         case SCO_LINK:
3166         case ESCO_LINK:
3167                 cnt = hdev->sco_cnt;
3168                 break;
3169         case LE_LINK:
3170                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3171                 break;
3172         default:
3173                 cnt = 0;
3174                 BT_ERR("Unknown link type");
3175         }
3176
3177         q = cnt / num;
3178         *quote = q ? q : 1;
3179         BT_DBG("chan %p quote %d", chan, *quote);
3180         return chan;
3181 }
3182
3183 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3184 {
3185         struct hci_conn_hash *h = &hdev->conn_hash;
3186         struct hci_conn *conn;
3187         int num = 0;
3188
3189         BT_DBG("%s", hdev->name);
3190
3191         rcu_read_lock();
3192
3193         list_for_each_entry_rcu(conn, &h->list, list) {
3194                 struct hci_chan *chan;
3195
3196                 if (conn->type != type)
3197                         continue;
3198
3199                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3200                         continue;
3201
3202                 num++;
3203
3204                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3205                         struct sk_buff *skb;
3206
3207                         if (chan->sent) {
3208                                 chan->sent = 0;
3209                                 continue;
3210                         }
3211
3212                         if (skb_queue_empty(&chan->data_q))
3213                                 continue;
3214
3215                         skb = skb_peek(&chan->data_q);
3216                         if (skb->priority >= HCI_PRIO_MAX - 1)
3217                                 continue;
3218
3219                         skb->priority = HCI_PRIO_MAX - 1;
3220
3221                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3222                                skb->priority);
3223                 }
3224
3225                 if (hci_conn_num(hdev, type) == num)
3226                         break;
3227         }
3228
3229         rcu_read_unlock();
3230
3231 }
3232
3233 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3234 {
3235         /* Calculate count of blocks used by this packet */
3236         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3237 }
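
/* Worked example: with hdev->block_len == 339 and a 1021-byte ACL frame
 * (4-byte ACL header plus 1017 bytes of payload), __get_blocks() charges
 * DIV_ROUND_UP(1017, 339) == 3 controller blocks.
 */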
3238
3239 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3240 {
3241         if (!test_bit(HCI_RAW, &hdev->flags)) {
3242                 /* ACL tx timeout must be longer than maximum
3243                  * link supervision timeout (40.9 seconds) */
3244                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3245                                        HCI_ACL_TX_TIMEOUT))
3246                         hci_link_tx_to(hdev, ACL_LINK);
3247         }
3248 }
3249
3250 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3251 {
3252         unsigned int cnt = hdev->acl_cnt;
3253         struct hci_chan *chan;
3254         struct sk_buff *skb;
3255         int quote;
3256
3257         __check_timeout(hdev, cnt);
3258
3259         while (hdev->acl_cnt &&
3260                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3261                 u32 priority = (skb_peek(&chan->data_q))->priority;
3262                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3263                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3264                                skb->len, skb->priority);
3265
3266                         /* Stop if priority has changed */
3267                         if (skb->priority < priority)
3268                                 break;
3269
3270                         skb = skb_dequeue(&chan->data_q);
3271
3272                         hci_conn_enter_active_mode(chan->conn,
3273                                                    bt_cb(skb)->force_active);
3274
3275                         hci_send_frame(hdev, skb);
3276                         hdev->acl_last_tx = jiffies;
3277
3278                         hdev->acl_cnt--;
3279                         chan->sent++;
3280                         chan->conn->sent++;
3281                 }
3282         }
3283
3284         if (cnt != hdev->acl_cnt)
3285                 hci_prio_recalculate(hdev, ACL_LINK);
3286 }
3287
3288 static void hci_sched_acl_blk(struct hci_dev *hdev)
3289 {
3290         unsigned int cnt = hdev->block_cnt;
3291         struct hci_chan *chan;
3292         struct sk_buff *skb;
3293         int quote;
3294         u8 type;
3295
3296         __check_timeout(hdev, cnt);
3297
3298         BT_DBG("%s", hdev->name);
3299
3300         if (hdev->dev_type == HCI_AMP)
3301                 type = AMP_LINK;
3302         else
3303                 type = ACL_LINK;
3304
3305         while (hdev->block_cnt > 0 &&
3306                (chan = hci_chan_sent(hdev, type, &quote))) {
3307                 u32 priority = (skb_peek(&chan->data_q))->priority;
3308                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3309                         int blocks;
3310
3311                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3312                                skb->len, skb->priority);
3313
3314                         /* Stop if priority has changed */
3315                         if (skb->priority < priority)
3316                                 break;
3317
3318                         skb = skb_dequeue(&chan->data_q);
3319
3320                         blocks = __get_blocks(hdev, skb);
3321                         if (blocks > hdev->block_cnt)
3322                                 return;
3323
3324                         hci_conn_enter_active_mode(chan->conn,
3325                                                    bt_cb(skb)->force_active);
3326
3327                         hci_send_frame(hdev, skb);
3328                         hdev->acl_last_tx = jiffies;
3329
3330                         hdev->block_cnt -= blocks;
3331                         quote -= blocks;
3332
3333                         chan->sent += blocks;
3334                         chan->conn->sent += blocks;
3335                 }
3336         }
3337
3338         if (cnt != hdev->block_cnt)
3339                 hci_prio_recalculate(hdev, type);
3340 }
3341
3342 static void hci_sched_acl(struct hci_dev *hdev)
3343 {
3344         BT_DBG("%s", hdev->name);
3345
3346         /* No ACL link over BR/EDR controller */
3347         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3348                 return;
3349
3350         /* No AMP link over AMP controller */
3351         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3352                 return;
3353
3354         switch (hdev->flow_ctl_mode) {
3355         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3356                 hci_sched_acl_pkt(hdev);
3357                 break;
3358
3359         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3360                 hci_sched_acl_blk(hdev);
3361                 break;
3362         }
3363 }
3364
3365 /* Schedule SCO */
3366 static void hci_sched_sco(struct hci_dev *hdev)
3367 {
3368         struct hci_conn *conn;
3369         struct sk_buff *skb;
3370         int quote;
3371
3372         BT_DBG("%s", hdev->name);
3373
3374         if (!hci_conn_num(hdev, SCO_LINK))
3375                 return;
3376
3377         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3378                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3379                         BT_DBG("skb %p len %d", skb, skb->len);
3380                         hci_send_frame(hdev, skb);
3381
3382                         conn->sent++;
3383                         if (conn->sent == ~0)
3384                                 conn->sent = 0;
3385                 }
3386         }
3387 }
3388
3389 static void hci_sched_esco(struct hci_dev *hdev)
3390 {
3391         struct hci_conn *conn;
3392         struct sk_buff *skb;
3393         int quote;
3394
3395         BT_DBG("%s", hdev->name);
3396
3397         if (!hci_conn_num(hdev, ESCO_LINK))
3398                 return;
3399
3400         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3401                                                      &quote))) {
3402                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3403                         BT_DBG("skb %p len %d", skb, skb->len);
3404                         hci_send_frame(hdev, skb);
3405
3406                         conn->sent++;
3407                         if (conn->sent == ~0)
3408                                 conn->sent = 0;
3409                 }
3410         }
3411 }
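
/* Editor's note: hci_sched_sco() and hci_sched_esco() are identical apart
 * from the link type, and both draw on the shared hdev->sco_cnt budget. A
 * merged helper would be a natural refactor; a sketch (hypothetical, not
 * the in-tree code):
 */
static void example_sched_sync(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        if (!hci_conn_num(hdev, type))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, type, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        hci_send_frame(hdev, skb);

                        /* Wrap the per-connection counter, as above */
                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}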
3412
3413 static void hci_sched_le(struct hci_dev *hdev)
3414 {
3415         struct hci_chan *chan;
3416         struct sk_buff *skb;
3417         int quote, cnt, tmp;
3418
3419         BT_DBG("%s", hdev->name);
3420
3421         if (!hci_conn_num(hdev, LE_LINK))
3422                 return;
3423
3424         if (!test_bit(HCI_RAW, &hdev->flags)) {
3425                 /* LE tx timeout must be longer than maximum
3426                  * link supervision timeout (40.9 seconds) */
3427                 if (!hdev->le_cnt && hdev->le_pkts &&
3428                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3429                         hci_link_tx_to(hdev, LE_LINK);
3430         }
3431
3432         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3433         tmp = cnt;
3434         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3435                 u32 priority = (skb_peek(&chan->data_q))->priority;
3436                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3437                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3438                                skb->len, skb->priority);
3439
3440                         /* Stop if priority has changed */
3441                         if (skb->priority < priority)
3442                                 break;
3443
3444                         skb = skb_dequeue(&chan->data_q);
3445
3446                         hci_send_frame(hdev, skb);
3447                         hdev->le_last_tx = jiffies;
3448
3449                         cnt--;
3450                         chan->sent++;
3451                         chan->conn->sent++;
3452                 }
3453         }
3454
3455         if (hdev->le_pkts)
3456                 hdev->le_cnt = cnt;
3457         else
3458                 hdev->acl_cnt = cnt;
3459
3460         if (cnt != tmp)
3461                 hci_prio_recalculate(hdev, LE_LINK);
3462 }
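
/* Editor's note: controllers that advertise no dedicated LE buffers
 * (le_pkts == 0) borrow the ACL budget, which is why hci_sched_le() loads
 * its working count from either le_cnt or acl_cnt and writes the remainder
 * back to the same field. A sketch of the selection:
 */
static inline unsigned int example_le_budget(struct hci_dev *hdev)
{
        /* Dedicated LE buffers if advertised, shared ACL buffers otherwise */
        return hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
}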
3463
3464 static void hci_tx_work(struct work_struct *work)
3465 {
3466         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3467         struct sk_buff *skb;
3468
3469         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3470                hdev->sco_cnt, hdev->le_cnt);
3471
3472         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3473                 /* Schedule queues and send stuff to HCI driver */
3474                 hci_sched_acl(hdev);
3475                 hci_sched_sco(hdev);
3476                 hci_sched_esco(hdev);
3477                 hci_sched_le(hdev);
3478         }
3479
3480         /* Send next queued raw (unknown type) packet */
3481         while ((skb = skb_dequeue(&hdev->raw_q)))
3482                 hci_send_frame(hdev, skb);
3483 }
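
/* Editor's sketch: tx_work is (re)armed whenever a sender queues data or an
 * event replenishes credits; a hypothetical producer only needs to do:
 */
static inline void example_kick_tx(struct hci_dev *hdev)
{
        queue_work(hdev->workqueue, &hdev->tx_work);
}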
3484
3485 /* ----- HCI RX task (incoming data processing) ----- */
3486
3487 /* ACL data packet */
3488 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3489 {
3490         struct hci_acl_hdr *hdr = (void *) skb->data;
3491         struct hci_conn *conn;
3492         __u16 handle, flags;
3493
3494         skb_pull(skb, HCI_ACL_HDR_SIZE);
3495
3496         handle = __le16_to_cpu(hdr->handle);
3497         flags  = hci_flags(handle);
3498         handle = hci_handle(handle);
3499
3500         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3501                handle, flags);
3502
3503         hdev->stat.acl_rx++;
3504
3505         hci_dev_lock(hdev);
3506         conn = hci_conn_hash_lookup_handle(hdev, handle);
3507         hci_dev_unlock(hdev);
3508
3509         if (conn) {
3510                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3511
3512                 /* Send to upper protocol */
3513                 l2cap_recv_acldata(conn, skb, flags);
3514                 return;
3515         }
3516
3517         BT_ERR("%s ACL packet for unknown connection handle %d",
3518                hdev->name, handle);
3519
3520         kfree_skb(skb);
3521 }
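
/* Editor's note: the first 16 bits of an ACL header pack a 12-bit
 * connection handle together with 4 bits of packet-boundary/broadcast
 * flags; hci_handle() and hci_flags() above perform the split. The inverse
 * operation, mirroring hci_handle_pack() from hci.h, is:
 */
static inline __u16 example_acl_handle_pack(__u16 handle, __u16 flags)
{
        /* handle in bits 0-11, flags in bits 12-15 */
        return (__u16) ((handle & 0x0fff) | (flags << 12));
}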
3522
3523 /* SCO data packet */
3524 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3525 {
3526         struct hci_sco_hdr *hdr = (void *) skb->data;
3527         struct hci_conn *conn;
3528         __u16 handle;
3529
3530         skb_pull(skb, HCI_SCO_HDR_SIZE);
3531
3532         handle = __le16_to_cpu(hdr->handle);
3533
3534         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3535
3536         hdev->stat.sco_rx++;
3537
3538         hci_dev_lock(hdev);
3539         conn = hci_conn_hash_lookup_handle(hdev, handle);
3540         hci_dev_unlock(hdev);
3541
3542         if (conn) {
3543                 /* Send to upper protocol */
3544                 sco_recv_scodata(conn, skb);
3545                 return;
3546         }
3547
3548         BT_ERR("%s SCO packet for unknown connection handle %d",
3549                hdev->name, handle);
3550
3551         kfree_skb(skb);
3552 }
3553
3554 static bool hci_req_is_complete(struct hci_dev *hdev)
3555 {
3556         struct sk_buff *skb;
3557
3558         skb = skb_peek(&hdev->cmd_q);
3559         if (!skb)
3560                 return true;
3561
3562         return bt_cb(skb)->req.start;
3563 }
3564
3565 static void hci_resend_last(struct hci_dev *hdev)
3566 {
3567         struct hci_command_hdr *sent;
3568         struct sk_buff *skb;
3569         u16 opcode;
3570
3571         if (!hdev->sent_cmd)
3572                 return;
3573
3574         sent = (void *) hdev->sent_cmd->data;
3575         opcode = __le16_to_cpu(sent->opcode);
3576         if (opcode == HCI_OP_RESET)
3577                 return;
3578
3579         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3580         if (!skb)
3581                 return;
3582
3583         skb_queue_head(&hdev->cmd_q, skb);
3584         queue_work(hdev->workqueue, &hdev->cmd_work);
3585 }
3586
3587 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3588 {
3589         hci_req_complete_t req_complete = NULL;
3590         struct sk_buff *skb;
3591         unsigned long flags;
3592
3593         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3594
3595         /* If the completed command doesn't match the last one that was
3596          * sent we need to do special handling of it.
3597          */
3598         if (!hci_sent_cmd_data(hdev, opcode)) {
3599                 /* Some CSR based controllers generate a spontaneous
3600                  * reset complete event during init and any pending
3601                  * command will never be completed. In such a case we
3602                  * need to resend whatever was the last sent
3603                  * command.
3604                  */
3605                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3606                         hci_resend_last(hdev);
3607
3608                 return;
3609         }
3610
3611         /* If the command succeeded and there are still more commands in
3612          * this request, the request is not yet complete.
3613          */
3614         if (!status && !hci_req_is_complete(hdev))
3615                 return;
3616
3617         /* If this was the last command in a request, the complete
3618          * callback is found in hdev->sent_cmd instead of the
3619          * command queue (hdev->cmd_q).
3620          */
3621         if (hdev->sent_cmd) {
3622                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3623
3624                 if (req_complete) {
3625                         /* We must set the complete callback to NULL to
3626                          * avoid calling the callback more than once if
3627                          * this function gets called again.
3628                          */
3629                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3630
3631                         goto call_complete;
3632                 }
3633         }
3634
3635         /* Remove all pending commands belonging to this request */
3636         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3637         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3638                 if (bt_cb(skb)->req.start) {
3639                         __skb_queue_head(&hdev->cmd_q, skb);
3640                         break;
3641                 }
3642
3643                 req_complete = bt_cb(skb)->req.complete;
3644                 kfree_skb(skb);
3645         }
3646         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3647
3648 call_complete:
3649         if (req_complete)
3650                 req_complete(hdev, status);
3651 }
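
/* Editor's sketch: commands sitting in cmd_q are framed into requests by
 * bt_cb(skb)->req.start, set on each request's first command; that marker
 * is what the drain loop above keys on. A hypothetical walker counting the
 * commands left in the current request (caller would hold cmd_q.lock):
 */
static unsigned int example_req_cmds_pending(struct hci_dev *hdev)
{
        struct sk_buff *skb;
        unsigned int n = 0;

        skb_queue_walk(&hdev->cmd_q, skb) {
                /* A fresh start marker means the next request begins here */
                if (n && bt_cb(skb)->req.start)
                        break;
                n++;
        }

        return n;
}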
3652
3653 static void hci_rx_work(struct work_struct *work)
3654 {
3655         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3656         struct sk_buff *skb;
3657
3658         BT_DBG("%s", hdev->name);
3659
3660         while ((skb = skb_dequeue(&hdev->rx_q))) {
3661                 /* Send copy to monitor */
3662                 hci_send_to_monitor(hdev, skb);
3663
3664                 if (atomic_read(&hdev->promisc)) {
3665                         /* Send copy to the sockets */
3666                         hci_send_to_sock(hdev, skb);
3667                 }
3668
3669                 if (test_bit(HCI_RAW, &hdev->flags) ||
3670                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3671                         kfree_skb(skb);
3672                         continue;
3673                 }
3674
3675                 if (test_bit(HCI_INIT, &hdev->flags)) {
3676                         /* Don't process data packets in this state. */
3677                         switch (bt_cb(skb)->pkt_type) {
3678                         case HCI_ACLDATA_PKT:
3679                         case HCI_SCODATA_PKT:
3680                                 kfree_skb(skb);
3681                                 continue;
3682                         }
3683                 }
3684
3685                 /* Process frame */
3686                 switch (bt_cb(skb)->pkt_type) {
3687                 case HCI_EVENT_PKT:
3688                         BT_DBG("%s Event packet", hdev->name);
3689                         hci_event_packet(hdev, skb);
3690                         break;
3691
3692                 case HCI_ACLDATA_PKT:
3693                         BT_DBG("%s ACL data packet", hdev->name);
3694                         hci_acldata_packet(hdev, skb);
3695                         break;
3696
3697                 case HCI_SCODATA_PKT:
3698                         BT_DBG("%s SCO data packet", hdev->name);
3699                         hci_scodata_packet(hdev, skb);
3700                         break;
3701
3702                 default:
3703                         kfree_skb(skb);
3704                         break;
3705                 }
3706         }
3707 }
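
/* Editor's note: bt_cb(skb)->pkt_type carries the H:4 packet indicator
 * (HCI_COMMAND_PKT 0x01, HCI_ACLDATA_PKT 0x02, HCI_SCODATA_PKT 0x03,
 * HCI_EVENT_PKT 0x04). A small predicate mirroring the HCI_INIT filter
 * above, which drops data packets until controller setup has finished:
 */
static inline bool example_is_data_pkt(__u8 pkt_type)
{
        return pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_SCODATA_PKT;
}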
3708
3709 static void hci_cmd_work(struct work_struct *work)
3710 {
3711         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3712         struct sk_buff *skb;
3713
3714         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3715                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3716
3717         /* Send queued commands */
3718         if (atomic_read(&hdev->cmd_cnt)) {
3719                 skb = skb_dequeue(&hdev->cmd_q);
3720                 if (!skb)
3721                         return;
3722
3723                 kfree_skb(hdev->sent_cmd);
3724
3725                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3726                 if (hdev->sent_cmd) {
3727                         atomic_dec(&hdev->cmd_cnt);
3728                         hci_send_frame(hdev, skb);
3729                         if (test_bit(HCI_RESET, &hdev->flags))
3730                                 del_timer(&hdev->cmd_timer);
3731                         else
3732                                 mod_timer(&hdev->cmd_timer,
3733                                           jiffies + HCI_CMD_TIMEOUT);
3734                 } else {
3735                         skb_queue_head(&hdev->cmd_q, skb);
3736                         queue_work(hdev->workqueue, &hdev->cmd_work);
3737                 }
3738         }
3739 }
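
/* Editor's sketch: cmd_cnt mirrors the Num_HCI_Command_Packets credit the
 * controller reports in Command Complete/Status events, and cmd_timer acts
 * as a per-command watchdog. A simplified (hypothetical) view of how the
 * event path replenishes the credit and re-kicks this work item:
 */
static inline void example_cmd_credit(struct hci_dev *hdev, __u8 ncmd)
{
        if (!ncmd)
                return;

        /* The in-tree event handlers cap the credit at one in-flight cmd */
        atomic_set(&hdev->cmd_cnt, 1);

        if (!skb_queue_empty(&hdev->cmd_q))
                queue_work(hdev->workqueue, &hdev->cmd_work);
}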