/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

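/* Synchronous requests park the caller on req_wait_q and track progress
 * in hdev->req_status: HCI_REQ_PEND while the request is in flight,
 * HCI_REQ_DONE once the completion callback above fires, and
 * HCI_REQ_CANCELED when hci_req_cancel() aborts it (e.g. on device
 * close). Both paths wake the waiter, which then inspects req_result.
 */
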
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

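/* When @event is zero the caller expects a Command Complete for @opcode;
 * otherwise any event of type @event satisfies the request and the raw
 * skb is handed back for the caller to parse.
 */
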
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

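/* Typical driver-side use (an illustrative sketch, not taken from this
 * file): issue a command during setup and consume the Command Complete
 * return parameters, e.g.
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      rp = (struct hci_rp_read_local_version *) skb->data;
 *      ...
 *      kfree_skb(skb);
 *
 * The returned skb starts at the command's return parameters; the caller
 * owns it and must free it with kfree_skb().
 */
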
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

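/* hci_req_sync() is the checked entry point: it refuses to run when the
 * device is down and takes the request lock itself, while __hci_req_sync()
 * assumes the caller already holds it (as hci_dev_open() does during
 * controller init).
 */
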
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

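/* Inquiry mode 0x00 is standard, 0x01 adds RSSI to inquiry results and
 * 0x02 selects Extended Inquiry Result events. The manufacturer/revision
 * checks above appear to work around controllers that support RSSI
 * reporting without advertising it in their LMP features.
 */
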
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

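/* Each events[] octet covers eight event-mask bits: octet n, bit b
 * controls mask bit 8 * n + b of the 64-bit little-endian mask sent
 * with HCI_OP_SET_EVENT_MASK. For example, events[6] |= 0x01 (IO
 * Capability Request) sets mask bit 48.
 */
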
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

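/* The resolve list is kept ordered by signal strength (smallest
 * abs(rssi), i.e. strongest device, first), so remote-name resolution
 * after inquiry proceeds from the strongest device onward.
 */
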
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else {
                err = -EFAULT;
        }

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else {
                        ptr[1] = EIR_NAME_COMPLETE;
                }

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

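/* Advertising data is a sequence of [length, type, payload] fields,
 * where length counts the type octet plus the payload. The flags and
 * TX power fields above therefore occupy 3 octets each, and the name
 * field name_len + 2 octets, within the HCI_MAX_AD_LENGTH budget of
 * 31 octets.
 */
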
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

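/* Note: HCISETACLMTU and HCISETSCOMTU pack two values into the 32-bit
 * dev_opt; with the pointer arithmetic above, on a little-endian host
 * the low 16 bits carry the packet count and the high 16 bits the MTU.
 */
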
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither the local nor the remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently.
         */
        return false;
}

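/* Key types below 0x03 are the legacy combination, local-unit and
 * remote-unit keys; the debug, unauthenticated and authenticated
 * combination key types come from Secure Simple Pairing and are subject
 * to the bonding checks above.
 */
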
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

1730 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1731                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1732 {
1733         struct link_key *key, *old_key;
1734         u8 old_key_type;
1735         bool persistent;
1736
1737         old_key = hci_find_link_key(hdev, bdaddr);
1738         if (old_key) {
1739                 old_key_type = old_key->type;
1740                 key = old_key;
1741         } else {
1742                 old_key_type = conn ? conn->key_type : 0xff;
1743                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1744                 if (!key)
1745                         return -ENOMEM;
1746                 list_add(&key->list, &hdev->link_keys);
1747         }
1748
1749         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1750
1751         /* Some buggy controller combinations generate a changed
1752          * combination key for legacy pairing even when there's no
1753          * previous key */
1754         if (type == HCI_LK_CHANGED_COMBINATION &&
1755             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1756                 type = HCI_LK_COMBINATION;
1757                 if (conn)
1758                         conn->key_type = type;
1759         }
1760
1761         bacpy(&key->bdaddr, bdaddr);
1762         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1763         key->pin_len = pin_len;
1764
1765         if (type == HCI_LK_CHANGED_COMBINATION)
1766                 key->type = old_key_type;
1767         else
1768                 key->type = type;
1769
1770         if (!new_key)
1771                 return 0;
1772
1773         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1774
1775         mgmt_new_link_key(hdev, key, persistent);
1776
1777         if (conn)
1778                 conn->flush_key = !persistent;
1779
1780         return 0;
1781 }
1782
1783 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1784                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1785                 ediv, u8 rand[8])
1786 {
1787         struct smp_ltk *key, *old_key;
1788
1789         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1790                 return 0;
1791
1792         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1793         if (old_key)
1794                 key = old_key;
1795         else {
1796                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1797                 if (!key)
1798                         return -ENOMEM;
1799                 list_add(&key->list, &hdev->long_term_keys);
1800         }
1801
1802         bacpy(&key->bdaddr, bdaddr);
1803         key->bdaddr_type = addr_type;
1804         memcpy(key->val, tk, sizeof(key->val));
1805         key->authenticated = authenticated;
1806         key->ediv = ediv;
1807         key->enc_size = enc_size;
1808         key->type = type;
1809         memcpy(key->rand, rand, sizeof(key->rand));
1810
1811         if (!new_key)
1812                 return 0;
1813
1814         if (type & HCI_SMP_LTK)
1815                 mgmt_new_ltk(hdev, key, 1);
1816
1817         return 0;
1818 }
1819
1820 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1821 {
1822         struct link_key *key;
1823
1824         key = hci_find_link_key(hdev, bdaddr);
1825         if (!key)
1826                 return -ENOENT;
1827
1828         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1829
1830         list_del(&key->list);
1831         kfree(key);
1832
1833         return 0;
1834 }
1835
1836 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1837 {
1838         struct smp_ltk *k, *tmp;
1839
1840         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1841                 if (bacmp(bdaddr, &k->bdaddr))
1842                         continue;
1843
1844                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1845
1846                 list_del(&k->list);
1847                 kfree(k);
1848         }
1849
1850         return 0;
1851 }
1852
1853 /* HCI command timer function */
1854 static void hci_cmd_timeout(unsigned long arg)
1855 {
1856         struct hci_dev *hdev = (void *) arg;
1857
1858         if (hdev->sent_cmd) {
1859                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1860                 u16 opcode = __le16_to_cpu(sent->opcode);
1861
1862                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1863         } else {
1864                 BT_ERR("%s command tx timeout", hdev->name);
1865         }
1866
1867         atomic_set(&hdev->cmd_cnt, 1);
1868         queue_work(hdev->workqueue, &hdev->cmd_work);
1869 }
1870
1871 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1872                                           bdaddr_t *bdaddr)
1873 {
1874         struct oob_data *data;
1875
1876         list_for_each_entry(data, &hdev->remote_oob_data, list)
1877                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1878                         return data;
1879
1880         return NULL;
1881 }
1882
1883 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1884 {
1885         struct oob_data *data;
1886
1887         data = hci_find_remote_oob_data(hdev, bdaddr);
1888         if (!data)
1889                 return -ENOENT;
1890
1891         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1892
1893         list_del(&data->list);
1894         kfree(data);
1895
1896         return 0;
1897 }
1898
1899 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1900 {
1901         struct oob_data *data, *n;
1902
1903         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1904                 list_del(&data->list);
1905                 kfree(data);
1906         }
1907
1908         return 0;
1909 }
1910
1911 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1912                             u8 *randomizer)
1913 {
1914         struct oob_data *data;
1915
1916         data = hci_find_remote_oob_data(hdev, bdaddr);
1917
1918         if (!data) {
1919                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1920                 if (!data)
1921                         return -ENOMEM;
1922
1923                 bacpy(&data->bdaddr, bdaddr);
1924                 list_add(&data->list, &hdev->remote_oob_data);
1925         }
1926
1927         memcpy(data->hash, hash, sizeof(data->hash));
1928         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1929
1930         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1931
1932         return 0;
1933 }
1934
1935 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1936 {
1937         struct bdaddr_list *b;
1938
1939         list_for_each_entry(b, &hdev->blacklist, list)
1940                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1941                         return b;
1942
1943         return NULL;
1944 }
1945
1946 int hci_blacklist_clear(struct hci_dev *hdev)
1947 {
1948         struct list_head *p, *n;
1949
1950         list_for_each_safe(p, n, &hdev->blacklist) {
1951                 struct bdaddr_list *b;
1952
1953                 b = list_entry(p, struct bdaddr_list, list);
1954
1955                 list_del(p);
1956                 kfree(b);
1957         }
1958
1959         return 0;
1960 }
1961
1962 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1963 {
1964         struct bdaddr_list *entry;
1965
1966         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1967                 return -EBADF;
1968
1969         if (hci_blacklist_lookup(hdev, bdaddr))
1970                 return -EEXIST;
1971
1972         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1973         if (!entry)
1974                 return -ENOMEM;
1975
1976         bacpy(&entry->bdaddr, bdaddr);
1977
1978         list_add(&entry->list, &hdev->blacklist);
1979
1980         return mgmt_device_blocked(hdev, bdaddr, type);
1981 }
1982
1983 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1984 {
1985         struct bdaddr_list *entry;
1986
1987         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1988                 return hci_blacklist_clear(hdev);
1989
1990         entry = hci_blacklist_lookup(hdev, bdaddr);
1991         if (!entry)
1992                 return -ENOENT;
1993
1994         list_del(&entry->list);
1995         kfree(entry);
1996
1997         return mgmt_device_unblocked(hdev, bdaddr, type);
1998 }
1999
2000 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2001 {
2002         if (status) {
2003                 BT_ERR("Failed to start inquiry: status %d", status);
2004
2005                 hci_dev_lock(hdev);
2006                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2007                 hci_dev_unlock(hdev);
2008                 return;
2009         }
2010 }
2011
2012 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2013 {
2014         /* General inquiry access code (GIAC) */
2015         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2016         struct hci_request req;
2017         struct hci_cp_inquiry cp;
2018         int err;
2019
2020         if (status) {
2021                 BT_ERR("Failed to disable LE scanning: status %d", status);
2022                 return;
2023         }
2024
2025         switch (hdev->discovery.type) {
2026         case DISCOV_TYPE_LE:
2027                 hci_dev_lock(hdev);
2028                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2029                 hci_dev_unlock(hdev);
2030                 break;
2031
2032         case DISCOV_TYPE_INTERLEAVED:
2033                 hci_req_init(&req, hdev);
2034
2035                 memset(&cp, 0, sizeof(cp));
2036                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2037                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2038                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2039
2040                 hci_dev_lock(hdev);
2041
2042                 hci_inquiry_cache_flush(hdev);
2043
2044                 err = hci_req_run(&req, inquiry_complete);
2045                 if (err) {
2046                         BT_ERR("Inquiry request failed: err %d", err);
2047                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2048                 }
2049
2050                 hci_dev_unlock(hdev);
2051                 break;
2052         }
2053 }
2054
2055 static void le_scan_disable_work(struct work_struct *work)
2056 {
2057         struct hci_dev *hdev = container_of(work, struct hci_dev,
2058                                             le_scan_disable.work);
2059         struct hci_cp_le_set_scan_enable cp;
2060         struct hci_request req;
2061         int err;
2062
2063         BT_DBG("%s", hdev->name);
2064
2065         hci_req_init(&req, hdev);
2066
2067         memset(&cp, 0, sizeof(cp));
2068         cp.enable = LE_SCAN_DISABLE;
2069         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2070
2071         err = hci_req_run(&req, le_scan_disable_work_complete);
2072         if (err)
2073                 BT_ERR("Disable LE scanning request failed: err %d", err);
2074 }
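/* Illustrative sketch (not part of the original file): a discovery
 * caller arms the delayed work above after enabling the LE scan,
 * roughly like
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   timeout);
 *
 * where timeout is the LE discovery window chosen by the caller.
 */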
2075
2076 /* Alloc HCI device */
2077 struct hci_dev *hci_alloc_dev(void)
2078 {
2079         struct hci_dev *hdev;
2080
2081         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2082         if (!hdev)
2083                 return NULL;
2084
2085         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2086         hdev->esco_type = (ESCO_HV1);
2087         hdev->link_mode = (HCI_LM_ACCEPT);
2088         hdev->io_capability = 0x03; /* No Input No Output */
2089         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2090         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2091
2092         hdev->sniff_max_interval = 800;
2093         hdev->sniff_min_interval = 80;
2094
2095         mutex_init(&hdev->lock);
2096         mutex_init(&hdev->req_lock);
2097
2098         INIT_LIST_HEAD(&hdev->mgmt_pending);
2099         INIT_LIST_HEAD(&hdev->blacklist);
2100         INIT_LIST_HEAD(&hdev->uuids);
2101         INIT_LIST_HEAD(&hdev->link_keys);
2102         INIT_LIST_HEAD(&hdev->long_term_keys);
2103         INIT_LIST_HEAD(&hdev->remote_oob_data);
2104         INIT_LIST_HEAD(&hdev->conn_hash.list);
2105
2106         INIT_WORK(&hdev->rx_work, hci_rx_work);
2107         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2108         INIT_WORK(&hdev->tx_work, hci_tx_work);
2109         INIT_WORK(&hdev->power_on, hci_power_on);
2110
2111         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2112         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2113         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2114
2115         skb_queue_head_init(&hdev->rx_q);
2116         skb_queue_head_init(&hdev->cmd_q);
2117         skb_queue_head_init(&hdev->raw_q);
2118
2119         init_waitqueue_head(&hdev->req_wait_q);
2120
2121         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2122
2123         hci_init_sysfs(hdev);
2124         discovery_init(hdev);
2125
2126         return hdev;
2127 }
2128 EXPORT_SYMBOL(hci_alloc_dev);
2129
2130 /* Free HCI device */
2131 void hci_free_dev(struct hci_dev *hdev)
2132 {
2133         /* Will be freed via the device release callback */
2134         put_device(&hdev->dev);
2135 }
2136 EXPORT_SYMBOL(hci_free_dev);
2137
2138 /* Register HCI device */
2139 int hci_register_dev(struct hci_dev *hdev)
2140 {
2141         int id, error;
2142
2143         if (!hdev->open || !hdev->close)
2144                 return -EINVAL;
2145
2146         /* Do not allow HCI_AMP devices to register at index 0,
2147          * so the index can be used as the AMP controller ID.
2148          */
2149         switch (hdev->dev_type) {
2150         case HCI_BREDR:
2151                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2152                 break;
2153         case HCI_AMP:
2154                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2155                 break;
2156         default:
2157                 return -EINVAL;
2158         }
2159
2160         if (id < 0)
2161                 return id;
2162
2163         sprintf(hdev->name, "hci%d", id);
2164         hdev->id = id;
2165
2166         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2167
2168         write_lock(&hci_dev_list_lock);
2169         list_add(&hdev->list, &hci_dev_list);
2170         write_unlock(&hci_dev_list_lock);
2171
2172         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2173                                           WQ_MEM_RECLAIM, 1, hdev->name);
2174         if (!hdev->workqueue) {
2175                 error = -ENOMEM;
2176                 goto err;
2177         }
2178
2179         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2180                                               WQ_MEM_RECLAIM, 1, hdev->name);
2181         if (!hdev->req_workqueue) {
2182                 destroy_workqueue(hdev->workqueue);
2183                 error = -ENOMEM;
2184                 goto err;
2185         }
2186
2187         error = hci_add_sysfs(hdev);
2188         if (error < 0)
2189                 goto err_wqueue;
2190
2191         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2192                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2193                                     hdev);
2194         if (hdev->rfkill) {
2195                 if (rfkill_register(hdev->rfkill) < 0) {
2196                         rfkill_destroy(hdev->rfkill);
2197                         hdev->rfkill = NULL;
2198                 }
2199         }
2200
2201         set_bit(HCI_SETUP, &hdev->dev_flags);
2202
2203         if (hdev->dev_type != HCI_AMP)
2204                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2205
2206         hci_notify(hdev, HCI_DEV_REG);
2207         hci_dev_hold(hdev);
2208
2209         queue_work(hdev->req_workqueue, &hdev->power_on);
2210
2211         return id;
2212
2213 err_wqueue:
2214         destroy_workqueue(hdev->workqueue);
2215         destroy_workqueue(hdev->req_workqueue);
2216 err:
2217         ida_simple_remove(&hci_index_ida, hdev->id);
2218         write_lock(&hci_dev_list_lock);
2219         list_del(&hdev->list);
2220         write_unlock(&hci_dev_list_lock);
2221
2222         return error;
2223 }
2224 EXPORT_SYMBOL(hci_register_dev);
2225
2226 /* Unregister HCI device */
2227 void hci_unregister_dev(struct hci_dev *hdev)
2228 {
2229         int i, id;
2230
2231         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2232
2233         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2234
2235         id = hdev->id;
2236
2237         write_lock(&hci_dev_list_lock);
2238         list_del(&hdev->list);
2239         write_unlock(&hci_dev_list_lock);
2240
2241         hci_dev_do_close(hdev);
2242
2243         for (i = 0; i < NUM_REASSEMBLY; i++)
2244                 kfree_skb(hdev->reassembly[i]);
2245
2246         cancel_work_sync(&hdev->power_on);
2247
2248         if (!test_bit(HCI_INIT, &hdev->flags) &&
2249             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2250                 hci_dev_lock(hdev);
2251                 mgmt_index_removed(hdev);
2252                 hci_dev_unlock(hdev);
2253         }
2254
2255         /* mgmt_index_removed should take care of emptying the
2256          * pending list */
2257         BUG_ON(!list_empty(&hdev->mgmt_pending));
2258
2259         hci_notify(hdev, HCI_DEV_UNREG);
2260
2261         if (hdev->rfkill) {
2262                 rfkill_unregister(hdev->rfkill);
2263                 rfkill_destroy(hdev->rfkill);
2264         }
2265
2266         hci_del_sysfs(hdev);
2267
2268         destroy_workqueue(hdev->workqueue);
2269         destroy_workqueue(hdev->req_workqueue);
2270
2271         hci_dev_lock(hdev);
2272         hci_blacklist_clear(hdev);
2273         hci_uuids_clear(hdev);
2274         hci_link_keys_clear(hdev);
2275         hci_smp_ltks_clear(hdev);
2276         hci_remote_oob_data_clear(hdev);
2277         hci_dev_unlock(hdev);
2278
2279         hci_dev_put(hdev);
2280
2281         ida_simple_remove(&hci_index_ida, id);
2282 }
2283 EXPORT_SYMBOL(hci_unregister_dev);
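/* Illustrative driver-side lifecycle sketch. The example_* names are
 * hypothetical and the block is compiled out, so this file is unchanged;
 * the callback signatures match what hci_register_dev() and
 * hci_send_frame() expect in this tree.
 */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct sk_buff *skb)
{
	/* A real driver hands the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}

static void example_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}
#endif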
2284
2285 /* Suspend HCI device */
2286 int hci_suspend_dev(struct hci_dev *hdev)
2287 {
2288         hci_notify(hdev, HCI_DEV_SUSPEND);
2289         return 0;
2290 }
2291 EXPORT_SYMBOL(hci_suspend_dev);
2292
2293 /* Resume HCI device */
2294 int hci_resume_dev(struct hci_dev *hdev)
2295 {
2296         hci_notify(hdev, HCI_DEV_RESUME);
2297         return 0;
2298 }
2299 EXPORT_SYMBOL(hci_resume_dev);
2300
2301 /* Receive frame from HCI drivers */
2302 int hci_recv_frame(struct sk_buff *skb)
2303 {
2304         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2305         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2306                       && !test_bit(HCI_INIT, &hdev->flags))) {
2307                 kfree_skb(skb);
2308                 return -ENXIO;
2309         }
2310
2311         /* Incoming skb */
2312         bt_cb(skb)->incoming = 1;
2313
2314         /* Time stamp */
2315         __net_timestamp(skb);
2316
2317         skb_queue_tail(&hdev->rx_q, skb);
2318         queue_work(hdev->workqueue, &hdev->rx_work);
2319
2320         return 0;
2321 }
2322 EXPORT_SYMBOL(hci_recv_frame);
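/* Illustrative RX-path sketch (hypothetical helper, compiled out): the
 * driver tags the skb with the owning hdev and the H:4 packet type
 * before handing it to hci_recv_frame().
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev, void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}
#endif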
2323
2324 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2325                           int count, __u8 index)
2326 {
2327         int len = 0;
2328         int hlen = 0;
2329         int remain = count;
2330         struct sk_buff *skb;
2331         struct bt_skb_cb *scb;
2332
2333         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2334             index >= NUM_REASSEMBLY)
2335                 return -EILSEQ;
2336
2337         skb = hdev->reassembly[index];
2338
2339         if (!skb) {
2340                 switch (type) {
2341                 case HCI_ACLDATA_PKT:
2342                         len = HCI_MAX_FRAME_SIZE;
2343                         hlen = HCI_ACL_HDR_SIZE;
2344                         break;
2345                 case HCI_EVENT_PKT:
2346                         len = HCI_MAX_EVENT_SIZE;
2347                         hlen = HCI_EVENT_HDR_SIZE;
2348                         break;
2349                 case HCI_SCODATA_PKT:
2350                         len = HCI_MAX_SCO_SIZE;
2351                         hlen = HCI_SCO_HDR_SIZE;
2352                         break;
2353                 }
2354
2355                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2356                 if (!skb)
2357                         return -ENOMEM;
2358
2359                 scb = (void *) skb->cb;
2360                 scb->expect = hlen;
2361                 scb->pkt_type = type;
2362
2363                 skb->dev = (void *) hdev;
2364                 hdev->reassembly[index] = skb;
2365         }
2366
2367         while (count) {
2368                 scb = (void *) skb->cb;
2369                 len = min_t(uint, scb->expect, count);
2370
2371                 memcpy(skb_put(skb, len), data, len);
2372
2373                 count -= len;
2374                 data += len;
2375                 scb->expect -= len;
2376                 remain = count;
2377
2378                 switch (type) {
2379                 case HCI_EVENT_PKT:
2380                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2381                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2382                                 scb->expect = h->plen;
2383
2384                                 if (skb_tailroom(skb) < scb->expect) {
2385                                         kfree_skb(skb);
2386                                         hdev->reassembly[index] = NULL;
2387                                         return -ENOMEM;
2388                                 }
2389                         }
2390                         break;
2391
2392                 case HCI_ACLDATA_PKT:
2393                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2394                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2395                                 scb->expect = __le16_to_cpu(h->dlen);
2396
2397                                 if (skb_tailroom(skb) < scb->expect) {
2398                                         kfree_skb(skb);
2399                                         hdev->reassembly[index] = NULL;
2400                                         return -ENOMEM;
2401                                 }
2402                         }
2403                         break;
2404
2405                 case HCI_SCODATA_PKT:
2406                         if (skb->len == HCI_SCO_HDR_SIZE) {
2407                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2408                                 scb->expect = h->dlen;
2409
2410                                 if (skb_tailroom(skb) < scb->expect) {
2411                                         kfree_skb(skb);
2412                                         hdev->reassembly[index] = NULL;
2413                                         return -ENOMEM;
2414                                 }
2415                         }
2416                         break;
2417                 }
2418
2419                 if (scb->expect == 0) {
2420                         /* Complete frame */
2421
2422                         bt_cb(skb)->pkt_type = type;
2423                         hci_recv_frame(skb);
2424
2425                         hdev->reassembly[index] = NULL;
2426                         return remain;
2427                 }
2428         }
2429
2430         return remain;
2431 }
2432
2433 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2434 {
2435         int rem = 0;
2436
2437         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2438                 return -EILSEQ;
2439
2440         while (count) {
2441                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2442                 if (rem < 0)
2443                         return rem;
2444
2445                 data += (count - rem);
2446                 count = rem;
2447         }
2448
2449         return rem;
2450 }
2451 EXPORT_SYMBOL(hci_recv_fragment);
2452
2453 #define STREAM_REASSEMBLY 0
2454
2455 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2456 {
2457         int type;
2458         int rem = 0;
2459
2460         while (count) {
2461                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2462
2463                 if (!skb) {
2464                         struct { char type; } *pkt;
2465
2466                         /* Start of the frame */
2467                         pkt = data;
2468                         type = pkt->type;
2469
2470                         data++;
2471                         count--;
2472                 } else
2473                         type = bt_cb(skb)->pkt_type;
2474
2475                 rem = hci_reassembly(hdev, type, data, count,
2476                                      STREAM_REASSEMBLY);
2477                 if (rem < 0)
2478                         return rem;
2479
2480                 data += (count - rem);
2481                 count = rem;
2482         }
2483
2484         return rem;
2485 }
2486 EXPORT_SYMBOL(hci_recv_stream_fragment);
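/* Illustrative sketch (hypothetical helper, compiled out): a byte-stream
 * transport such as a UART line discipline can push raw buffers here and
 * let the core split them on the leading H:4 packet-type byte.
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, u8 *buf, int len)
{
	int rem = hci_recv_stream_fragment(hdev, buf, len);

	if (rem < 0)
		BT_ERR("Stream reassembly failed: %d", rem);
}
#endif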
2487
2488 /* ---- Interface to upper protocols ---- */
2489
2490 int hci_register_cb(struct hci_cb *cb)
2491 {
2492         BT_DBG("%p name %s", cb, cb->name);
2493
2494         write_lock(&hci_cb_list_lock);
2495         list_add(&cb->list, &hci_cb_list);
2496         write_unlock(&hci_cb_list_lock);
2497
2498         return 0;
2499 }
2500 EXPORT_SYMBOL(hci_register_cb);
2501
2502 int hci_unregister_cb(struct hci_cb *cb)
2503 {
2504         BT_DBG("%p name %s", cb, cb->name);
2505
2506         write_lock(&hci_cb_list_lock);
2507         list_del(&cb->list);
2508         write_unlock(&hci_cb_list_lock);
2509
2510         return 0;
2511 }
2512 EXPORT_SYMBOL(hci_unregister_cb);
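/* Illustrative sketch (compiled out): an upper protocol registers one
 * struct hci_cb for connection notifications, typically at module init.
 * Only .name is shown; the notification hooks are omitted here.
 */
#if 0
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}
#endif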
2513
2514 static int hci_send_frame(struct sk_buff *skb)
2515 {
2516         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2517
2518         if (!hdev) {
2519                 kfree_skb(skb);
2520                 return -ENODEV;
2521         }
2522
2523         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2524
2525         /* Time stamp */
2526         __net_timestamp(skb);
2527
2528         /* Send copy to monitor */
2529         hci_send_to_monitor(hdev, skb);
2530
2531         if (atomic_read(&hdev->promisc)) {
2532                 /* Send copy to the sockets */
2533                 hci_send_to_sock(hdev, skb);
2534         }
2535
2536         /* Get rid of skb owner, prior to sending to the driver. */
2537         skb_orphan(skb);
2538
2539         return hdev->send(skb);
2540 }
2541
2542 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2543 {
2544         skb_queue_head_init(&req->cmd_q);
2545         req->hdev = hdev;
2546         req->err = 0;
2547 }
2548
2549 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2550 {
2551         struct hci_dev *hdev = req->hdev;
2552         struct sk_buff *skb;
2553         unsigned long flags;
2554
2555         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2556
2557         /* If an error occurred during request building, remove all HCI
2558          * commands queued on the HCI request queue.
2559          */
2560         if (req->err) {
2561                 skb_queue_purge(&req->cmd_q);
2562                 return req->err;
2563         }
2564
2565         /* Do not allow empty requests */
2566         if (skb_queue_empty(&req->cmd_q))
2567                 return -ENODATA;
2568
2569         skb = skb_peek_tail(&req->cmd_q);
2570         bt_cb(skb)->req.complete = complete;
2571
2572         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2573         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2574         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2575
2576         queue_work(hdev->workqueue, &hdev->cmd_work);
2577
2578         return 0;
2579 }
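/* Illustrative sketch (hypothetical names, compiled out): a request
 * batches one or more commands; the completion callback runs when the
 * last command in the batch completes.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_run_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}
#endif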
2580
2581 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2582                                        u32 plen, const void *param)
2583 {
2584         int len = HCI_COMMAND_HDR_SIZE + plen;
2585         struct hci_command_hdr *hdr;
2586         struct sk_buff *skb;
2587
2588         skb = bt_skb_alloc(len, GFP_ATOMIC);
2589         if (!skb)
2590                 return NULL;
2591
2592         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2593         hdr->opcode = cpu_to_le16(opcode);
2594         hdr->plen   = plen;
2595
2596         if (plen)
2597                 memcpy(skb_put(skb, plen), param, plen);
2598
2599         BT_DBG("skb len %d", skb->len);
2600
2601         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2602         skb->dev = (void *) hdev;
2603
2604         return skb;
2605 }
2606
2607 /* Send HCI command */
2608 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2609                  const void *param)
2610 {
2611         struct sk_buff *skb;
2612
2613         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2614
2615         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2616         if (!skb) {
2617                 BT_ERR("%s no memory for command", hdev->name);
2618                 return -ENOMEM;
2619         }
2620
2621         /* Stand-alone HCI commands must be flagged as
2622          * single-command requests.
2623          */
2624         bt_cb(skb)->req.start = true;
2625
2626         skb_queue_tail(&hdev->cmd_q, skb);
2627         queue_work(hdev->workqueue, &hdev->cmd_work);
2628
2629         return 0;
2630 }
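/* For example, a stand-alone controller reset is simply
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * (illustrative; most callers pass a parameter struct and its size).
 */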
2631
2632 /* Queue a command to an asynchronous HCI request */
2633 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2634                     const void *param, u8 event)
2635 {
2636         struct hci_dev *hdev = req->hdev;
2637         struct sk_buff *skb;
2638
2639         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2640
2641         /* If an error occurred during request building, there is no point in
2642          * queueing the HCI command. We can simply return.
2643          */
2644         if (req->err)
2645                 return;
2646
2647         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2648         if (!skb) {
2649                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2650                        hdev->name, opcode);
2651                 req->err = -ENOMEM;
2652                 return;
2653         }
2654
2655         if (skb_queue_empty(&req->cmd_q))
2656                 bt_cb(skb)->req.start = true;
2657
2658         bt_cb(skb)->req.event = event;
2659
2660         skb_queue_tail(&req->cmd_q, skb);
2661 }
2662
2663 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2664                  const void *param)
2665 {
2666         hci_req_add_ev(req, opcode, plen, param, 0);
2667 }
2668
2669 /* Get data from the previously sent command */
2670 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2671 {
2672         struct hci_command_hdr *hdr;
2673
2674         if (!hdev->sent_cmd)
2675                 return NULL;
2676
2677         hdr = (void *) hdev->sent_cmd->data;
2678
2679         if (hdr->opcode != cpu_to_le16(opcode))
2680                 return NULL;
2681
2682         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2683
2684         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2685 }
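/* Illustrative sketch (compiled out): a command-complete handler can
 * recover the parameters it originally sent, e.g. for
 * HCI_OP_WRITE_SCAN_ENABLE, whose payload is a single byte.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev)
{
	__u8 *param = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!param)
		return;

	BT_DBG("%s scan 0x%2.2x", hdev->name, *param);
}
#endif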
2686
2687 /* Send ACL data */
2688 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2689 {
2690         struct hci_acl_hdr *hdr;
2691         int len = skb->len;
2692
2693         skb_push(skb, HCI_ACL_HDR_SIZE);
2694         skb_reset_transport_header(skb);
2695         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2696         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2697         hdr->dlen   = cpu_to_le16(len);
2698 }
2699
2700 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2701                           struct sk_buff *skb, __u16 flags)
2702 {
2703         struct hci_conn *conn = chan->conn;
2704         struct hci_dev *hdev = conn->hdev;
2705         struct sk_buff *list;
2706
2707         skb->len = skb_headlen(skb);
2708         skb->data_len = 0;
2709
2710         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2711
2712         switch (hdev->dev_type) {
2713         case HCI_BREDR:
2714                 hci_add_acl_hdr(skb, conn->handle, flags);
2715                 break;
2716         case HCI_AMP:
2717                 hci_add_acl_hdr(skb, chan->handle, flags);
2718                 break;
2719         default:
2720                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2721                 return;
2722         }
2723
2724         list = skb_shinfo(skb)->frag_list;
2725         if (!list) {
2726                 /* Non-fragmented */
2727                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2728
2729                 skb_queue_tail(queue, skb);
2730         } else {
2731                 /* Fragmented */
2732                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2733
2734                 skb_shinfo(skb)->frag_list = NULL;
2735
2736                 /* Queue all fragments atomically */
2737                 spin_lock(&queue->lock);
2738
2739                 __skb_queue_tail(queue, skb);
2740
2741                 flags &= ~ACL_START;
2742                 flags |= ACL_CONT;
2743                 do {
2744                         skb = list; list = list->next;
2745
2746                         skb->dev = (void *) hdev;
2747                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2748                         hci_add_acl_hdr(skb, conn->handle, flags);
2749
2750                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2751
2752                         __skb_queue_tail(queue, skb);
2753                 } while (list);
2754
2755                 spin_unlock(&queue->lock);
2756         }
2757 }
2758
2759 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2760 {
2761         struct hci_dev *hdev = chan->conn->hdev;
2762
2763         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2764
2765         skb->dev = (void *) hdev;
2766
2767         hci_queue_acl(chan, &chan->data_q, skb, flags);
2768
2769         queue_work(hdev->workqueue, &hdev->tx_work);
2770 }
2771
2772 /* Send SCO data */
2773 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2774 {
2775         struct hci_dev *hdev = conn->hdev;
2776         struct hci_sco_hdr hdr;
2777
2778         BT_DBG("%s len %d", hdev->name, skb->len);
2779
2780         hdr.handle = cpu_to_le16(conn->handle);
2781         hdr.dlen   = skb->len;
2782
2783         skb_push(skb, HCI_SCO_HDR_SIZE);
2784         skb_reset_transport_header(skb);
2785         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2786
2787         skb->dev = (void *) hdev;
2788         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2789
2790         skb_queue_tail(&conn->data_q, skb);
2791         queue_work(hdev->workqueue, &hdev->tx_work);
2792 }
2793
2794 /* ---- HCI TX task (outgoing data) ---- */
2795
2796 /* HCI Connection scheduler */
2797 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2798                                      int *quote)
2799 {
2800         struct hci_conn_hash *h = &hdev->conn_hash;
2801         struct hci_conn *conn = NULL, *c;
2802         unsigned int num = 0, min = ~0;
2803
2804         /* We don't have to lock the device here. Connections are always
2805          * added and removed with the TX task disabled. */
2806
2807         rcu_read_lock();
2808
2809         list_for_each_entry_rcu(c, &h->list, list) {
2810                 if (c->type != type || skb_queue_empty(&c->data_q))
2811                         continue;
2812
2813                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2814                         continue;
2815
2816                 num++;
2817
2818                 if (c->sent < min) {
2819                         min  = c->sent;
2820                         conn = c;
2821                 }
2822
2823                 if (hci_conn_num(hdev, type) == num)
2824                         break;
2825         }
2826
2827         rcu_read_unlock();
2828
2829         if (conn) {
2830                 int cnt, q;
2831
2832                 switch (conn->type) {
2833                 case ACL_LINK:
2834                         cnt = hdev->acl_cnt;
2835                         break;
2836                 case SCO_LINK:
2837                 case ESCO_LINK:
2838                         cnt = hdev->sco_cnt;
2839                         break;
2840                 case LE_LINK:
2841                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2842                         break;
2843                 default:
2844                         cnt = 0;
2845                         BT_ERR("Unknown link type");
2846                 }
2847
2848                 q = cnt / num;
2849                 *quote = q ? q : 1;
2850         } else
2851                 *quote = 0;
2852
2853         BT_DBG("conn %p quote %d", conn, *quote);
2854         return conn;
2855 }
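/* Worked example for the quota above: with hdev->acl_cnt == 8 free
 * packet slots and num == 3 busy ACL connections, the least-used
 * connection gets a quote of 8 / 3 == 2 packets this round; a zero
 * quotient is rounded up to 1 so every connection makes progress.
 */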
2856
2857 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2858 {
2859         struct hci_conn_hash *h = &hdev->conn_hash;
2860         struct hci_conn *c;
2861
2862         BT_ERR("%s link tx timeout", hdev->name);
2863
2864         rcu_read_lock();
2865
2866         /* Kill stalled connections */
2867         list_for_each_entry_rcu(c, &h->list, list) {
2868                 if (c->type == type && c->sent) {
2869                         BT_ERR("%s killing stalled connection %pMR",
2870                                hdev->name, &c->dst);
2871                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2872                 }
2873         }
2874
2875         rcu_read_unlock();
2876 }
2877
2878 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2879                                       int *quote)
2880 {
2881         struct hci_conn_hash *h = &hdev->conn_hash;
2882         struct hci_chan *chan = NULL;
2883         unsigned int num = 0, min = ~0, cur_prio = 0;
2884         struct hci_conn *conn;
2885         int cnt, q, conn_num = 0;
2886
2887         BT_DBG("%s", hdev->name);
2888
2889         rcu_read_lock();
2890
2891         list_for_each_entry_rcu(conn, &h->list, list) {
2892                 struct hci_chan *tmp;
2893
2894                 if (conn->type != type)
2895                         continue;
2896
2897                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2898                         continue;
2899
2900                 conn_num++;
2901
2902                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2903                         struct sk_buff *skb;
2904
2905                         if (skb_queue_empty(&tmp->data_q))
2906                                 continue;
2907
2908                         skb = skb_peek(&tmp->data_q);
2909                         if (skb->priority < cur_prio)
2910                                 continue;
2911
2912                         if (skb->priority > cur_prio) {
2913                                 num = 0;
2914                                 min = ~0;
2915                                 cur_prio = skb->priority;
2916                         }
2917
2918                         num++;
2919
2920                         if (conn->sent < min) {
2921                                 min  = conn->sent;
2922                                 chan = tmp;
2923                         }
2924                 }
2925
2926                 if (hci_conn_num(hdev, type) == conn_num)
2927                         break;
2928         }
2929
2930         rcu_read_unlock();
2931
2932         if (!chan)
2933                 return NULL;
2934
2935         switch (chan->conn->type) {
2936         case ACL_LINK:
2937                 cnt = hdev->acl_cnt;
2938                 break;
2939         case AMP_LINK:
2940                 cnt = hdev->block_cnt;
2941                 break;
2942         case SCO_LINK:
2943         case ESCO_LINK:
2944                 cnt = hdev->sco_cnt;
2945                 break;
2946         case LE_LINK:
2947                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2948                 break;
2949         default:
2950                 cnt = 0;
2951                 BT_ERR("Unknown link type");
2952         }
2953
2954         q = cnt / num;
2955         *quote = q ? q : 1;
2956         BT_DBG("chan %p quote %d", chan, *quote);
2957         return chan;
2958 }
2959
2960 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2961 {
2962         struct hci_conn_hash *h = &hdev->conn_hash;
2963         struct hci_conn *conn;
2964         int num = 0;
2965
2966         BT_DBG("%s", hdev->name);
2967
2968         rcu_read_lock();
2969
2970         list_for_each_entry_rcu(conn, &h->list, list) {
2971                 struct hci_chan *chan;
2972
2973                 if (conn->type != type)
2974                         continue;
2975
2976                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2977                         continue;
2978
2979                 num++;
2980
2981                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2982                         struct sk_buff *skb;
2983
2984                         if (chan->sent) {
2985                                 chan->sent = 0;
2986                                 continue;
2987                         }
2988
2989                         if (skb_queue_empty(&chan->data_q))
2990                                 continue;
2991
2992                         skb = skb_peek(&chan->data_q);
2993                         if (skb->priority >= HCI_PRIO_MAX - 1)
2994                                 continue;
2995
2996                         skb->priority = HCI_PRIO_MAX - 1;
2997
2998                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2999                                skb->priority);
3000                 }
3001
3002                 if (hci_conn_num(hdev, type) == num)
3003                         break;
3004         }
3005
3006         rcu_read_unlock();
3007
3008 }
3009
3010 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3011 {
3012         /* Calculate count of blocks used by this packet */
3013         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3014 }
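/* Worked example: with hdev->block_len == 339 and skb->len == 679
 * (HCI_ACL_HDR_SIZE bytes of header plus 675 bytes of payload),
 * DIV_ROUND_UP(675, 339) == 2 blocks.
 */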
3015
3016 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3017 {
3018         if (!test_bit(HCI_RAW, &hdev->flags)) {
3019                 /* ACL tx timeout must be longer than the maximum
3020                  * link supervision timeout (40.9 seconds) */
3021                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3022                                        HCI_ACL_TX_TIMEOUT))
3023                         hci_link_tx_to(hdev, ACL_LINK);
3024         }
3025 }
3026
3027 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3028 {
3029         unsigned int cnt = hdev->acl_cnt;
3030         struct hci_chan *chan;
3031         struct sk_buff *skb;
3032         int quote;
3033
3034         __check_timeout(hdev, cnt);
3035
3036         while (hdev->acl_cnt &&
3037                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3038                 u32 priority = (skb_peek(&chan->data_q))->priority;
3039                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3040                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3041                                skb->len, skb->priority);
3042
3043                         /* Stop if priority has changed */
3044                         if (skb->priority < priority)
3045                                 break;
3046
3047                         skb = skb_dequeue(&chan->data_q);
3048
3049                         hci_conn_enter_active_mode(chan->conn,
3050                                                    bt_cb(skb)->force_active);
3051
3052                         hci_send_frame(skb);
3053                         hdev->acl_last_tx = jiffies;
3054
3055                         hdev->acl_cnt--;
3056                         chan->sent++;
3057                         chan->conn->sent++;
3058                 }
3059         }
3060
3061         if (cnt != hdev->acl_cnt)
3062                 hci_prio_recalculate(hdev, ACL_LINK);
3063 }
3064
3065 static void hci_sched_acl_blk(struct hci_dev *hdev)
3066 {
3067         unsigned int cnt = hdev->block_cnt;
3068         struct hci_chan *chan;
3069         struct sk_buff *skb;
3070         int quote;
3071         u8 type;
3072
3073         __check_timeout(hdev, cnt);
3074
3075         BT_DBG("%s", hdev->name);
3076
3077         if (hdev->dev_type == HCI_AMP)
3078                 type = AMP_LINK;
3079         else
3080                 type = ACL_LINK;
3081
3082         while (hdev->block_cnt > 0 &&
3083                (chan = hci_chan_sent(hdev, type, &quote))) {
3084                 u32 priority = (skb_peek(&chan->data_q))->priority;
3085                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3086                         int blocks;
3087
3088                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3089                                skb->len, skb->priority);
3090
3091                         /* Stop if priority has changed */
3092                         if (skb->priority < priority)
3093                                 break;
3094
3095                         skb = skb_dequeue(&chan->data_q);
3096
3097                         blocks = __get_blocks(hdev, skb);
3098                         if (blocks > hdev->block_cnt)
3099                                 return;
3100
3101                         hci_conn_enter_active_mode(chan->conn,
3102                                                    bt_cb(skb)->force_active);
3103
3104                         hci_send_frame(skb);
3105                         hdev->acl_last_tx = jiffies;
3106
3107                         hdev->block_cnt -= blocks;
3108                         quote -= blocks;
3109
3110                         chan->sent += blocks;
3111                         chan->conn->sent += blocks;
3112                 }
3113         }
3114
3115         if (cnt != hdev->block_cnt)
3116                 hci_prio_recalculate(hdev, type);
3117 }
3118
3119 static void hci_sched_acl(struct hci_dev *hdev)
3120 {
3121         BT_DBG("%s", hdev->name);
3122
3123         /* No ACL link over BR/EDR controller */
3124         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3125                 return;
3126
3127         /* No AMP link over AMP controller */
3128         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3129                 return;
3130
3131         switch (hdev->flow_ctl_mode) {
3132         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3133                 hci_sched_acl_pkt(hdev);
3134                 break;
3135
3136         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3137                 hci_sched_acl_blk(hdev);
3138                 break;
3139         }
3140 }
3141
3142 /* Schedule SCO */
3143 static void hci_sched_sco(struct hci_dev *hdev)
3144 {
3145         struct hci_conn *conn;
3146         struct sk_buff *skb;
3147         int quote;
3148
3149         BT_DBG("%s", hdev->name);
3150
3151         if (!hci_conn_num(hdev, SCO_LINK))
3152                 return;
3153
3154         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3155                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3156                         BT_DBG("skb %p len %d", skb, skb->len);
3157                         hci_send_frame(skb);
3158
3159                         conn->sent++;
3160                         if (conn->sent == ~0)
3161                                 conn->sent = 0;
3162                 }
3163         }
3164 }
3165
3166 static void hci_sched_esco(struct hci_dev *hdev)
3167 {
3168         struct hci_conn *conn;
3169         struct sk_buff *skb;
3170         int quote;
3171
3172         BT_DBG("%s", hdev->name);
3173
3174         if (!hci_conn_num(hdev, ESCO_LINK))
3175                 return;
3176
3177         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3178                                                      &quote))) {
3179                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3180                         BT_DBG("skb %p len %d", skb, skb->len);
3181                         hci_send_frame(skb);
3182
3183                         conn->sent++;
3184                         if (conn->sent == ~0)
3185                                 conn->sent = 0;
3186                 }
3187         }
3188 }
3189
3190 static void hci_sched_le(struct hci_dev *hdev)
3191 {
3192         struct hci_chan *chan;
3193         struct sk_buff *skb;
3194         int quote, cnt, tmp;
3195
3196         BT_DBG("%s", hdev->name);
3197
3198         if (!hci_conn_num(hdev, LE_LINK))
3199                 return;
3200
3201         if (!test_bit(HCI_RAW, &hdev->flags)) {
3202                 /* LE tx timeout must be longer than the maximum
3203                  * link supervision timeout (40.9 seconds) */
3204                 if (!hdev->le_cnt && hdev->le_pkts &&
3205                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3206                         hci_link_tx_to(hdev, LE_LINK);
3207         }
3208
3209         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3210         tmp = cnt;
3211         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3212                 u32 priority = (skb_peek(&chan->data_q))->priority;
3213                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3214                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3215                                skb->len, skb->priority);
3216
3217                         /* Stop if priority has changed */
3218                         if (skb->priority < priority)
3219                                 break;
3220
3221                         skb = skb_dequeue(&chan->data_q);
3222
3223                         hci_send_frame(skb);
3224                         hdev->le_last_tx = jiffies;
3225
3226                         cnt--;
3227                         chan->sent++;
3228                         chan->conn->sent++;
3229                 }
3230         }
3231
3232         if (hdev->le_pkts)
3233                 hdev->le_cnt = cnt;
3234         else
3235                 hdev->acl_cnt = cnt;
3236
3237         if (cnt != tmp)
3238                 hci_prio_recalculate(hdev, LE_LINK);
3239 }
3240
3241 static void hci_tx_work(struct work_struct *work)
3242 {
3243         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3244         struct sk_buff *skb;
3245
3246         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3247                hdev->sco_cnt, hdev->le_cnt);
3248
3249         /* Schedule queues and send pending frames to the HCI driver */
3250
3251         hci_sched_acl(hdev);
3252
3253         hci_sched_sco(hdev);
3254
3255         hci_sched_esco(hdev);
3256
3257         hci_sched_le(hdev);
3258
3259         /* Send next queued raw (unknown type) packet */
3260         while ((skb = skb_dequeue(&hdev->raw_q)))
3261                 hci_send_frame(skb);
3262 }
3263
3264 /* ----- HCI RX task (incoming data processing) ----- */
3265
3266 /* ACL data packet */
3267 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3268 {
3269         struct hci_acl_hdr *hdr = (void *) skb->data;
3270         struct hci_conn *conn;
3271         __u16 handle, flags;
3272
3273         skb_pull(skb, HCI_ACL_HDR_SIZE);
3274
3275         handle = __le16_to_cpu(hdr->handle);
3276         flags  = hci_flags(handle);
3277         handle = hci_handle(handle);
3278
3279         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3280                handle, flags);
3281
3282         hdev->stat.acl_rx++;
3283
3284         hci_dev_lock(hdev);
3285         conn = hci_conn_hash_lookup_handle(hdev, handle);
3286         hci_dev_unlock(hdev);
3287
3288         if (conn) {
3289                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3290
3291                 /* Send to upper protocol */
3292                 l2cap_recv_acldata(conn, skb, flags);
3293                 return;
3294         } else {
3295                 BT_ERR("%s ACL packet for unknown connection handle %d",
3296                        hdev->name, handle);
3297         }
3298
3299         kfree_skb(skb);
3300 }
3301
3302 /* SCO data packet */
3303 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3304 {
3305         struct hci_sco_hdr *hdr = (void *) skb->data;
3306         struct hci_conn *conn;
3307         __u16 handle;
3308
3309         skb_pull(skb, HCI_SCO_HDR_SIZE);
3310
3311         handle = __le16_to_cpu(hdr->handle);
3312
3313         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3314
3315         hdev->stat.sco_rx++;
3316
3317         hci_dev_lock(hdev);
3318         conn = hci_conn_hash_lookup_handle(hdev, handle);
3319         hci_dev_unlock(hdev);
3320
3321         if (conn) {
3322                 /* Send to upper protocol */
3323                 sco_recv_scodata(conn, skb);
3324                 return;
3325         } else {
3326                 BT_ERR("%s SCO packet for unknown connection handle %d",
3327                        hdev->name, handle);
3328         }
3329
3330         kfree_skb(skb);
3331 }
3332
3333 static bool hci_req_is_complete(struct hci_dev *hdev)
3334 {
3335         struct sk_buff *skb;
3336
3337         skb = skb_peek(&hdev->cmd_q);
3338         if (!skb)
3339                 return true;
3340
3341         return bt_cb(skb)->req.start;
3342 }
3343
3344 static void hci_resend_last(struct hci_dev *hdev)
3345 {
3346         struct hci_command_hdr *sent;
3347         struct sk_buff *skb;
3348         u16 opcode;
3349
3350         if (!hdev->sent_cmd)
3351                 return;
3352
3353         sent = (void *) hdev->sent_cmd->data;
3354         opcode = __le16_to_cpu(sent->opcode);
3355         if (opcode == HCI_OP_RESET)
3356                 return;
3357
3358         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3359         if (!skb)
3360                 return;
3361
3362         skb_queue_head(&hdev->cmd_q, skb);
3363         queue_work(hdev->workqueue, &hdev->cmd_work);
3364 }
3365
3366 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3367 {
3368         hci_req_complete_t req_complete = NULL;
3369         struct sk_buff *skb;
3370         unsigned long flags;
3371
3372         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3373
3374         /* If the completed command doesn't match the last one that was
3375          * sent, we need to do special handling of it.
3376          */
3377         if (!hci_sent_cmd_data(hdev, opcode)) {
3378                 /* Some CSR-based controllers generate a spontaneous
3379                  * reset complete event during init and any pending
3380                  * command will never be completed. In such a case we
3381                  * need to resend whatever was the last sent
3382                  * command.
3383                  */
3384                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3385                         hci_resend_last(hdev);
3386
3387                 return;
3388         }
3389
3390         /* If the command succeeded and there are still more commands in
3391          * this request, the request is not yet complete.
3392          */
3393         if (!status && !hci_req_is_complete(hdev))
3394                 return;
3395
3396         /* If this was the last command in a request, the complete
3397          * callback would be found in hdev->sent_cmd instead of the
3398          * command queue (hdev->cmd_q).
3399          */
3400         if (hdev->sent_cmd) {
3401                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3402                 if (req_complete)
3403                         goto call_complete;
3404         }
3405
3406         /* Remove all pending commands belonging to this request */
3407         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3408         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3409                 if (bt_cb(skb)->req.start) {
3410                         __skb_queue_head(&hdev->cmd_q, skb);
3411                         break;
3412                 }
3413
3414                 req_complete = bt_cb(skb)->req.complete;
3415                 kfree_skb(skb);
3416         }
3417         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3418
3419 call_complete:
3420         if (req_complete)
3421                 req_complete(hdev, status);
3422 }
3423
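/* RX work: drain rx_q and hand each frame to the monitor, to raw
 * sockets when in promiscuous mode, and finally to the event, ACL or
 * SCO handlers.
 */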
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* In raw mode userspace drives the device, so the core
                 * does not process the frame itself.
                 */
                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

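/* TX side of the command queue: send the next queued command as long
 * as the controller reports room for it (cmd_cnt) and (re)arm the
 * command timer to catch an unresponsive controller.
 */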
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                /* Keep a clone of the command so that the completion
                 * event can be matched against it later.
                 */
                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        /* Cloning failed; requeue the command and
                         * retry from the workqueue.
                         */
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

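/* Translate an exported bdaddr type (BDADDR_LE_*) into the
 * corresponding internal LE address type (ADDR_LE_DEV_*). Unknown
 * values fall back to the random address type.
 */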
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}