Bluetooth: Only schedule raw queue when user channel is active
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

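/* Synchronous requests are built with hci_req_init()/hci_req_add() and
 * submitted with hci_req_run(). The completion callback below fires from
 * the event processing path and wakes the caller sleeping on req_wait_q;
 * req_status/req_result carry the outcome back to the waiter.
 */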
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

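/* Grab the most recently received event from hdev->recv_evt and verify
 * that it is the Command Complete event for @opcode (or the specific
 * @event the caller asked to wait for). The skb is consumed on mismatch.
 */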
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

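/* Usage sketch (not part of the original file): how a driver or module
 * might issue a vendor-specific command synchronously. The 0xfc0f opcode
 * and the parameter block are hypothetical, for illustration only.
 */
static int __maybe_unused example_vendor_cmd(struct hci_dev *hdev)
{
        u8 param[4] = { 0x01, 0x02, 0x03, 0x04 };
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), param,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb holds the Command Complete parameters; drop it when done */
        kfree_skb(skb);

        return 0;
}
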
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

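/* Pick the Inquiry Mode to request from the controller: 0x02 (inquiry
 * result with RSSI or extended inquiry result) when EIR is supported,
 * 0x01 (inquiry result with RSSI) when advertised, and 0x00 (standard
 * inquiry result) otherwise. The manufacturer/revision checks below
 * cover controller firmwares that handle RSSI inquiry results without
 * advertising the feature bit.
 */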
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

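/* Build the parameter for the Set Event Mask command: an 8 byte bitfield
 * in which each bit enables delivery of one HCI event by the controller.
 */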
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

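/* Staged controller bring-up: stage one resets the controller and reads
 * basic information; stages two and three (run for BR/EDR and LE capable
 * controllers only) configure event masks, SSP, link policy and LE
 * support based on what stage one discovered.
 */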
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode BR/EDR, single-mode LE and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

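/* Re-insert @ie so that the resolve list stays ordered by signal
 * strength (smallest |RSSI|, i.e. strongest signal, first), never
 * placing @ie ahead of an entry whose name request is already pending.
 */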
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

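/* Add a new inquiry cache entry for @data or refresh an existing one.
 * Returns true when the remote name is already known and false when it
 * still needs to be resolved (or no entry could be allocated).
 */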
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

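/* Callback for wait_on_bit(): sleep until the HCI_INQUIRY bit is
 * cleared, aborting early when a signal is pending.
 */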
static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses use a buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
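
/* Userspace view (illustrative sketch, not part of this file): the
 * HCIINQUIRY ioctl takes a buffer holding a struct hci_inquiry_req
 * immediately followed by room for num_rsp inquiry_info entries,
 * roughly:
 *
 *      struct hci_inquiry_req *ir = buf;
 *
 *      ir->dev_id  = dev_id;
 *      ir->num_rsp = 255;
 *      ir->length  = 8;                // in units of 1.28 seconds
 *      ir->flags   = IREQ_CACHE_FLUSH;
 *      memcpy(ir->lap, giac, 3);       // GIAC 0x9e8b33, little endian
 *
 *      ioctl(dd, HCIINQUIRY, (unsigned long) buf);
 *
 * where dd is an AF_BLUETOOTH/BTPROTO_HCI raw socket.
 */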

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
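
/* Example of what create_ad() emits for an LE-only general-discoverable
 * peripheral with no name and no TX power set: the three bytes
 * 0x02 0x01 0x06, i.e. length 2, type EIR_FLAGS, and flags
 * LE_AD_GENERAL | LE_AD_NO_BREDR.
 */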

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non-BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

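/* Decide whether a link key should be stored persistently. Key types
 * below 0x03 are legacy (combination and unit) keys. For the
 * authentication requirements, 0x00/0x01 mean no bonding and 0x02/0x03
 * mean dedicated bonding, each without/with MITM protection.
 */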
1715 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1716                                u8 key_type, u8 old_key_type)
1717 {
1718         /* Legacy key */
1719         if (key_type < 0x03)
1720                 return true;
1721
1722         /* Debug keys are insecure so don't store them persistently */
1723         if (key_type == HCI_LK_DEBUG_COMBINATION)
1724                 return false;
1725
1726         /* Changed combination key and there's no previous one */
1727         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1728                 return false;
1729
1730         /* Security mode 3 case */
1731         if (!conn)
1732                 return true;
1733
1734         /* Neither local nor remote side had no-bonding as a requirement */
1735         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1736                 return true;
1737
1738         /* Local side had dedicated bonding as requirement */
1739         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1740                 return true;
1741
1742         /* Remote side had dedicated bonding as requirement */
1743         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1744                 return true;
1745
1746         /* If none of the above criteria match, then don't store the key
1747          * persistently */
1748         return false;
1749 }
1750
1751 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1752 {
1753         struct smp_ltk *k;
1754
1755         list_for_each_entry(k, &hdev->long_term_keys, list) {
1756                 if (k->ediv != ediv ||
1757                     memcmp(rand, k->rand, sizeof(k->rand)))
1758                         continue;
1759
1760                 return k;
1761         }
1762
1763         return NULL;
1764 }
1765
1766 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1767                                      u8 addr_type)
1768 {
1769         struct smp_ltk *k;
1770
1771         list_for_each_entry(k, &hdev->long_term_keys, list)
1772                 if (addr_type == k->bdaddr_type &&
1773                     bacmp(bdaddr, &k->bdaddr) == 0)
1774                         return k;
1775
1776         return NULL;
1777 }
1778
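/* Store (or update) a BR/EDR link key.  When new_key is set, user
 * space is notified via mgmt_new_link_key(); whether the key should
 * survive is decided by hci_persistent_key() above, and a
 * non-persistent key is marked for flushing on the connection.
 */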
1779 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1780                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1781 {
1782         struct link_key *key, *old_key;
1783         u8 old_key_type;
1784         bool persistent;
1785
1786         old_key = hci_find_link_key(hdev, bdaddr);
1787         if (old_key) {
1788                 old_key_type = old_key->type;
1789                 key = old_key;
1790         } else {
1791                 old_key_type = conn ? conn->key_type : 0xff;
1792                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1793                 if (!key)
1794                         return -ENOMEM;
1795                 list_add(&key->list, &hdev->link_keys);
1796         }
1797
1798         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1799
1800         /* Some buggy controller combinations generate a changed
1801          * combination key for legacy pairing even when there's no
1802          * previous key */
1803         if (type == HCI_LK_CHANGED_COMBINATION &&
1804             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1805                 type = HCI_LK_COMBINATION;
1806                 if (conn)
1807                         conn->key_type = type;
1808         }
1809
1810         bacpy(&key->bdaddr, bdaddr);
1811         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1812         key->pin_len = pin_len;
1813
1814         if (type == HCI_LK_CHANGED_COMBINATION)
1815                 key->type = old_key_type;
1816         else
1817                 key->type = type;
1818
1819         if (!new_key)
1820                 return 0;
1821
1822         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1823
1824         mgmt_new_link_key(hdev, key, persistent);
1825
1826         if (conn)
1827                 conn->flush_key = !persistent;
1828
1829         return 0;
1830 }
1831
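/* Store (or update) an SMP key.  Both short term keys (HCI_SMP_STK)
 * and long term keys (HCI_SMP_LTK) share the long_term_keys list, but
 * only LTKs are reported to user space via mgmt_new_ltk().
 */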
1832 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1833                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1834                 ediv, u8 rand[8])
1835 {
1836         struct smp_ltk *key, *old_key;
1837
1838         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1839                 return 0;
1840
1841         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1842         if (old_key)
1843                 key = old_key;
1844         else {
1845                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1846                 if (!key)
1847                         return -ENOMEM;
1848                 list_add(&key->list, &hdev->long_term_keys);
1849         }
1850
1851         bacpy(&key->bdaddr, bdaddr);
1852         key->bdaddr_type = addr_type;
1853         memcpy(key->val, tk, sizeof(key->val));
1854         key->authenticated = authenticated;
1855         key->ediv = ediv;
1856         key->enc_size = enc_size;
1857         key->type = type;
1858         memcpy(key->rand, rand, sizeof(key->rand));
1859
1860         if (!new_key)
1861                 return 0;
1862
1863         if (type & HCI_SMP_LTK)
1864                 mgmt_new_ltk(hdev, key, 1);
1865
1866         return 0;
1867 }
1868
1869 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1870 {
1871         struct link_key *key;
1872
1873         key = hci_find_link_key(hdev, bdaddr);
1874         if (!key)
1875                 return -ENOENT;
1876
1877         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1878
1879         list_del(&key->list);
1880         kfree(key);
1881
1882         return 0;
1883 }
1884
1885 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1886 {
1887         struct smp_ltk *k, *tmp;
1888
1889         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1890                 if (bacmp(bdaddr, &k->bdaddr))
1891                         continue;
1892
1893                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1894
1895                 list_del(&k->list);
1896                 kfree(k);
1897         }
1898
1899         return 0;
1900 }
1901
1902 /* HCI command timer function */
1903 static void hci_cmd_timeout(unsigned long arg)
1904 {
1905         struct hci_dev *hdev = (void *) arg;
1906
1907         if (hdev->sent_cmd) {
1908                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1909                 u16 opcode = __le16_to_cpu(sent->opcode);
1910
1911                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1912         } else {
1913                 BT_ERR("%s command tx timeout", hdev->name);
1914         }
1915
1916         atomic_set(&hdev->cmd_cnt, 1);
1917         queue_work(hdev->workqueue, &hdev->cmd_work);
1918 }
1919
1920 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1921                                           bdaddr_t *bdaddr)
1922 {
1923         struct oob_data *data;
1924
1925         list_for_each_entry(data, &hdev->remote_oob_data, list)
1926                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1927                         return data;
1928
1929         return NULL;
1930 }
1931
1932 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1933 {
1934         struct oob_data *data;
1935
1936         data = hci_find_remote_oob_data(hdev, bdaddr);
1937         if (!data)
1938                 return -ENOENT;
1939
1940         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1941
1942         list_del(&data->list);
1943         kfree(data);
1944
1945         return 0;
1946 }
1947
1948 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1949 {
1950         struct oob_data *data, *n;
1951
1952         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1953                 list_del(&data->list);
1954                 kfree(data);
1955         }
1956
1957         return 0;
1958 }
1959
1960 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1961                             u8 *randomizer)
1962 {
1963         struct oob_data *data;
1964
1965         data = hci_find_remote_oob_data(hdev, bdaddr);
1966
1967         if (!data) {
1968                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1969                 if (!data)
1970                         return -ENOMEM;
1971
1972                 bacpy(&data->bdaddr, bdaddr);
1973                 list_add(&data->list, &hdev->remote_oob_data);
1974         }
1975
1976         memcpy(data->hash, hash, sizeof(data->hash));
1977         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1978
1979         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1980
1981         return 0;
1982 }
1983
1984 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1985 {
1986         struct bdaddr_list *b;
1987
1988         list_for_each_entry(b, &hdev->blacklist, list)
1989                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1990                         return b;
1991
1992         return NULL;
1993 }
1994
1995 int hci_blacklist_clear(struct hci_dev *hdev)
1996 {
1997         struct list_head *p, *n;
1998
1999         list_for_each_safe(p, n, &hdev->blacklist) {
2000                 struct bdaddr_list *b;
2001
2002                 b = list_entry(p, struct bdaddr_list, list);
2003
2004                 list_del(p);
2005                 kfree(b);
2006         }
2007
2008         return 0;
2009 }
2010
2011 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2012 {
2013         struct bdaddr_list *entry;
2014
2015         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2016                 return -EBADF;
2017
2018         if (hci_blacklist_lookup(hdev, bdaddr))
2019                 return -EEXIST;
2020
2021         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2022         if (!entry)
2023                 return -ENOMEM;
2024
2025         bacpy(&entry->bdaddr, bdaddr);
2026
2027         list_add(&entry->list, &hdev->blacklist);
2028
2029         return mgmt_device_blocked(hdev, bdaddr, type);
2030 }
2031
2032 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2033 {
2034         struct bdaddr_list *entry;
2035
2036         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2037                 return hci_blacklist_clear(hdev);
2038
2039         entry = hci_blacklist_lookup(hdev, bdaddr);
2040         if (!entry)
2041                 return -ENOENT;
2042
2043         list_del(&entry->list);
2044         kfree(entry);
2045
2046         return mgmt_device_unblocked(hdev, bdaddr, type);
2047 }
2048
2049 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2050 {
2051         if (status) {
2052                 BT_ERR("Failed to start inquiry: status %d", status);
2053
2054                 hci_dev_lock(hdev);
2055                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2056                 hci_dev_unlock(hdev);
2057                 return;
2058         }
2059 }
2060
2061 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2062 {
2063         /* General inquiry access code (GIAC) */
2064         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2065         struct hci_request req;
2066         struct hci_cp_inquiry cp;
2067         int err;
2068
2069         if (status) {
2070                 BT_ERR("Failed to disable LE scanning: status %d", status);
2071                 return;
2072         }
2073
2074         switch (hdev->discovery.type) {
2075         case DISCOV_TYPE_LE:
2076                 hci_dev_lock(hdev);
2077                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2078                 hci_dev_unlock(hdev);
2079                 break;
2080
2081         case DISCOV_TYPE_INTERLEAVED:
2082                 hci_req_init(&req, hdev);
2083
2084                 memset(&cp, 0, sizeof(cp));
2085                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2086                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2087                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2088
2089                 hci_dev_lock(hdev);
2090
2091                 hci_inquiry_cache_flush(hdev);
2092
2093                 err = hci_req_run(&req, inquiry_complete);
2094                 if (err) {
2095                         BT_ERR("Inquiry request failed: err %d", err);
2096                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2097                 }
2098
2099                 hci_dev_unlock(hdev);
2100                 break;
2101         }
2102 }
2103
2104 static void le_scan_disable_work(struct work_struct *work)
2105 {
2106         struct hci_dev *hdev = container_of(work, struct hci_dev,
2107                                             le_scan_disable.work);
2108         struct hci_cp_le_set_scan_enable cp;
2109         struct hci_request req;
2110         int err;
2111
2112         BT_DBG("%s", hdev->name);
2113
2114         hci_req_init(&req, hdev);
2115
2116         memset(&cp, 0, sizeof(cp));
2117         cp.enable = LE_SCAN_DISABLE;
2118         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2119
2120         err = hci_req_run(&req, le_scan_disable_work_complete);
2121         if (err)
2122                 BT_ERR("Disable LE scanning request failed: err %d", err);
2123 }
2124
2125 /* Alloc HCI device */
2126 struct hci_dev *hci_alloc_dev(void)
2127 {
2128         struct hci_dev *hdev;
2129
2130         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2131         if (!hdev)
2132                 return NULL;
2133
2134         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2135         hdev->esco_type = (ESCO_HV1);
2136         hdev->link_mode = (HCI_LM_ACCEPT);
2137         hdev->io_capability = 0x03; /* No Input No Output */
2138         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2139         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2140
2141         hdev->sniff_max_interval = 800;
2142         hdev->sniff_min_interval = 80;
2143
2144         mutex_init(&hdev->lock);
2145         mutex_init(&hdev->req_lock);
2146
2147         INIT_LIST_HEAD(&hdev->mgmt_pending);
2148         INIT_LIST_HEAD(&hdev->blacklist);
2149         INIT_LIST_HEAD(&hdev->uuids);
2150         INIT_LIST_HEAD(&hdev->link_keys);
2151         INIT_LIST_HEAD(&hdev->long_term_keys);
2152         INIT_LIST_HEAD(&hdev->remote_oob_data);
2153         INIT_LIST_HEAD(&hdev->conn_hash.list);
2154
2155         INIT_WORK(&hdev->rx_work, hci_rx_work);
2156         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2157         INIT_WORK(&hdev->tx_work, hci_tx_work);
2158         INIT_WORK(&hdev->power_on, hci_power_on);
2159
2160         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2161         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2162         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2163
2164         skb_queue_head_init(&hdev->rx_q);
2165         skb_queue_head_init(&hdev->cmd_q);
2166         skb_queue_head_init(&hdev->raw_q);
2167
2168         init_waitqueue_head(&hdev->req_wait_q);
2169
2170         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2171
2172         hci_init_sysfs(hdev);
2173         discovery_init(hdev);
2174
2175         return hdev;
2176 }
2177 EXPORT_SYMBOL(hci_alloc_dev);
2178
2179 /* Free HCI device */
2180 void hci_free_dev(struct hci_dev *hdev)
2181 {
2182         /* will be freed via the device release callback */
2183         put_device(&hdev->dev);
2184 }
2185 EXPORT_SYMBOL(hci_free_dev);
2186
2187 /* Register HCI device */
2188 int hci_register_dev(struct hci_dev *hdev)
2189 {
2190         int id, error;
2191
2192         if (!hdev->open || !hdev->close)
2193                 return -EINVAL;
2194
2195         /* Do not allow HCI_AMP devices to register at index 0,
2196          * so the index can be used as the AMP controller ID.
2197          */
2198         switch (hdev->dev_type) {
2199         case HCI_BREDR:
2200                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2201                 break;
2202         case HCI_AMP:
2203                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2204                 break;
2205         default:
2206                 return -EINVAL;
2207         }
2208
2209         if (id < 0)
2210                 return id;
2211
2212         sprintf(hdev->name, "hci%d", id);
2213         hdev->id = id;
2214
2215         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2216
2217         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2218                                           WQ_MEM_RECLAIM, 1, hdev->name);
2219         if (!hdev->workqueue) {
2220                 error = -ENOMEM;
2221                 goto err;
2222         }
2223
2224         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2225                                               WQ_MEM_RECLAIM, 1, hdev->name);
2226         if (!hdev->req_workqueue) {
2227                 destroy_workqueue(hdev->workqueue);
2228                 error = -ENOMEM;
2229                 goto err;
2230         }
2231
2232         error = hci_add_sysfs(hdev);
2233         if (error < 0)
2234                 goto err_wqueue;
2235
2236         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2237                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2238                                     hdev);
2239         if (hdev->rfkill) {
2240                 if (rfkill_register(hdev->rfkill) < 0) {
2241                         rfkill_destroy(hdev->rfkill);
2242                         hdev->rfkill = NULL;
2243                 }
2244         }
2245
2246         set_bit(HCI_SETUP, &hdev->dev_flags);
2247
2248         if (hdev->dev_type != HCI_AMP)
2249                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2250
2251         write_lock(&hci_dev_list_lock);
2252         list_add(&hdev->list, &hci_dev_list);
2253         write_unlock(&hci_dev_list_lock);
2254
2255         hci_notify(hdev, HCI_DEV_REG);
2256         hci_dev_hold(hdev);
2257
2258         queue_work(hdev->req_workqueue, &hdev->power_on);
2259
2260         return id;
2261
2262 err_wqueue:
2263         destroy_workqueue(hdev->workqueue);
2264         destroy_workqueue(hdev->req_workqueue);
2265 err:
2266         ida_simple_remove(&hci_index_ida, hdev->id);
2267
2268         return error;
2269 }
2270 EXPORT_SYMBOL(hci_register_dev);
2271
2272 /* Unregister HCI device */
2273 void hci_unregister_dev(struct hci_dev *hdev)
2274 {
2275         int i, id;
2276
2277         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2278
2279         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2280
2281         id = hdev->id;
2282
2283         write_lock(&hci_dev_list_lock);
2284         list_del(&hdev->list);
2285         write_unlock(&hci_dev_list_lock);
2286
2287         hci_dev_do_close(hdev);
2288
2289         for (i = 0; i < NUM_REASSEMBLY; i++)
2290                 kfree_skb(hdev->reassembly[i]);
2291
2292         cancel_work_sync(&hdev->power_on);
2293
2294         if (!test_bit(HCI_INIT, &hdev->flags) &&
2295             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2296                 hci_dev_lock(hdev);
2297                 mgmt_index_removed(hdev);
2298                 hci_dev_unlock(hdev);
2299         }
2300
2301         /* mgmt_index_removed should take care of emptying the
2302          * pending list */
2303         BUG_ON(!list_empty(&hdev->mgmt_pending));
2304
2305         hci_notify(hdev, HCI_DEV_UNREG);
2306
2307         if (hdev->rfkill) {
2308                 rfkill_unregister(hdev->rfkill);
2309                 rfkill_destroy(hdev->rfkill);
2310         }
2311
2312         hci_del_sysfs(hdev);
2313
2314         destroy_workqueue(hdev->workqueue);
2315         destroy_workqueue(hdev->req_workqueue);
2316
2317         hci_dev_lock(hdev);
2318         hci_blacklist_clear(hdev);
2319         hci_uuids_clear(hdev);
2320         hci_link_keys_clear(hdev);
2321         hci_smp_ltks_clear(hdev);
2322         hci_remote_oob_data_clear(hdev);
2323         hci_dev_unlock(hdev);
2324
2325         hci_dev_put(hdev);
2326
2327         ida_simple_remove(&hci_index_ida, id);
2328 }
2329 EXPORT_SYMBOL(hci_unregister_dev);
2330
2331 /* Suspend HCI device */
2332 int hci_suspend_dev(struct hci_dev *hdev)
2333 {
2334         hci_notify(hdev, HCI_DEV_SUSPEND);
2335         return 0;
2336 }
2337 EXPORT_SYMBOL(hci_suspend_dev);
2338
2339 /* Resume HCI device */
2340 int hci_resume_dev(struct hci_dev *hdev)
2341 {
2342         hci_notify(hdev, HCI_DEV_RESUME);
2343         return 0;
2344 }
2345 EXPORT_SYMBOL(hci_resume_dev);
2346
2347 /* Receive frame from HCI drivers */
2348 int hci_recv_frame(struct sk_buff *skb)
2349 {
2350         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2351         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2352                       !test_bit(HCI_INIT, &hdev->flags))) {
2353                 kfree_skb(skb);
2354                 return -ENXIO;
2355         }
2356
2357         /* Incoming skb */
2358         bt_cb(skb)->incoming = 1;
2359
2360         /* Time stamp */
2361         __net_timestamp(skb);
2362
2363         skb_queue_tail(&hdev->rx_q, skb);
2364         queue_work(hdev->workqueue, &hdev->rx_work);
2365
2366         return 0;
2367 }
2368 EXPORT_SYMBOL(hci_recv_frame);
2369
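/* Reassemble one HCI packet of the given type from a (possibly
 * partial) byte stream.  A fresh skb is sized for the largest frame of
 * that type; scb->expect tracks how many bytes are still missing,
 * first for the packet header and then, once the header is complete,
 * for the payload length it announces.  Returns the number of input
 * bytes left over, or a negative error.
 */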
2370 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2371                           int count, __u8 index)
2372 {
2373         int len = 0;
2374         int hlen = 0;
2375         int remain = count;
2376         struct sk_buff *skb;
2377         struct bt_skb_cb *scb;
2378
2379         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2380             index >= NUM_REASSEMBLY)
2381                 return -EILSEQ;
2382
2383         skb = hdev->reassembly[index];
2384
2385         if (!skb) {
2386                 switch (type) {
2387                 case HCI_ACLDATA_PKT:
2388                         len = HCI_MAX_FRAME_SIZE;
2389                         hlen = HCI_ACL_HDR_SIZE;
2390                         break;
2391                 case HCI_EVENT_PKT:
2392                         len = HCI_MAX_EVENT_SIZE;
2393                         hlen = HCI_EVENT_HDR_SIZE;
2394                         break;
2395                 case HCI_SCODATA_PKT:
2396                         len = HCI_MAX_SCO_SIZE;
2397                         hlen = HCI_SCO_HDR_SIZE;
2398                         break;
2399                 }
2400
2401                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2402                 if (!skb)
2403                         return -ENOMEM;
2404
2405                 scb = (void *) skb->cb;
2406                 scb->expect = hlen;
2407                 scb->pkt_type = type;
2408
2409                 skb->dev = (void *) hdev;
2410                 hdev->reassembly[index] = skb;
2411         }
2412
2413         while (count) {
2414                 scb = (void *) skb->cb;
2415                 len = min_t(uint, scb->expect, count);
2416
2417                 memcpy(skb_put(skb, len), data, len);
2418
2419                 count -= len;
2420                 data += len;
2421                 scb->expect -= len;
2422                 remain = count;
2423
2424                 switch (type) {
2425                 case HCI_EVENT_PKT:
2426                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2427                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2428                                 scb->expect = h->plen;
2429
2430                                 if (skb_tailroom(skb) < scb->expect) {
2431                                         kfree_skb(skb);
2432                                         hdev->reassembly[index] = NULL;
2433                                         return -ENOMEM;
2434                                 }
2435                         }
2436                         break;
2437
2438                 case HCI_ACLDATA_PKT:
2439                 if (skb->len == HCI_ACL_HDR_SIZE) {
2440                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2441                                 scb->expect = __le16_to_cpu(h->dlen);
2442
2443                                 if (skb_tailroom(skb) < scb->expect) {
2444                                         kfree_skb(skb);
2445                                         hdev->reassembly[index] = NULL;
2446                                         return -ENOMEM;
2447                                 }
2448                         }
2449                         break;
2450
2451                 case HCI_SCODATA_PKT:
2452                         if (skb->len == HCI_SCO_HDR_SIZE) {
2453                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2454                                 scb->expect = h->dlen;
2455
2456                                 if (skb_tailroom(skb) < scb->expect) {
2457                                         kfree_skb(skb);
2458                                         hdev->reassembly[index] = NULL;
2459                                         return -ENOMEM;
2460                                 }
2461                         }
2462                         break;
2463                 }
2464
2465                 if (scb->expect == 0) {
2466                         /* Complete frame */
2467
2468                         bt_cb(skb)->pkt_type = type;
2469                         hci_recv_frame(skb);
2470
2471                         hdev->reassembly[index] = NULL;
2472                         return remain;
2473                 }
2474         }
2475
2476         return remain;
2477 }
2478
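/* A minimal usage sketch (assumptions, not part of this file): a
 * transport driver that receives HCI frames in arbitrary chunks could
 * push them here as they arrive; read_chunk() is a hypothetical
 * helper:
 *
 *	len = read_chunk(buf, sizeof(buf));
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("reassembly failed: %d", err);
 *
 * The loop below keeps feeding hci_reassembly() until the whole buffer
 * has been consumed, so 0 is returned on success.
 */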
2479 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2480 {
2481         int rem = 0;
2482
2483         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2484                 return -EILSEQ;
2485
2486         while (count) {
2487                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2488                 if (rem < 0)
2489                         return rem;
2490
2491                 data += (count - rem);
2492                 count = rem;
2493         }
2494
2495         return rem;
2496 }
2497 EXPORT_SYMBOL(hci_recv_fragment);
2498
2499 #define STREAM_REASSEMBLY 0
2500
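/* Stream variant of the above for transports that do not delimit
 * packets themselves (e.g. UART-style drivers): the first byte of
 * every frame is taken as the packet type indicator, and a single
 * reassembly slot (STREAM_REASSEMBLY) is used for the whole stream.
 */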
2501 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2502 {
2503         int type;
2504         int rem = 0;
2505
2506         while (count) {
2507                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2508
2509                 if (!skb) {
2510                         struct { char type; } *pkt;
2511
2512                         /* Start of the frame */
2513                         pkt = data;
2514                         type = pkt->type;
2515
2516                         data++;
2517                         count--;
2518                 } else
2519                         type = bt_cb(skb)->pkt_type;
2520
2521                 rem = hci_reassembly(hdev, type, data, count,
2522                                      STREAM_REASSEMBLY);
2523                 if (rem < 0)
2524                         return rem;
2525
2526                 data += (count - rem);
2527                 count = rem;
2528         }
2529
2530         return rem;
2531 }
2532 EXPORT_SYMBOL(hci_recv_stream_fragment);
2533
2534 /* ---- Interface to upper protocols ---- */
2535
2536 int hci_register_cb(struct hci_cb *cb)
2537 {
2538         BT_DBG("%p name %s", cb, cb->name);
2539
2540         write_lock(&hci_cb_list_lock);
2541         list_add(&cb->list, &hci_cb_list);
2542         write_unlock(&hci_cb_list_lock);
2543
2544         return 0;
2545 }
2546 EXPORT_SYMBOL(hci_register_cb);
2547
2548 int hci_unregister_cb(struct hci_cb *cb)
2549 {
2550         BT_DBG("%p name %s", cb, cb->name);
2551
2552         write_lock(&hci_cb_list_lock);
2553         list_del(&cb->list);
2554         write_unlock(&hci_cb_list_lock);
2555
2556         return 0;
2557 }
2558 EXPORT_SYMBOL(hci_unregister_cb);
2559
2560 static int hci_send_frame(struct sk_buff *skb)
2561 {
2562         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2563
2564         if (!hdev) {
2565                 kfree_skb(skb);
2566                 return -ENODEV;
2567         }
2568
2569         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2570
2571         /* Time stamp */
2572         __net_timestamp(skb);
2573
2574         /* Send copy to monitor */
2575         hci_send_to_monitor(hdev, skb);
2576
2577         if (atomic_read(&hdev->promisc)) {
2578                 /* Send copy to the sockets */
2579                 hci_send_to_sock(hdev, skb);
2580         }
2581
2582         /* Get rid of skb owner, prior to sending to the driver. */
2583         skb_orphan(skb);
2584
2585         return hdev->send(skb);
2586 }
2587
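/* Asynchronous HCI request helpers.  A request collects one or more
 * commands in req->cmd_q and splices them onto hdev->cmd_q as a unit.
 * A minimal sketch, mirroring le_scan_disable_work() above
 * (my_complete_cb is a hypothetical callback):
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * The complete callback is attached to the last command in the request
 * and invoked once by hci_req_cmd_complete().
 */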
2588 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2589 {
2590         skb_queue_head_init(&req->cmd_q);
2591         req->hdev = hdev;
2592         req->err = 0;
2593 }
2594
2595 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2596 {
2597         struct hci_dev *hdev = req->hdev;
2598         struct sk_buff *skb;
2599         unsigned long flags;
2600
2601         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2602
2603         /* If an error occurred during request building, remove all HCI
2604          * commands queued on the HCI request queue.
2605          */
2606         if (req->err) {
2607                 skb_queue_purge(&req->cmd_q);
2608                 return req->err;
2609         }
2610
2611         /* Do not allow empty requests */
2612         if (skb_queue_empty(&req->cmd_q))
2613                 return -ENODATA;
2614
2615         skb = skb_peek_tail(&req->cmd_q);
2616         bt_cb(skb)->req.complete = complete;
2617
2618         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2619         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2620         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2621
2622         queue_work(hdev->workqueue, &hdev->cmd_work);
2623
2624         return 0;
2625 }
2626
2627 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2628                                        u32 plen, const void *param)
2629 {
2630         int len = HCI_COMMAND_HDR_SIZE + plen;
2631         struct hci_command_hdr *hdr;
2632         struct sk_buff *skb;
2633
2634         skb = bt_skb_alloc(len, GFP_ATOMIC);
2635         if (!skb)
2636                 return NULL;
2637
2638         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2639         hdr->opcode = cpu_to_le16(opcode);
2640         hdr->plen   = plen;
2641
2642         if (plen)
2643                 memcpy(skb_put(skb, plen), param, plen);
2644
2645         BT_DBG("skb len %d", skb->len);
2646
2647         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2648         skb->dev = (void *) hdev;
2649
2650         return skb;
2651 }
2652
2653 /* Send HCI command */
2654 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2655                  const void *param)
2656 {
2657         struct sk_buff *skb;
2658
2659         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2660
2661         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2662         if (!skb) {
2663                 BT_ERR("%s no memory for command", hdev->name);
2664                 return -ENOMEM;
2665         }
2666
2667         /* Stand-alone HCI commands must be flagged as
2668          * single-command requests.
2669          */
2670         bt_cb(skb)->req.start = true;
2671
2672         skb_queue_tail(&hdev->cmd_q, skb);
2673         queue_work(hdev->workqueue, &hdev->cmd_work);
2674
2675         return 0;
2676 }
2677
2678 /* Queue a command to an asynchronous HCI request */
2679 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2680                     const void *param, u8 event)
2681 {
2682         struct hci_dev *hdev = req->hdev;
2683         struct sk_buff *skb;
2684
2685         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2686
2687         /* If an error occurred during request building, there is no point in
2688          * queueing the HCI command. We can simply return.
2689          */
2690         if (req->err)
2691                 return;
2692
2693         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2694         if (!skb) {
2695                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2696                        hdev->name, opcode);
2697                 req->err = -ENOMEM;
2698                 return;
2699         }
2700
2701         if (skb_queue_empty(&req->cmd_q))
2702                 bt_cb(skb)->req.start = true;
2703
2704         bt_cb(skb)->req.event = event;
2705
2706         skb_queue_tail(&req->cmd_q, skb);
2707 }
2708
2709 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2710                  const void *param)
2711 {
2712         hci_req_add_ev(req, opcode, plen, param, 0);
2713 }
2714
2715 /* Get data from the previously sent command */
2716 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2717 {
2718         struct hci_command_hdr *hdr;
2719
2720         if (!hdev->sent_cmd)
2721                 return NULL;
2722
2723         hdr = (void *) hdev->sent_cmd->data;
2724
2725         if (hdr->opcode != cpu_to_le16(opcode))
2726                 return NULL;
2727
2728         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2729
2730         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2731 }
2732
2733 /* Send ACL data */
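/* The 16-bit ACL handle field carries the 12-bit connection handle in
 * its low bits and the packet boundary/broadcast flags in the top
 * four; hci_handle_pack() combines the two before the header is
 * pushed in front of the payload.
 */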
2734 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2735 {
2736         struct hci_acl_hdr *hdr;
2737         int len = skb->len;
2738
2739         skb_push(skb, HCI_ACL_HDR_SIZE);
2740         skb_reset_transport_header(skb);
2741         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2742         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2743         hdr->dlen   = cpu_to_le16(len);
2744 }
2745
2746 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2747                           struct sk_buff *skb, __u16 flags)
2748 {
2749         struct hci_conn *conn = chan->conn;
2750         struct hci_dev *hdev = conn->hdev;
2751         struct sk_buff *list;
2752
2753         skb->len = skb_headlen(skb);
2754         skb->data_len = 0;
2755
2756         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2757
2758         switch (hdev->dev_type) {
2759         case HCI_BREDR:
2760                 hci_add_acl_hdr(skb, conn->handle, flags);
2761                 break;
2762         case HCI_AMP:
2763                 hci_add_acl_hdr(skb, chan->handle, flags);
2764                 break;
2765         default:
2766                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2767                 return;
2768         }
2769
2770         list = skb_shinfo(skb)->frag_list;
2771         if (!list) {
2772                 /* Non-fragmented */
2773                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2774
2775                 skb_queue_tail(queue, skb);
2776         } else {
2777                 /* Fragmented */
2778                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2779
2780                 skb_shinfo(skb)->frag_list = NULL;
2781
2782                 /* Queue all fragments atomically */
2783                 spin_lock(&queue->lock);
2784
2785                 __skb_queue_tail(queue, skb);
2786
2787                 flags &= ~ACL_START;
2788                 flags |= ACL_CONT;
2789                 do {
2790                         skb = list; list = list->next;
2791
2792                         skb->dev = (void *) hdev;
2793                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2794                         hci_add_acl_hdr(skb, conn->handle, flags);
2795
2796                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2797
2798                         __skb_queue_tail(queue, skb);
2799                 } while (list);
2800
2801                 spin_unlock(&queue->lock);
2802         }
2803 }
2804
2805 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2806 {
2807         struct hci_dev *hdev = chan->conn->hdev;
2808
2809         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2810
2811         skb->dev = (void *) hdev;
2812
2813         hci_queue_acl(chan, &chan->data_q, skb, flags);
2814
2815         queue_work(hdev->workqueue, &hdev->tx_work);
2816 }
2817
2818 /* Send SCO data */
2819 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2820 {
2821         struct hci_dev *hdev = conn->hdev;
2822         struct hci_sco_hdr hdr;
2823
2824         BT_DBG("%s len %d", hdev->name, skb->len);
2825
2826         hdr.handle = cpu_to_le16(conn->handle);
2827         hdr.dlen   = skb->len;
2828
2829         skb_push(skb, HCI_SCO_HDR_SIZE);
2830         skb_reset_transport_header(skb);
2831         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2832
2833         skb->dev = (void *) hdev;
2834         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2835
2836         skb_queue_tail(&conn->data_q, skb);
2837         queue_work(hdev->workqueue, &hdev->tx_work);
2838 }
2839
2840 /* ---- HCI TX task (outgoing data) ---- */
2841
2842 /* HCI Connection scheduler */
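/* Pick the connection of the given type that has the least data in
 * flight (lowest c->sent) and compute its fair share of the free
 * controller buffers: quote = available slots / active connections,
 * with a minimum of one so progress is always possible.
 */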
2843 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2844                                      int *quote)
2845 {
2846         struct hci_conn_hash *h = &hdev->conn_hash;
2847         struct hci_conn *conn = NULL, *c;
2848         unsigned int num = 0, min = ~0;
2849
2850         /* We don't have to lock the device here. Connections are always
2851          * added and removed with the TX task disabled. */
2852
2853         rcu_read_lock();
2854
2855         list_for_each_entry_rcu(c, &h->list, list) {
2856                 if (c->type != type || skb_queue_empty(&c->data_q))
2857                         continue;
2858
2859                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2860                         continue;
2861
2862                 num++;
2863
2864                 if (c->sent < min) {
2865                         min  = c->sent;
2866                         conn = c;
2867                 }
2868
2869                 if (hci_conn_num(hdev, type) == num)
2870                         break;
2871         }
2872
2873         rcu_read_unlock();
2874
2875         if (conn) {
2876                 int cnt, q;
2877
2878                 switch (conn->type) {
2879                 case ACL_LINK:
2880                         cnt = hdev->acl_cnt;
2881                         break;
2882                 case SCO_LINK:
2883                 case ESCO_LINK:
2884                         cnt = hdev->sco_cnt;
2885                         break;
2886                 case LE_LINK:
2887                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2888                         break;
2889                 default:
2890                         cnt = 0;
2891                         BT_ERR("Unknown link type");
2892                 }
2893
2894                 q = cnt / num;
2895                 *quote = q ? q : 1;
2896         } else
2897                 *quote = 0;
2898
2899         BT_DBG("conn %p quote %d", conn, *quote);
2900         return conn;
2901 }
2902
2903 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2904 {
2905         struct hci_conn_hash *h = &hdev->conn_hash;
2906         struct hci_conn *c;
2907
2908         BT_ERR("%s link tx timeout", hdev->name);
2909
2910         rcu_read_lock();
2911
2912         /* Kill stalled connections */
2913         list_for_each_entry_rcu(c, &h->list, list) {
2914                 if (c->type == type && c->sent) {
2915                         BT_ERR("%s killing stalled connection %pMR",
2916                                hdev->name, &c->dst);
2917                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2918                 }
2919         }
2920
2921         rcu_read_unlock();
2922 }
2923
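/* Channel-level variant of the scheduler above: among the channels of
 * all connections of the given type, only those whose head skb has the
 * highest pending priority compete, and of those the channel on the
 * least-busy connection wins.  The quote is derived from the buffer
 * pool that matches the link type.
 */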
2924 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2925                                       int *quote)
2926 {
2927         struct hci_conn_hash *h = &hdev->conn_hash;
2928         struct hci_chan *chan = NULL;
2929         unsigned int num = 0, min = ~0, cur_prio = 0;
2930         struct hci_conn *conn;
2931         int cnt, q, conn_num = 0;
2932
2933         BT_DBG("%s", hdev->name);
2934
2935         rcu_read_lock();
2936
2937         list_for_each_entry_rcu(conn, &h->list, list) {
2938                 struct hci_chan *tmp;
2939
2940                 if (conn->type != type)
2941                         continue;
2942
2943                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2944                         continue;
2945
2946                 conn_num++;
2947
2948                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2949                         struct sk_buff *skb;
2950
2951                         if (skb_queue_empty(&tmp->data_q))
2952                                 continue;
2953
2954                         skb = skb_peek(&tmp->data_q);
2955                         if (skb->priority < cur_prio)
2956                                 continue;
2957
2958                         if (skb->priority > cur_prio) {
2959                                 num = 0;
2960                                 min = ~0;
2961                                 cur_prio = skb->priority;
2962                         }
2963
2964                         num++;
2965
2966                         if (conn->sent < min) {
2967                                 min  = conn->sent;
2968                                 chan = tmp;
2969                         }
2970                 }
2971
2972                 if (hci_conn_num(hdev, type) == conn_num)
2973                         break;
2974         }
2975
2976         rcu_read_unlock();
2977
2978         if (!chan)
2979                 return NULL;
2980
2981         switch (chan->conn->type) {
2982         case ACL_LINK:
2983                 cnt = hdev->acl_cnt;
2984                 break;
2985         case AMP_LINK:
2986                 cnt = hdev->block_cnt;
2987                 break;
2988         case SCO_LINK:
2989         case ESCO_LINK:
2990                 cnt = hdev->sco_cnt;
2991                 break;
2992         case LE_LINK:
2993                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2994                 break;
2995         default:
2996                 cnt = 0;
2997                 BT_ERR("Unknown link type");
2998         }
2999
3000         q = cnt / num;
3001         *quote = q ? q : 1;
3002         BT_DBG("chan %p quote %d", chan, *quote);
3003         return chan;
3004 }
3005
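/* Anti-starvation pass: after a scheduling round, channels that sent
 * nothing while others made progress get the head of their queue
 * promoted to HCI_PRIO_MAX - 1, so low-priority traffic is not
 * starved indefinitely by higher-priority channels.
 */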
3006 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3007 {
3008         struct hci_conn_hash *h = &hdev->conn_hash;
3009         struct hci_conn *conn;
3010         int num = 0;
3011
3012         BT_DBG("%s", hdev->name);
3013
3014         rcu_read_lock();
3015
3016         list_for_each_entry_rcu(conn, &h->list, list) {
3017                 struct hci_chan *chan;
3018
3019                 if (conn->type != type)
3020                         continue;
3021
3022                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3023                         continue;
3024
3025                 num++;
3026
3027                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3028                         struct sk_buff *skb;
3029
3030                         if (chan->sent) {
3031                                 chan->sent = 0;
3032                                 continue;
3033                         }
3034
3035                         if (skb_queue_empty(&chan->data_q))
3036                                 continue;
3037
3038                         skb = skb_peek(&chan->data_q);
3039                         if (skb->priority >= HCI_PRIO_MAX - 1)
3040                                 continue;
3041
3042                         skb->priority = HCI_PRIO_MAX - 1;
3043
3044                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3045                                skb->priority);
3046                 }
3047
3048                 if (hci_conn_num(hdev, type) == num)
3049                         break;
3050         }
3051
3052         rcu_read_unlock();
3053
3054 }
3055
3056 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3057 {
3058         /* Calculate count of blocks used by this packet */
3059         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3060 }
3061
3062 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3063 {
3064         if (!test_bit(HCI_RAW, &hdev->flags)) {
3065                 /* ACL tx timeout must be longer than maximum
3066                  * link supervision timeout (40.9 seconds) */
3067                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3068                                        HCI_ACL_TX_TIMEOUT))
3069                         hci_link_tx_to(hdev, ACL_LINK);
3070         }
3071 }
3072
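/* Packet-based ACL scheduling: every frame costs exactly one of the
 * hdev->acl_cnt controller buffers; the count is decremented per send
 * and replenished elsewhere when the controller reports completed
 * packets.
 */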
3073 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3074 {
3075         unsigned int cnt = hdev->acl_cnt;
3076         struct hci_chan *chan;
3077         struct sk_buff *skb;
3078         int quote;
3079
3080         __check_timeout(hdev, cnt);
3081
3082         while (hdev->acl_cnt &&
3083                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3084                 u32 priority = (skb_peek(&chan->data_q))->priority;
3085                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3086                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3087                                skb->len, skb->priority);
3088
3089                         /* Stop if priority has changed */
3090                         if (skb->priority < priority)
3091                                 break;
3092
3093                         skb = skb_dequeue(&chan->data_q);
3094
3095                         hci_conn_enter_active_mode(chan->conn,
3096                                                    bt_cb(skb)->force_active);
3097
3098                         hci_send_frame(skb);
3099                         hdev->acl_last_tx = jiffies;
3100
3101                         hdev->acl_cnt--;
3102                         chan->sent++;
3103                         chan->conn->sent++;
3104                 }
3105         }
3106
3107         if (cnt != hdev->acl_cnt)
3108                 hci_prio_recalculate(hdev, ACL_LINK);
3109 }
3110
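/* Block-based ACL scheduling (data block flow control): a frame may
 * consume several of the hdev->block_cnt buffer blocks, computed by
 * __get_blocks() above, so both the quote and the block count are
 * decremented by the per-frame block cost.
 */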
3111 static void hci_sched_acl_blk(struct hci_dev *hdev)
3112 {
3113         unsigned int cnt = hdev->block_cnt;
3114         struct hci_chan *chan;
3115         struct sk_buff *skb;
3116         int quote;
3117         u8 type;
3118
3119         __check_timeout(hdev, cnt);
3120
3121         BT_DBG("%s", hdev->name);
3122
3123         if (hdev->dev_type == HCI_AMP)
3124                 type = AMP_LINK;
3125         else
3126                 type = ACL_LINK;
3127
3128         while (hdev->block_cnt > 0 &&
3129                (chan = hci_chan_sent(hdev, type, &quote))) {
3130                 u32 priority = (skb_peek(&chan->data_q))->priority;
3131                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3132                         int blocks;
3133
3134                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3135                                skb->len, skb->priority);
3136
3137                         /* Stop if priority has changed */
3138                         if (skb->priority < priority)
3139                                 break;
3140
3141                         skb = skb_dequeue(&chan->data_q);
3142
3143                         blocks = __get_blocks(hdev, skb);
3144                         if (blocks > hdev->block_cnt)
3145                                 return;
3146
3147                         hci_conn_enter_active_mode(chan->conn,
3148                                                    bt_cb(skb)->force_active);
3149
3150                         hci_send_frame(skb);
3151                         hdev->acl_last_tx = jiffies;
3152
3153                         hdev->block_cnt -= blocks;
3154                         quote -= blocks;
3155
3156                         chan->sent += blocks;
3157                         chan->conn->sent += blocks;
3158                 }
3159         }
3160
3161         if (cnt != hdev->block_cnt)
3162                 hci_prio_recalculate(hdev, type);
3163 }
3164
3165 static void hci_sched_acl(struct hci_dev *hdev)
3166 {
3167         BT_DBG("%s", hdev->name);
3168
3169         /* No ACL link over BR/EDR controller */
3170         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3171                 return;
3172
3173         /* No AMP link over AMP controller */
3174         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3175                 return;
3176
3177         switch (hdev->flow_ctl_mode) {
3178         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3179                 hci_sched_acl_pkt(hdev);
3180                 break;
3181
3182         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3183                 hci_sched_acl_blk(hdev);
3184                 break;
3185         }
3186 }
3187
3188 /* Schedule SCO */
3189 static void hci_sched_sco(struct hci_dev *hdev)
3190 {
3191         struct hci_conn *conn;
3192         struct sk_buff *skb;
3193         int quote;
3194
3195         BT_DBG("%s", hdev->name);
3196
3197         if (!hci_conn_num(hdev, SCO_LINK))
3198                 return;
3199
3200         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3201                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3202                         BT_DBG("skb %p len %d", skb, skb->len);
3203                         hci_send_frame(skb);
3204
3205                         conn->sent++;
3206                         if (conn->sent == ~0)
3207                                 conn->sent = 0;
3208                 }
3209         }
3210 }
3211
3212 static void hci_sched_esco(struct hci_dev *hdev)
3213 {
3214         struct hci_conn *conn;
3215         struct sk_buff *skb;
3216         int quote;
3217
3218         BT_DBG("%s", hdev->name);
3219
3220         if (!hci_conn_num(hdev, ESCO_LINK))
3221                 return;
3222
3223         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3224                                                      &quote))) {
3225                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3226                         BT_DBG("skb %p len %d", skb, skb->len);
3227                         hci_send_frame(skb);
3228
3229                         conn->sent++;
3230                         if (conn->sent == ~0)
3231                                 conn->sent = 0;
3232                 }
3233         }
3234 }
3235
3236 static void hci_sched_le(struct hci_dev *hdev)
3237 {
3238         struct hci_chan *chan;
3239         struct sk_buff *skb;
3240         int quote, cnt, tmp;
3241
3242         BT_DBG("%s", hdev->name);
3243
3244         if (!hci_conn_num(hdev, LE_LINK))
3245                 return;
3246
3247         if (!test_bit(HCI_RAW, &hdev->flags)) {
3248                 /* LE tx timeout must be longer than maximum
3249                  * link supervision timeout (40.9 seconds) */
3250                 if (!hdev->le_cnt && hdev->le_pkts &&
3251                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3252                         hci_link_tx_to(hdev, LE_LINK);
3253         }
3254
3255         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3256         tmp = cnt;
3257         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3258                 u32 priority = (skb_peek(&chan->data_q))->priority;
3259                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3260                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3261                                skb->len, skb->priority);
3262
3263                         /* Stop if priority has changed */
3264                         if (skb->priority < priority)
3265                                 break;
3266
3267                         skb = skb_dequeue(&chan->data_q);
3268
3269                         hci_send_frame(skb);
3270                         hdev->le_last_tx = jiffies;
3271
3272                         cnt--;
3273                         chan->sent++;
3274                         chan->conn->sent++;
3275                 }
3276         }
3277
3278         if (hdev->le_pkts)
3279                 hdev->le_cnt = cnt;
3280         else
3281                 hdev->acl_cnt = cnt;
3282
3283         if (cnt != tmp)
3284                 hci_prio_recalculate(hdev, LE_LINK);
3285 }
3286
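/* TX work: when the HCI user channel owns the device, the kernel
 * schedulers above are bypassed and only the raw queue is drained, so
 * user space keeps full control over what goes out on the wire.
 */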
3287 static void hci_tx_work(struct work_struct *work)
3288 {
3289         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3290         struct sk_buff *skb;
3291
3292         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3293                hdev->sco_cnt, hdev->le_cnt);
3294
3295         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3296                 /* Schedule queues and send stuff to HCI driver */
3297                 hci_sched_acl(hdev);
3298                 hci_sched_sco(hdev);
3299                 hci_sched_esco(hdev);
3300                 hci_sched_le(hdev);
3301         }
3302
3303         /* Drain all queued raw (unknown type) packets */
3304         while ((skb = skb_dequeue(&hdev->raw_q)))
3305                 hci_send_frame(skb);
3306 }
3307
3308 /* ----- HCI RX task (incoming data processing) ----- */
3309
3310 /* ACL data packet */
3311 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3312 {
3313         struct hci_acl_hdr *hdr = (void *) skb->data;
3314         struct hci_conn *conn;
3315         __u16 handle, flags;
3316
3317         skb_pull(skb, HCI_ACL_HDR_SIZE);
3318
3319         handle = __le16_to_cpu(hdr->handle);
3320         flags  = hci_flags(handle);
3321         handle = hci_handle(handle);
3322
3323         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3324                handle, flags);
3325
3326         hdev->stat.acl_rx++;
3327
3328         hci_dev_lock(hdev);
3329         conn = hci_conn_hash_lookup_handle(hdev, handle);
3330         hci_dev_unlock(hdev);
3331
3332         if (conn) {
3333                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3334
3335                 /* Send to upper protocol */
3336                 l2cap_recv_acldata(conn, skb, flags);
3337                 return;
3338         } else {
3339                 BT_ERR("%s ACL packet for unknown connection handle %d",
3340                        hdev->name, handle);
3341         }
3342
3343         kfree_skb(skb);
3344 }
3345
3346 /* SCO data packet */
3347 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3348 {
3349         struct hci_sco_hdr *hdr = (void *) skb->data;
3350         struct hci_conn *conn;
3351         __u16 handle;
3352
3353         skb_pull(skb, HCI_SCO_HDR_SIZE);
3354
3355         handle = __le16_to_cpu(hdr->handle);
3356
3357         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3358
3359         hdev->stat.sco_rx++;
3360
3361         hci_dev_lock(hdev);
3362         conn = hci_conn_hash_lookup_handle(hdev, handle);
3363         hci_dev_unlock(hdev);
3364
3365         if (conn) {
3366                 /* Send to upper protocol */
3367                 sco_recv_scodata(conn, skb);
3368                 return;
3369         } else {
3370                 BT_ERR("%s SCO packet for unknown connection handle %d",
3371                        hdev->name, handle);
3372         }
3373
3374         kfree_skb(skb);
3375 }
3376
3377 static bool hci_req_is_complete(struct hci_dev *hdev)
3378 {
3379         struct sk_buff *skb;
3380
3381         skb = skb_peek(&hdev->cmd_q);
3382         if (!skb)
3383                 return true;
3384
3385         return bt_cb(skb)->req.start;
3386 }
3387
3388 static void hci_resend_last(struct hci_dev *hdev)
3389 {
3390         struct hci_command_hdr *sent;
3391         struct sk_buff *skb;
3392         u16 opcode;
3393
3394         if (!hdev->sent_cmd)
3395                 return;
3396
3397         sent = (void *) hdev->sent_cmd->data;
3398         opcode = __le16_to_cpu(sent->opcode);
3399         if (opcode == HCI_OP_RESET)
3400                 return;
3401
3402         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3403         if (!skb)
3404                 return;
3405
3406         skb_queue_head(&hdev->cmd_q, skb);
3407         queue_work(hdev->workqueue, &hdev->cmd_work);
3408 }
3409
3410 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3411 {
3412         hci_req_complete_t req_complete = NULL;
3413         struct sk_buff *skb;
3414         unsigned long flags;
3415
3416         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3417
3418         /* If the completed command doesn't match the last one that was
3419          * sent, we need to handle it specially.
3420          */
3421         if (!hci_sent_cmd_data(hdev, opcode)) {
3422                 /* Some CSR based controllers generate a spontaneous
3423                  * reset complete event during init and any pending
3424                  * command will never be completed. In such a case we
3425                  * need to resend whatever was the last sent
3426                  * command.
3427                  */
3428                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3429                         hci_resend_last(hdev);
3430
3431                 return;
3432         }
3433
3434         /* If the command succeeded and there are still more commands in
3435          * this request, the request is not yet complete.
3436          */
3437         if (!status && !hci_req_is_complete(hdev))
3438                 return;
3439
3440         /* If this was the last command in a request, the complete
3441          * callback would be found in hdev->sent_cmd instead of the
3442          * command queue (hdev->cmd_q).
3443          */
3444         if (hdev->sent_cmd) {
3445                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3446
3447                 if (req_complete) {
3448                         /* We must set the complete callback to NULL to
3449                          * avoid calling the callback more than once if
3450                          * this function gets called again.
3451                          */
3452                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3453
3454                         goto call_complete;
3455                 }
3456         }
3457
3458         /* Remove all pending commands belonging to this request */
3459         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3460         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3461                 if (bt_cb(skb)->req.start) {
3462                         __skb_queue_head(&hdev->cmd_q, skb);
3463                         break;
3464                 }
3465
3466                 req_complete = bt_cb(skb)->req.complete;
3467                 kfree_skb(skb);
3468         }
3469         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3470
3471 call_complete:
3472         if (req_complete)
3473                 req_complete(hdev, status);
3474 }
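/* The per-skb request state used above lives in the skb control buffer.
 * A sketch of the relevant fields, assuming the bt_skb_cb layout of this
 * kernel generation (include/net/bluetooth/bluetooth.h), accessed via
 * bt_cb(skb)->req:
 *
 *	struct hci_req_ctrl {
 *		bool			start;		// first command of a request
 *		u8			event;		// event the command waits for
 *		hci_req_complete_t	complete;	// request completion callback
 *	};
 */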
3475
3476 static void hci_rx_work(struct work_struct *work)
3477 {
3478         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3479         struct sk_buff *skb;
3480
3481         BT_DBG("%s", hdev->name);
3482
3483         while ((skb = skb_dequeue(&hdev->rx_q))) {
3484                 /* Send copy to monitor */
3485                 hci_send_to_monitor(hdev, skb);
3486
3487                 if (atomic_read(&hdev->promisc)) {
3488                         /* Send copy to the sockets */
3489                         hci_send_to_sock(hdev, skb);
3490                 }
3491
3492                 if (test_bit(HCI_RAW, &hdev->flags) ||
3493                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3494                         kfree_skb(skb);
3495                         continue;
3496                 }
3497
3498                 if (test_bit(HCI_INIT, &hdev->flags)) {
3499                         /* Don't process data packets in this state. */
3500                         switch (bt_cb(skb)->pkt_type) {
3501                         case HCI_ACLDATA_PKT:
3502                         case HCI_SCODATA_PKT:
3503                                 kfree_skb(skb);
3504                                 continue;
3505                         }
3506                 }
3507
3508                 /* Process frame */
3509                 switch (bt_cb(skb)->pkt_type) {
3510                 case HCI_EVENT_PKT:
3511                         BT_DBG("%s Event packet", hdev->name);
3512                         hci_event_packet(hdev, skb);
3513                         break;
3514
3515                 case HCI_ACLDATA_PKT:
3516                         BT_DBG("%s ACL data packet", hdev->name);
3517                         hci_acldata_packet(hdev, skb);
3518                         break;
3519
3520                 case HCI_SCODATA_PKT:
3521                         BT_DBG("%s SCO data packet", hdev->name);
3522                         hci_scodata_packet(hdev, skb);
3523                         break;
3524
3525                 default:
3526                         kfree_skb(skb);
3527                         break;
3528                 }
3529         }
3530 }
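/* The pkt_type values dispatched on above are the standard H:4 transport
 * packet indicators from include/net/bluetooth/hci.h:
 *
 *	#define HCI_COMMAND_PKT		0x01
 *	#define HCI_ACLDATA_PKT		0x02
 *	#define HCI_SCODATA_PKT		0x03
 *	#define HCI_EVENT_PKT		0x04
 */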
3531
3532 static void hci_cmd_work(struct work_struct *work)
3533 {
3534         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3535         struct sk_buff *skb;
3536
3537         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3538                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3539
3540         /* Send queued commands */
3541         if (atomic_read(&hdev->cmd_cnt)) {
3542                 skb = skb_dequeue(&hdev->cmd_q);
3543                 if (!skb)
3544                         return;
3545
3546                 kfree_skb(hdev->sent_cmd);
3547
3548                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3549                 if (hdev->sent_cmd) {
3550                         atomic_dec(&hdev->cmd_cnt);
3551                         hci_send_frame(skb);
3552                         if (test_bit(HCI_RESET, &hdev->flags))
3553                                 del_timer(&hdev->cmd_timer);
3554                         else
3555                                 mod_timer(&hdev->cmd_timer,
3556                                           jiffies + HCI_CMD_TIMEOUT);
3557                 } else {
3558                         skb_queue_head(&hdev->cmd_q, skb);
3559                         queue_work(hdev->workqueue, &hdev->cmd_work);
3560                 }
3561         }
3562 }
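/* Command flow control: cmd_cnt is the controller's command credit. It
 * is decremented above for each command put on the wire and refilled by
 * the event handlers from the Num_HCI_Command_Packets field of Command
 * Complete / Command Status events; a sketch (not the exact hci_event.c
 * code):
 *
 *	if (ev->ncmd)
 *		atomic_set(&hdev->cmd_cnt, 1);
 */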
3563
3564 u8 bdaddr_to_le(u8 bdaddr_type)
3565 {
3566         switch (bdaddr_type) {
3567         case BDADDR_LE_PUBLIC:
3568                 return ADDR_LE_DEV_PUBLIC;
3569
3570         default:
3571                 /* Fall back to the LE Random address type */
3572                 return ADDR_LE_DEV_RANDOM;
3573         }
3574 }
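/* Usage sketch (hypothetical caller): translate the exported BDADDR_*
 * address types used by the management interface into the ADDR_LE_DEV_*
 * values that the core stores internally:
 *
 *	addr_type = bdaddr_to_le(cp->addr.type);
 */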