/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
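
/* Illustrative sketch (editorial, not part of the original file): a vendor
 * driver's setup callback might use __hci_cmd_sync() to issue a single
 * command and wait for its Command Complete event, e.g. reading the
 * controller's address:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * On success, skb->data holds the command's return parameters (struct
 * hci_rp_read_bd_addr for this opcode) and the caller must kfree_skb() it.
 */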

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
281 {
282         BT_DBG("%s %ld", req->hdev->name, opt);
283
284         /* Reset device */
285         set_bit(HCI_RESET, &req->hdev->flags);
286         hci_req_add(req, HCI_OP_RESET, 0, NULL);
287 }
288
289 static void bredr_init(struct hci_request *req)
290 {
291         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
292
293         /* Read Local Supported Features */
294         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
295
296         /* Read Local Version */
297         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
298
299         /* Read BD Address */
300         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
301 }
302
303 static void amp_init(struct hci_request *req)
304 {
305         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
306
307         /* Read Local Version */
308         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
309
310         /* Read Local Supported Commands */
311         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
312
313         /* Read Local Supported Features */
314         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
315
316         /* Read Local AMP Info */
317         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
318
319         /* Read Data Blk size */
320         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
321
322         /* Read Flow Control Mode */
323         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
324
325         /* Read Location Data */
326         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
327 }
328
329 static void hci_init1_req(struct hci_request *req, unsigned long opt)
330 {
331         struct hci_dev *hdev = req->hdev;
332
333         BT_DBG("%s %ld", hdev->name, opt);
334
335         /* Reset */
336         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
337                 hci_reset_req(req, 0);
338
339         switch (hdev->dev_type) {
340         case HCI_BREDR:
341                 bredr_init(req);
342                 break;
343
344         case HCI_AMP:
345                 amp_init(req);
346                 break;
347
348         default:
349                 BT_ERR("Unknown device type %d", hdev->dev_type);
350                 break;
351         }
352 }
353
354 static void bredr_setup(struct hci_request *req)
355 {
356         __le16 param;
357         __u8 flt_type;
358
359         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
360         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
361
362         /* Read Class of Device */
363         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
364
365         /* Read Local Name */
366         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
367
368         /* Read Voice Setting */
369         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
370
371         /* Clear Event Filters */
372         flt_type = HCI_FLT_CLEAR_ALL;
373         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
374
375         /* Connection accept timeout ~20 secs */
376         param = __constant_cpu_to_le16(0x7d00);
377         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
378
379         /* Read page scan parameters */
380         if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
381                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
382                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
383         }
384 }
385
386 static void le_setup(struct hci_request *req)
387 {
388         struct hci_dev *hdev = req->hdev;
389
390         /* Read LE Buffer Size */
391         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
392
393         /* Read LE Local Supported Features */
394         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
395
396         /* Read LE Advertising Channel TX Power */
397         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
398
399         /* Read LE White List Size */
400         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
401
402         /* Read LE Supported States */
403         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
404
405         /* LE-only controllers have LE implicitly enabled */
406         if (!lmp_bredr_capable(hdev))
407                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
408 }
409
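/* Pick the mode for the Write Inquiry Mode command: 0x02 requests the
 * extended inquiry result format, 0x01 inquiry results with RSSI and 0x00
 * the standard format (values per the HCI specification). The
 * manufacturer/revision checks below appear to whitelist old controllers
 * that return RSSI results without advertising the feature bit;
 * manufacturer 31 is AVM Berlin, as noted later in this file.
 */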
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
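
/* Editorial note (not in the original): in the Set Event Mask layout an
 * event with code E occupies bit E - 1, i.e. byte (E - 1) / 8 with mask
 * 1 << ((E - 1) % 8). For example, Disconnection Complete (code 0x05)
 * maps to bit 4 of byte 0, hence events[0] |= 0x10 above.
 */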

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If it is not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

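/* Editorial note (not in the original): discovery moves through
 * STOPPED -> STARTING -> FINDING -> (optionally) RESOLVING -> STOPPING
 * and back to STOPPED. Userspace is told via mgmt_discovering() only when
 * FINDING is entered, or when STOPPED is entered from any state other
 * than STARTING.
 */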
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

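/* Editorial note (not in the original): passing BDADDR_ANY as the address
 * acts as a wildcard here and returns the first entry on the resolve list
 * that is in the requested name_state.
 */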
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag
                 * is cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
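
/* Illustrative sketch (editorial, not part of the original file): user
 * space reaches this entry point through the HCIINQUIRY ioctl on an HCI
 * socket. The buffer passed in starts with struct hci_inquiry_req and is
 * followed by room for num_rsp struct inquiry_info records, which the
 * kernel fills in from the inquiry cache on return.
 */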

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                if (lmp_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_CTRL;
                if (lmp_host_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_HOST;
        } else {
                flags |= LE_AD_NO_BREDR;
        }

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
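
/* A worked example (editorial, not in the original): each advertising data
 * element is encoded as [length][type][value...], where length counts the
 * type byte plus the value. A general-discoverable LE-only device thus
 * begins with the three bytes 0x02 0x01 0x06, i.e. EIR_FLAGS carrying
 * LE_AD_GENERAL | LE_AD_NO_BREDR.
 */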

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, clean up */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 0);
                        hci_dev_unlock(hdev);
                }
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

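        /* Editorial note (not in the original): for the two MTU commands
         * below, dev_opt packs the MTU in the upper 16 bits and the packet
         * count in the lower 16 bits (as read on a little-endian machine).
         */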
1595         case HCISETACLMTU:
1596                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1597                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1598                 break;
1599
1600         case HCISETSCOMTU:
1601                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1602                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1603                 break;
1604
1605         default:
1606                 err = -EINVAL;
1607                 break;
1608         }
1609
1610 done:
1611         hci_dev_put(hdev);
1612         return err;
1613 }
1614
1615 int hci_get_dev_list(void __user *arg)
1616 {
1617         struct hci_dev *hdev;
1618         struct hci_dev_list_req *dl;
1619         struct hci_dev_req *dr;
1620         int n = 0, size, err;
1621         __u16 dev_num;
1622
1623         if (get_user(dev_num, (__u16 __user *) arg))
1624                 return -EFAULT;
1625
1626         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1627                 return -EINVAL;
1628
1629         size = sizeof(*dl) + dev_num * sizeof(*dr);
1630
1631         dl = kzalloc(size, GFP_KERNEL);
1632         if (!dl)
1633                 return -ENOMEM;
1634
1635         dr = dl->dev_req;
1636
1637         read_lock(&hci_dev_list_lock);
1638         list_for_each_entry(hdev, &hci_dev_list, list) {
1639                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1640                         cancel_delayed_work(&hdev->power_off);
1641
1642                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1643                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1644
1645                 (dr + n)->dev_id  = hdev->id;
1646                 (dr + n)->dev_opt = hdev->flags;
1647
1648                 if (++n >= dev_num)
1649                         break;
1650         }
1651         read_unlock(&hci_dev_list_lock);
1652
1653         dl->dev_num = n;
1654         size = sizeof(*dl) + n * sizeof(*dr);
1655
1656         err = copy_to_user(arg, dl, size);
1657         kfree(dl);
1658
1659         return err ? -EFAULT : 0;
1660 }
1661
1662 int hci_get_dev_info(void __user *arg)
1663 {
1664         struct hci_dev *hdev;
1665         struct hci_dev_info di;
1666         int err = 0;
1667
1668         if (copy_from_user(&di, arg, sizeof(di)))
1669                 return -EFAULT;
1670
1671         hdev = hci_dev_get(di.dev_id);
1672         if (!hdev)
1673                 return -ENODEV;
1674
1675         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1676                 cancel_delayed_work_sync(&hdev->power_off);
1677
1678         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1679                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1680
1681         strcpy(di.name, hdev->name);
1682         di.bdaddr   = hdev->bdaddr;
1683         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1684         di.flags    = hdev->flags;
1685         di.pkt_type = hdev->pkt_type;
1686         if (lmp_bredr_capable(hdev)) {
1687                 di.acl_mtu  = hdev->acl_mtu;
1688                 di.acl_pkts = hdev->acl_pkts;
1689                 di.sco_mtu  = hdev->sco_mtu;
1690                 di.sco_pkts = hdev->sco_pkts;
1691         } else {
1692                 di.acl_mtu  = hdev->le_mtu;
1693                 di.acl_pkts = hdev->le_pkts;
1694                 di.sco_mtu  = 0;
1695                 di.sco_pkts = 0;
1696         }
1697         di.link_policy = hdev->link_policy;
1698         di.link_mode   = hdev->link_mode;
1699
1700         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1701         memcpy(&di.features, &hdev->features, sizeof(di.features));
1702
1703         if (copy_to_user(arg, &di, sizeof(di)))
1704                 err = -EFAULT;
1705
1706         hci_dev_put(hdev);
1707
1708         return err;
1709 }
1710
1711 /* ---- Interface to HCI drivers ---- */
1712
1713 static int hci_rfkill_set_block(void *data, bool blocked)
1714 {
1715         struct hci_dev *hdev = data;
1716
1717         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1718
1719         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1720                 return -EBUSY;
1721
1722         if (blocked) {
1723                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1724                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725                         hci_dev_do_close(hdev);
1726         } else {
1727                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1728         }
1729
1730         return 0;
1731 }
1732
1733 static const struct rfkill_ops hci_rfkill_ops = {
1734         .set_block = hci_rfkill_set_block,
1735 };
1736
1737 static void hci_power_on(struct work_struct *work)
1738 {
1739         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1740         int err;
1741
1742         BT_DBG("%s", hdev->name);
1743
1744         err = hci_dev_do_open(hdev);
1745         if (err < 0) {
1746                 mgmt_set_powered_failed(hdev, err);
1747                 return;
1748         }
1749
1750         /* During the HCI setup phase, a few error conditions are
1751          * ignored; they need to be checked now. If any of them still
1752          * applies, the device must be turned back off.
1753          */
1754         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1755             (hdev->dev_type == HCI_BREDR &&
1756              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1757              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1758                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1759                 hci_dev_do_close(hdev);
1760         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1761                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1762                                    HCI_AUTO_OFF_TIMEOUT);
1763         }
1764
1765         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1766                 mgmt_index_added(hdev);
1767 }
1768
1769 static void hci_power_off(struct work_struct *work)
1770 {
1771         struct hci_dev *hdev = container_of(work, struct hci_dev,
1772                                             power_off.work);
1773
1774         BT_DBG("%s", hdev->name);
1775
1776         hci_dev_do_close(hdev);
1777 }
1778
1779 static void hci_discov_off(struct work_struct *work)
1780 {
1781         struct hci_dev *hdev;
1782         u8 scan = SCAN_PAGE;
1783
1784         hdev = container_of(work, struct hci_dev, discov_off.work);
1785
1786         BT_DBG("%s", hdev->name);
1787
1788         hci_dev_lock(hdev);
1789
1790         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1791
1792         hdev->discov_timeout = 0;
1793
1794         hci_dev_unlock(hdev);
1795 }
1796
1797 int hci_uuids_clear(struct hci_dev *hdev)
1798 {
1799         struct bt_uuid *uuid, *tmp;
1800
1801         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1802                 list_del(&uuid->list);
1803                 kfree(uuid);
1804         }
1805
1806         return 0;
1807 }
1808
1809 int hci_link_keys_clear(struct hci_dev *hdev)
1810 {
1811         struct list_head *p, *n;
1812
1813         list_for_each_safe(p, n, &hdev->link_keys) {
1814                 struct link_key *key;
1815
1816                 key = list_entry(p, struct link_key, list);
1817
1818                 list_del(p);
1819                 kfree(key);
1820         }
1821
1822         return 0;
1823 }
1824
1825 int hci_smp_ltks_clear(struct hci_dev *hdev)
1826 {
1827         struct smp_ltk *k, *tmp;
1828
1829         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1830                 list_del(&k->list);
1831                 kfree(k);
1832         }
1833
1834         return 0;
1835 }
1836
1837 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1838 {
1839         struct link_key *k;
1840
1841         list_for_each_entry(k, &hdev->link_keys, list)
1842                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1843                         return k;
1844
1845         return NULL;
1846 }
1847
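     /* Decide whether a BR/EDR link key should be stored persistently.
      * Legacy keys are always kept and debug keys never are; beyond
      * that, a key is kept when both sides required some form of
      * bonding, or when either side asked for dedicated bonding.
      */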
1848 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1849                                u8 key_type, u8 old_key_type)
1850 {
1851         /* Legacy key */
1852         if (key_type < 0x03)
1853                 return true;
1854
1855         /* Debug keys are insecure so don't store them persistently */
1856         if (key_type == HCI_LK_DEBUG_COMBINATION)
1857                 return false;
1858
1859         /* Changed combination key and there's no previous one */
1860         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1861                 return false;
1862
1863         /* Security mode 3 case */
1864         if (!conn)
1865                 return true;
1866
1867         /* Neither the local nor the remote side requested no-bonding */
1868         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1869                 return true;
1870
1871         /* Local side had dedicated bonding as requirement */
1872         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1873                 return true;
1874
1875         /* Remote side had dedicated bonding as requirement */
1876         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1877                 return true;
1878
1879         /* If none of the above criteria match, then don't store the key
1880          * persistently */
1881         return false;
1882 }
1883
1884 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1885 {
1886         struct smp_ltk *k;
1887
1888         list_for_each_entry(k, &hdev->long_term_keys, list) {
1889                 if (k->ediv != ediv ||
1890                     memcmp(rand, k->rand, sizeof(k->rand)))
1891                         continue;
1892
1893                 return k;
1894         }
1895
1896         return NULL;
1897 }
1898
1899 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1900                                      u8 addr_type)
1901 {
1902         struct smp_ltk *k;
1903
1904         list_for_each_entry(k, &hdev->long_term_keys, list)
1905                 if (addr_type == k->bdaddr_type &&
1906                     bacmp(bdaddr, &k->bdaddr) == 0)
1907                         return k;
1908
1909         return NULL;
1910 }
1911
1912 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1913                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1914 {
1915         struct link_key *key, *old_key;
1916         u8 old_key_type;
1917         bool persistent;
1918
1919         old_key = hci_find_link_key(hdev, bdaddr);
1920         if (old_key) {
1921                 old_key_type = old_key->type;
1922                 key = old_key;
1923         } else {
1924                 old_key_type = conn ? conn->key_type : 0xff;
1925                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1926                 if (!key)
1927                         return -ENOMEM;
1928                 list_add(&key->list, &hdev->link_keys);
1929         }
1930
1931         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1932
1933         /* Some buggy controller combinations generate a changed
1934          * combination key for legacy pairing even when there's no
1935          * previous key */
1936         if (type == HCI_LK_CHANGED_COMBINATION &&
1937             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1938                 type = HCI_LK_COMBINATION;
1939                 if (conn)
1940                         conn->key_type = type;
1941         }
1942
1943         bacpy(&key->bdaddr, bdaddr);
1944         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1945         key->pin_len = pin_len;
1946
1947         if (type == HCI_LK_CHANGED_COMBINATION)
1948                 key->type = old_key_type;
1949         else
1950                 key->type = type;
1951
1952         if (!new_key)
1953                 return 0;
1954
1955         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1956
1957         mgmt_new_link_key(hdev, key, persistent);
1958
1959         if (conn)
1960                 conn->flush_key = !persistent;
1961
1962         return 0;
1963 }
1964
1965 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1966                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1967                 ediv, u8 rand[8])
1968 {
1969         struct smp_ltk *key, *old_key;
1970
1971         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1972                 return 0;
1973
1974         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1975         if (old_key)
1976                 key = old_key;
1977         else {
1978                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1979                 if (!key)
1980                         return -ENOMEM;
1981                 list_add(&key->list, &hdev->long_term_keys);
1982         }
1983
1984         bacpy(&key->bdaddr, bdaddr);
1985         key->bdaddr_type = addr_type;
1986         memcpy(key->val, tk, sizeof(key->val));
1987         key->authenticated = authenticated;
1988         key->ediv = ediv;
1989         key->enc_size = enc_size;
1990         key->type = type;
1991         memcpy(key->rand, rand, sizeof(key->rand));
1992
1993         if (!new_key)
1994                 return 0;
1995
1996         if (type & HCI_SMP_LTK)
1997                 mgmt_new_ltk(hdev, key, 1);
1998
1999         return 0;
2000 }
2001
2002 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2003 {
2004         struct link_key *key;
2005
2006         key = hci_find_link_key(hdev, bdaddr);
2007         if (!key)
2008                 return -ENOENT;
2009
2010         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2011
2012         list_del(&key->list);
2013         kfree(key);
2014
2015         return 0;
2016 }
2017
2018 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2019 {
2020         struct smp_ltk *k, *tmp;
2021
2022         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2023                 if (bacmp(bdaddr, &k->bdaddr))
2024                         continue;
2025
2026                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2027
2028                 list_del(&k->list);
2029                 kfree(k);
2030         }
2031
2032         return 0;
2033 }
2034
2035 /* HCI command timer function: the controller failed to acknowledge a
      * sent command in time. Restore the command credit so that the
      * command queue cannot stall forever.
      */
2036 static void hci_cmd_timeout(unsigned long arg)
2037 {
2038         struct hci_dev *hdev = (void *) arg;
2039
2040         if (hdev->sent_cmd) {
2041                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2042                 u16 opcode = __le16_to_cpu(sent->opcode);
2043
2044                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2045         } else {
2046                 BT_ERR("%s command tx timeout", hdev->name);
2047         }
2048
2049         atomic_set(&hdev->cmd_cnt, 1);
2050         queue_work(hdev->workqueue, &hdev->cmd_work);
2051 }
2052
2053 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2054                                           bdaddr_t *bdaddr)
2055 {
2056         struct oob_data *data;
2057
2058         list_for_each_entry(data, &hdev->remote_oob_data, list)
2059                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2060                         return data;
2061
2062         return NULL;
2063 }
2064
2065 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2066 {
2067         struct oob_data *data;
2068
2069         data = hci_find_remote_oob_data(hdev, bdaddr);
2070         if (!data)
2071                 return -ENOENT;
2072
2073         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2074
2075         list_del(&data->list);
2076         kfree(data);
2077
2078         return 0;
2079 }
2080
2081 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2082 {
2083         struct oob_data *data, *n;
2084
2085         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2086                 list_del(&data->list);
2087                 kfree(data);
2088         }
2089
2090         return 0;
2091 }
2092
2093 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2094                             u8 *randomizer)
2095 {
2096         struct oob_data *data;
2097
2098         data = hci_find_remote_oob_data(hdev, bdaddr);
2099
2100         if (!data) {
2101                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2102                 if (!data)
2103                         return -ENOMEM;
2104
2105                 bacpy(&data->bdaddr, bdaddr);
2106                 list_add(&data->list, &hdev->remote_oob_data);
2107         }
2108
2109         memcpy(data->hash, hash, sizeof(data->hash));
2110         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2111
2112         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2113
2114         return 0;
2115 }
2116
2117 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2118 {
2119         struct bdaddr_list *b;
2120
2121         list_for_each_entry(b, &hdev->blacklist, list)
2122                 if (bacmp(bdaddr, &b->bdaddr) == 0)
2123                         return b;
2124
2125         return NULL;
2126 }
2127
2128 int hci_blacklist_clear(struct hci_dev *hdev)
2129 {
2130         struct list_head *p, *n;
2131
2132         list_for_each_safe(p, n, &hdev->blacklist) {
2133                 struct bdaddr_list *b;
2134
2135                 b = list_entry(p, struct bdaddr_list, list);
2136
2137                 list_del(p);
2138                 kfree(b);
2139         }
2140
2141         return 0;
2142 }
2143
2144 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2145 {
2146         struct bdaddr_list *entry;
2147
2148         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2149                 return -EBADF;
2150
2151         if (hci_blacklist_lookup(hdev, bdaddr))
2152                 return -EEXIST;
2153
2154         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2155         if (!entry)
2156                 return -ENOMEM;
2157
2158         bacpy(&entry->bdaddr, bdaddr);
2159
2160         list_add(&entry->list, &hdev->blacklist);
2161
2162         return mgmt_device_blocked(hdev, bdaddr, type);
2163 }
2164
2165 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2166 {
2167         struct bdaddr_list *entry;
2168
2169         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2170                 return hci_blacklist_clear(hdev);
2171
2172         entry = hci_blacklist_lookup(hdev, bdaddr);
2173         if (!entry)
2174                 return -ENOENT;
2175
2176         list_del(&entry->list);
2177         kfree(entry);
2178
2179         return mgmt_device_unblocked(hdev, bdaddr, type);
2180 }
2181
2182 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2183 {
2184         if (status) {
2185                 BT_ERR("Failed to start inquiry: status %d", status);
2186
2187                 hci_dev_lock(hdev);
2188                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2189                 hci_dev_unlock(hdev);
2190                 return;
2191         }
2192 }
2193
2194 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2195 {
2196         /* General inquiry access code (GIAC) */
2197         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2198         struct hci_request req;
2199         struct hci_cp_inquiry cp;
2200         int err;
2201
2202         if (status) {
2203                 BT_ERR("Failed to disable LE scanning: status %d", status);
2204                 return;
2205         }
2206
2207         switch (hdev->discovery.type) {
2208         case DISCOV_TYPE_LE:
2209                 hci_dev_lock(hdev);
2210                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2211                 hci_dev_unlock(hdev);
2212                 break;
2213
2214         case DISCOV_TYPE_INTERLEAVED:
2215                 hci_req_init(&req, hdev);
2216
2217                 memset(&cp, 0, sizeof(cp));
2218                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2219                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2220                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2221
2222                 hci_dev_lock(hdev);
2223
2224                 hci_inquiry_cache_flush(hdev);
2225
2226                 err = hci_req_run(&req, inquiry_complete);
2227                 if (err) {
2228                         BT_ERR("Inquiry request failed: err %d", err);
2229                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2230                 }
2231
2232                 hci_dev_unlock(hdev);
2233                 break;
2234         }
2235 }
2236
2237 static void le_scan_disable_work(struct work_struct *work)
2238 {
2239         struct hci_dev *hdev = container_of(work, struct hci_dev,
2240                                             le_scan_disable.work);
2241         struct hci_cp_le_set_scan_enable cp;
2242         struct hci_request req;
2243         int err;
2244
2245         BT_DBG("%s", hdev->name);
2246
2247         hci_req_init(&req, hdev);
2248
2249         memset(&cp, 0, sizeof(cp));
2250         cp.enable = LE_SCAN_DISABLE;
2251         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2252
2253         err = hci_req_run(&req, le_scan_disable_work_complete);
2254         if (err)
2255                 BT_ERR("Disable LE scanning request failed: err %d", err);
2256 }
2257
2258 /* Alloc HCI device */
2259 struct hci_dev *hci_alloc_dev(void)
2260 {
2261         struct hci_dev *hdev;
2262
2263         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2264         if (!hdev)
2265                 return NULL;
2266
2267         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2268         hdev->esco_type = (ESCO_HV1);
2269         hdev->link_mode = (HCI_LM_ACCEPT);
2270         hdev->io_capability = 0x03; /* No Input No Output */
2271         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2272         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2273
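             /* The defaults below are in 0.625 ms controller slots: a
              * sniff interval range of 80..800 slots is 50-500 ms, and
              * an LE scan interval/window of 0x0060/0x0030 is 60/30 ms.
              */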
2274         hdev->sniff_max_interval = 800;
2275         hdev->sniff_min_interval = 80;
2276
2277         hdev->le_scan_interval = 0x0060;
2278         hdev->le_scan_window = 0x0030;
2279
2280         mutex_init(&hdev->lock);
2281         mutex_init(&hdev->req_lock);
2282
2283         INIT_LIST_HEAD(&hdev->mgmt_pending);
2284         INIT_LIST_HEAD(&hdev->blacklist);
2285         INIT_LIST_HEAD(&hdev->uuids);
2286         INIT_LIST_HEAD(&hdev->link_keys);
2287         INIT_LIST_HEAD(&hdev->long_term_keys);
2288         INIT_LIST_HEAD(&hdev->remote_oob_data);
2289         INIT_LIST_HEAD(&hdev->conn_hash.list);
2290
2291         INIT_WORK(&hdev->rx_work, hci_rx_work);
2292         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2293         INIT_WORK(&hdev->tx_work, hci_tx_work);
2294         INIT_WORK(&hdev->power_on, hci_power_on);
2295
2296         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2297         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2298         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2299
2300         skb_queue_head_init(&hdev->rx_q);
2301         skb_queue_head_init(&hdev->cmd_q);
2302         skb_queue_head_init(&hdev->raw_q);
2303
2304         init_waitqueue_head(&hdev->req_wait_q);
2305
2306         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2307
2308         hci_init_sysfs(hdev);
2309         discovery_init(hdev);
2310
2311         return hdev;
2312 }
2313 EXPORT_SYMBOL(hci_alloc_dev);
2314
2315 /* Free HCI device */
2316 void hci_free_dev(struct hci_dev *hdev)
2317 {
2318         /* will free via device release */
2319         put_device(&hdev->dev);
2320 }
2321 EXPORT_SYMBOL(hci_free_dev);
2322
2323 /* Register HCI device */
2324 int hci_register_dev(struct hci_dev *hdev)
2325 {
2326         int id, error;
2327
2328         if (!hdev->open || !hdev->close)
2329                 return -EINVAL;
2330
2331         /* Do not allow HCI_AMP devices to register at index 0,
2332          * so the index can be used as the AMP controller ID.
2333          */
2334         switch (hdev->dev_type) {
2335         case HCI_BREDR:
2336                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2337                 break;
2338         case HCI_AMP:
2339                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2340                 break;
2341         default:
2342                 return -EINVAL;
2343         }
2344
2345         if (id < 0)
2346                 return id;
2347
2348         sprintf(hdev->name, "hci%d", id);
2349         hdev->id = id;
2350
2351         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2352
2353         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2354                                           WQ_MEM_RECLAIM, 1, hdev->name);
2355         if (!hdev->workqueue) {
2356                 error = -ENOMEM;
2357                 goto err;
2358         }
2359
2360         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2361                                               WQ_MEM_RECLAIM, 1, hdev->name);
2362         if (!hdev->req_workqueue) {
2363                 destroy_workqueue(hdev->workqueue);
2364                 error = -ENOMEM;
2365                 goto err;
2366         }
2367
2368         error = hci_add_sysfs(hdev);
2369         if (error < 0)
2370                 goto err_wqueue;
2371
2372         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2373                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2374                                     hdev);
2375         if (hdev->rfkill) {
2376                 if (rfkill_register(hdev->rfkill) < 0) {
2377                         rfkill_destroy(hdev->rfkill);
2378                         hdev->rfkill = NULL;
2379                 }
2380         }
2381
2382         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2383                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2384
2385         set_bit(HCI_SETUP, &hdev->dev_flags);
2386         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2387
2388         if (hdev->dev_type == HCI_BREDR) {
2389                 /* Assume BR/EDR support until proven otherwise (such as
2390                  * through reading supported features during init).
2391                  */
2392                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2393         }
2394
2395         write_lock(&hci_dev_list_lock);
2396         list_add(&hdev->list, &hci_dev_list);
2397         write_unlock(&hci_dev_list_lock);
2398
2399         hci_notify(hdev, HCI_DEV_REG);
2400         hci_dev_hold(hdev);
2401
2402         queue_work(hdev->req_workqueue, &hdev->power_on);
2403
2404         return id;
2405
2406 err_wqueue:
2407         destroy_workqueue(hdev->workqueue);
2408         destroy_workqueue(hdev->req_workqueue);
2409 err:
2410         ida_simple_remove(&hci_index_ida, hdev->id);
2411
2412         return error;
2413 }
2414 EXPORT_SYMBOL(hci_register_dev);
2415
2416 /* Unregister HCI device */
2417 void hci_unregister_dev(struct hci_dev *hdev)
2418 {
2419         int i, id;
2420
2421         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2422
2423         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2424
2425         id = hdev->id;
2426
2427         write_lock(&hci_dev_list_lock);
2428         list_del(&hdev->list);
2429         write_unlock(&hci_dev_list_lock);
2430
2431         hci_dev_do_close(hdev);
2432
2433         for (i = 0; i < NUM_REASSEMBLY; i++)
2434                 kfree_skb(hdev->reassembly[i]);
2435
2436         cancel_work_sync(&hdev->power_on);
2437
2438         if (!test_bit(HCI_INIT, &hdev->flags) &&
2439             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2440                 hci_dev_lock(hdev);
2441                 mgmt_index_removed(hdev);
2442                 hci_dev_unlock(hdev);
2443         }
2444
2445         /* mgmt_index_removed should take care of emptying the
2446          * pending list */
2447         BUG_ON(!list_empty(&hdev->mgmt_pending));
2448
2449         hci_notify(hdev, HCI_DEV_UNREG);
2450
2451         if (hdev->rfkill) {
2452                 rfkill_unregister(hdev->rfkill);
2453                 rfkill_destroy(hdev->rfkill);
2454         }
2455
2456         hci_del_sysfs(hdev);
2457
2458         destroy_workqueue(hdev->workqueue);
2459         destroy_workqueue(hdev->req_workqueue);
2460
2461         hci_dev_lock(hdev);
2462         hci_blacklist_clear(hdev);
2463         hci_uuids_clear(hdev);
2464         hci_link_keys_clear(hdev);
2465         hci_smp_ltks_clear(hdev);
2466         hci_remote_oob_data_clear(hdev);
2467         hci_dev_unlock(hdev);
2468
2469         hci_dev_put(hdev);
2470
2471         ida_simple_remove(&hci_index_ida, id);
2472 }
2473 EXPORT_SYMBOL(hci_unregister_dev);
2474
2475 /* Suspend HCI device */
2476 int hci_suspend_dev(struct hci_dev *hdev)
2477 {
2478         hci_notify(hdev, HCI_DEV_SUSPEND);
2479         return 0;
2480 }
2481 EXPORT_SYMBOL(hci_suspend_dev);
2482
2483 /* Resume HCI device */
2484 int hci_resume_dev(struct hci_dev *hdev)
2485 {
2486         hci_notify(hdev, HCI_DEV_RESUME);
2487         return 0;
2488 }
2489 EXPORT_SYMBOL(hci_resume_dev);
2490
2491 /* Receive frame from HCI drivers */
2492 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2493 {
2494         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2495                       !test_bit(HCI_INIT, &hdev->flags))) {
2496                 kfree_skb(skb);
2497                 return -ENXIO;
2498         }
2499
2500         /* Incoming skb */
2501         bt_cb(skb)->incoming = 1;
2502
2503         /* Time stamp */
2504         __net_timestamp(skb);
2505
2506         skb_queue_tail(&hdev->rx_q, skb);
2507         queue_work(hdev->workqueue, &hdev->rx_work);
2508
2509         return 0;
2510 }
2511 EXPORT_SYMBOL(hci_recv_frame);
2512
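     /* Core reassembly helper: append up to @count bytes of @data to the
      * partial packet kept in hdev->reassembly[@index]. A fresh skb sized
      * for the packet type is allocated on first use; once the header is
      * complete, the expected payload length is taken from it, and a
      * finished frame is handed to hci_recv_frame(). Returns the number
      * of input bytes left unconsumed (they belong to the next packet),
      * or a negative error.
      */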
2513 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2514                           int count, __u8 index)
2515 {
2516         int len = 0;
2517         int hlen = 0;
2518         int remain = count;
2519         struct sk_buff *skb;
2520         struct bt_skb_cb *scb;
2521
2522         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2523             index >= NUM_REASSEMBLY)
2524                 return -EILSEQ;
2525
2526         skb = hdev->reassembly[index];
2527
2528         if (!skb) {
2529                 switch (type) {
2530                 case HCI_ACLDATA_PKT:
2531                         len = HCI_MAX_FRAME_SIZE;
2532                         hlen = HCI_ACL_HDR_SIZE;
2533                         break;
2534                 case HCI_EVENT_PKT:
2535                         len = HCI_MAX_EVENT_SIZE;
2536                         hlen = HCI_EVENT_HDR_SIZE;
2537                         break;
2538                 case HCI_SCODATA_PKT:
2539                         len = HCI_MAX_SCO_SIZE;
2540                         hlen = HCI_SCO_HDR_SIZE;
2541                         break;
2542                 }
2543
2544                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2545                 if (!skb)
2546                         return -ENOMEM;
2547
2548                 scb = (void *) skb->cb;
2549                 scb->expect = hlen;
2550                 scb->pkt_type = type;
2551
2552                 hdev->reassembly[index] = skb;
2553         }
2554
2555         while (count) {
2556                 scb = (void *) skb->cb;
2557                 len = min_t(uint, scb->expect, count);
2558
2559                 memcpy(skb_put(skb, len), data, len);
2560
2561                 count -= len;
2562                 data += len;
2563                 scb->expect -= len;
2564                 remain = count;
2565
2566                 switch (type) {
2567                 case HCI_EVENT_PKT:
2568                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2569                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2570                                 scb->expect = h->plen;
2571
2572                                 if (skb_tailroom(skb) < scb->expect) {
2573                                         kfree_skb(skb);
2574                                         hdev->reassembly[index] = NULL;
2575                                         return -ENOMEM;
2576                                 }
2577                         }
2578                         break;
2579
2580                 case HCI_ACLDATA_PKT:
2581                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2582                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2583                                 scb->expect = __le16_to_cpu(h->dlen);
2584
2585                                 if (skb_tailroom(skb) < scb->expect) {
2586                                         kfree_skb(skb);
2587                                         hdev->reassembly[index] = NULL;
2588                                         return -ENOMEM;
2589                                 }
2590                         }
2591                         break;
2592
2593                 case HCI_SCODATA_PKT:
2594                         if (skb->len == HCI_SCO_HDR_SIZE) {
2595                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2596                                 scb->expect = h->dlen;
2597
2598                                 if (skb_tailroom(skb) < scb->expect) {
2599                                         kfree_skb(skb);
2600                                         hdev->reassembly[index] = NULL;
2601                                         return -ENOMEM;
2602                                 }
2603                         }
2604                         break;
2605                 }
2606
2607                 if (scb->expect == 0) {
2608                         /* Complete frame */
2609
2610                         bt_cb(skb)->pkt_type = type;
2611                         hci_recv_frame(hdev, skb);
2612
2613                         hdev->reassembly[index] = NULL;
2614                         return remain;
2615                 }
2616         }
2617
2618         return remain;
2619 }
2620
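     /* Entry point for packet-oriented transports: data arrives as
      * (possibly partial) HCI packets of a known type, and the type
      * selects a dedicated reassembly slot, so fragments of different
      * packet types may interleave.
      */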
2621 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2622 {
2623         int rem = 0;
2624
2625         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2626                 return -EILSEQ;
2627
2628         while (count) {
2629                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2630                 if (rem < 0)
2631                         return rem;
2632
2633                 data += (count - rem);
2634                 count = rem;
2635         }
2636
2637         return rem;
2638 }
2639 EXPORT_SYMBOL(hci_recv_fragment);
2640
2641 #define STREAM_REASSEMBLY 0
2642
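     /* Stream-oriented transports (e.g. UART based drivers) deliver a
      * raw byte stream in which every frame starts with a packet-type
      * byte. A single shared slot suffices here, since stream data
      * arrives strictly in order.
      */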
2643 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2644 {
2645         int type;
2646         int rem = 0;
2647
2648         while (count) {
2649                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2650
2651                 if (!skb) {
2652                         struct { char type; } *pkt;
2653
2654                         /* Start of the frame */
2655                         pkt = data;
2656                         type = pkt->type;
2657
2658                         data++;
2659                         count--;
2660                 } else {
2661                         type = bt_cb(skb)->pkt_type;
                     }
2662
2663                 rem = hci_reassembly(hdev, type, data, count,
2664                                      STREAM_REASSEMBLY);
2665                 if (rem < 0)
2666                         return rem;
2667
2668                 data += (count - rem);
2669                 count = rem;
2670         }
2671
2672         return rem;
2673 }
2674 EXPORT_SYMBOL(hci_recv_stream_fragment);
2675
2676 /* ---- Interface to upper protocols ---- */
2677
2678 int hci_register_cb(struct hci_cb *cb)
2679 {
2680         BT_DBG("%p name %s", cb, cb->name);
2681
2682         write_lock(&hci_cb_list_lock);
2683         list_add(&cb->list, &hci_cb_list);
2684         write_unlock(&hci_cb_list_lock);
2685
2686         return 0;
2687 }
2688 EXPORT_SYMBOL(hci_register_cb);
2689
2690 int hci_unregister_cb(struct hci_cb *cb)
2691 {
2692         BT_DBG("%p name %s", cb, cb->name);
2693
2694         write_lock(&hci_cb_list_lock);
2695         list_del(&cb->list);
2696         write_unlock(&hci_cb_list_lock);
2697
2698         return 0;
2699 }
2700 EXPORT_SYMBOL(hci_unregister_cb);
2701
2702 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2703 {
2704         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2705
2706         /* Time stamp */
2707         __net_timestamp(skb);
2708
2709         /* Send copy to monitor */
2710         hci_send_to_monitor(hdev, skb);
2711
2712         if (atomic_read(&hdev->promisc)) {
2713                 /* Send copy to the sockets */
2714                 hci_send_to_sock(hdev, skb);
2715         }
2716
2717         /* Get rid of skb owner, prior to sending to the driver. */
2718         skb_orphan(skb);
2719
2720         if (hdev->send(hdev, skb) < 0)
2721                 BT_ERR("%s sending frame failed", hdev->name);
2722 }
2723
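     /* HCI request framework: commands are collected on a private queue
      * and then spliced onto the device command queue as one unit, with
      * the completion callback attached to the last command.
      *
      * A minimal usage sketch, mirroring le_scan_disable_work() above
      * (cp being the command's parameter struct):
      *
      *	struct hci_request req;
      *
      *	hci_req_init(&req, hdev);
      *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
      *	err = hci_req_run(&req, le_scan_disable_work_complete);
      */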
2724 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2725 {
2726         skb_queue_head_init(&req->cmd_q);
2727         req->hdev = hdev;
2728         req->err = 0;
2729 }
2730
2731 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2732 {
2733         struct hci_dev *hdev = req->hdev;
2734         struct sk_buff *skb;
2735         unsigned long flags;
2736
2737         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2738
2739         /* If an error occurred during request building, remove all HCI
2740          * commands queued on the HCI request queue.
2741          */
2742         if (req->err) {
2743                 skb_queue_purge(&req->cmd_q);
2744                 return req->err;
2745         }
2746
2747         /* Do not allow empty requests */
2748         if (skb_queue_empty(&req->cmd_q))
2749                 return -ENODATA;
2750
2751         skb = skb_peek_tail(&req->cmd_q);
2752         bt_cb(skb)->req.complete = complete;
2753
2754         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2755         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2756         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2757
2758         queue_work(hdev->workqueue, &hdev->cmd_work);
2759
2760         return 0;
2761 }
2762
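     /* Build a command skb: the HCI command header (opcode plus
      * parameter length) followed by @plen bytes of @param, tagged as
      * HCI_COMMAND_PKT.
      */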
2763 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2764                                        u32 plen, const void *param)
2765 {
2766         int len = HCI_COMMAND_HDR_SIZE + plen;
2767         struct hci_command_hdr *hdr;
2768         struct sk_buff *skb;
2769
2770         skb = bt_skb_alloc(len, GFP_ATOMIC);
2771         if (!skb)
2772                 return NULL;
2773
2774         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2775         hdr->opcode = cpu_to_le16(opcode);
2776         hdr->plen   = plen;
2777
2778         if (plen)
2779                 memcpy(skb_put(skb, plen), param, plen);
2780
2781         BT_DBG("skb len %d", skb->len);
2782
2783         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2784
2785         return skb;
2786 }
2787
2788 /* Send HCI command */
2789 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2790                  const void *param)
2791 {
2792         struct sk_buff *skb;
2793
2794         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2795
2796         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2797         if (!skb) {
2798                 BT_ERR("%s no memory for command", hdev->name);
2799                 return -ENOMEM;
2800         }
2801
2802         /* Stand-alone HCI commands must be flagged as
2803          * single-command requests.
2804          */
2805         bt_cb(skb)->req.start = true;
2806
2807         skb_queue_tail(&hdev->cmd_q, skb);
2808         queue_work(hdev->workqueue, &hdev->cmd_work);
2809
2810         return 0;
2811 }
2812
2813 /* Queue a command to an asynchronous HCI request */
2814 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2815                     const void *param, u8 event)
2816 {
2817         struct hci_dev *hdev = req->hdev;
2818         struct sk_buff *skb;
2819
2820         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2821
2822         /* If an error occurred during request building, there is no point in
2823          * queueing the HCI command. We can simply return.
2824          */
2825         if (req->err)
2826                 return;
2827
2828         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2829         if (!skb) {
2830                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2831                        hdev->name, opcode);
2832                 req->err = -ENOMEM;
2833                 return;
2834         }
2835
2836         if (skb_queue_empty(&req->cmd_q))
2837                 bt_cb(skb)->req.start = true;
2838
2839         bt_cb(skb)->req.event = event;
2840
2841         skb_queue_tail(&req->cmd_q, skb);
2842 }
2843
2844 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2845                  const void *param)
2846 {
2847         hci_req_add_ev(req, opcode, plen, param, 0);
2848 }
2849
2850 /* Get data from the previously sent command */
2851 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2852 {
2853         struct hci_command_hdr *hdr;
2854
2855         if (!hdev->sent_cmd)
2856                 return NULL;
2857
2858         hdr = (void *) hdev->sent_cmd->data;
2859
2860         if (hdr->opcode != cpu_to_le16(opcode))
2861                 return NULL;
2862
2863         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2864
2865         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2866 }
2867
2868 /* Send ACL data */
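     /* Prepend the ACL header: the connection handle packed together
      * with the packet boundary/broadcast flags, followed by the data
      * length.
      */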
2869 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2870 {
2871         struct hci_acl_hdr *hdr;
2872         int len = skb->len;
2873
2874         skb_push(skb, HCI_ACL_HDR_SIZE);
2875         skb_reset_transport_header(skb);
2876         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2877         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2878         hdr->dlen   = cpu_to_le16(len);
2879 }
2880
2881 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2882                           struct sk_buff *skb, __u16 flags)
2883 {
2884         struct hci_conn *conn = chan->conn;
2885         struct hci_dev *hdev = conn->hdev;
2886         struct sk_buff *list;
2887
2888         skb->len = skb_headlen(skb);
2889         skb->data_len = 0;
2890
2891         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2892
2893         switch (hdev->dev_type) {
2894         case HCI_BREDR:
2895                 hci_add_acl_hdr(skb, conn->handle, flags);
2896                 break;
2897         case HCI_AMP:
2898                 hci_add_acl_hdr(skb, chan->handle, flags);
2899                 break;
2900         default:
2901                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2902                 return;
2903         }
2904
2905         list = skb_shinfo(skb)->frag_list;
2906         if (!list) {
2907                 /* Non-fragmented */
2908                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2909
2910                 skb_queue_tail(queue, skb);
2911         } else {
2912                 /* Fragmented */
2913                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2914
2915                 skb_shinfo(skb)->frag_list = NULL;
2916
2917                 /* Queue all fragments atomically */
2918                 spin_lock(&queue->lock);
2919
2920                 __skb_queue_tail(queue, skb);
2921
2922                 flags &= ~ACL_START;
2923                 flags |= ACL_CONT;
2924                 do {
2925                         skb = list;
                             list = list->next;
2926
2927                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2928                         hci_add_acl_hdr(skb, conn->handle, flags);
2929
2930                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2931
2932                         __skb_queue_tail(queue, skb);
2933                 } while (list);
2934
2935                 spin_unlock(&queue->lock);
2936         }
2937 }
2938
2939 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2940 {
2941         struct hci_dev *hdev = chan->conn->hdev;
2942
2943         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2944
2945         hci_queue_acl(chan, &chan->data_q, skb, flags);
2946
2947         queue_work(hdev->workqueue, &hdev->tx_work);
2948 }
2949
2950 /* Send SCO data */
2951 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2952 {
2953         struct hci_dev *hdev = conn->hdev;
2954         struct hci_sco_hdr hdr;
2955
2956         BT_DBG("%s len %d", hdev->name, skb->len);
2957
2958         hdr.handle = cpu_to_le16(conn->handle);
2959         hdr.dlen   = skb->len;
2960
2961         skb_push(skb, HCI_SCO_HDR_SIZE);
2962         skb_reset_transport_header(skb);
2963         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2964
2965         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2966
2967         skb_queue_tail(&conn->data_q, skb);
2968         queue_work(hdev->workqueue, &hdev->tx_work);
2969 }
2970
2971 /* ---- HCI TX task (outgoing data) ---- */
2972
2973 /* HCI Connection scheduler */
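     /* Fair connection scheduler: pick the connection of the given type
      * with the least data outstanding, and return in *quote its share
      * of the free controller buffers (always at least one packet).
      */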
2974 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2975                                      int *quote)
2976 {
2977         struct hci_conn_hash *h = &hdev->conn_hash;
2978         struct hci_conn *conn = NULL, *c;
2979         unsigned int num = 0, min = ~0;
2980
2981         /* We don't have to lock device here. Connections are always
2982          * added and removed with TX task disabled. */
2983
2984         rcu_read_lock();
2985
2986         list_for_each_entry_rcu(c, &h->list, list) {
2987                 if (c->type != type || skb_queue_empty(&c->data_q))
2988                         continue;
2989
2990                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2991                         continue;
2992
2993                 num++;
2994
2995                 if (c->sent < min) {
2996                         min  = c->sent;
2997                         conn = c;
2998                 }
2999
3000                 if (hci_conn_num(hdev, type) == num)
3001                         break;
3002         }
3003
3004         rcu_read_unlock();
3005
3006         if (conn) {
3007                 int cnt, q;
3008
3009                 switch (conn->type) {
3010                 case ACL_LINK:
3011                         cnt = hdev->acl_cnt;
3012                         break;
3013                 case SCO_LINK:
3014                 case ESCO_LINK:
3015                         cnt = hdev->sco_cnt;
3016                         break;
3017                 case LE_LINK:
3018                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3019                         break;
3020                 default:
3021                         cnt = 0;
3022                         BT_ERR("Unknown link type");
3023                 }
3024
3025                 q = cnt / num;
3026                 *quote = q ? q : 1;
3027         } else {
3028                 *quote = 0;
             }
3029
3030         BT_DBG("conn %p quote %d", conn, *quote);
3031         return conn;
3032 }
3033
3034 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3035 {
3036         struct hci_conn_hash *h = &hdev->conn_hash;
3037         struct hci_conn *c;
3038
3039         BT_ERR("%s link tx timeout", hdev->name);
3040
3041         rcu_read_lock();
3042
3043         /* Kill stalled connections */
3044         list_for_each_entry_rcu(c, &h->list, list) {
3045                 if (c->type == type && c->sent) {
3046                         BT_ERR("%s killing stalled connection %pMR",
3047                                hdev->name, &c->dst);
3048                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3049                 }
3050         }
3051
3052         rcu_read_unlock();
3053 }
3054
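     /* Channel-level variant of hci_low_sent(): only channels whose
      * queued head skb carries the highest priority currently waiting
      * compete, and among those the connection with the least data
      * outstanding wins.
      */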
3055 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3056                                       int *quote)
3057 {
3058         struct hci_conn_hash *h = &hdev->conn_hash;
3059         struct hci_chan *chan = NULL;
3060         unsigned int num = 0, min = ~0, cur_prio = 0;
3061         struct hci_conn *conn;
3062         int cnt, q, conn_num = 0;
3063
3064         BT_DBG("%s", hdev->name);
3065
3066         rcu_read_lock();
3067
3068         list_for_each_entry_rcu(conn, &h->list, list) {
3069                 struct hci_chan *tmp;
3070
3071                 if (conn->type != type)
3072                         continue;
3073
3074                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3075                         continue;
3076
3077                 conn_num++;
3078
3079                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3080                         struct sk_buff *skb;
3081
3082                         if (skb_queue_empty(&tmp->data_q))
3083                                 continue;
3084
3085                         skb = skb_peek(&tmp->data_q);
3086                         if (skb->priority < cur_prio)
3087                                 continue;
3088
3089                         if (skb->priority > cur_prio) {
3090                                 num = 0;
3091                                 min = ~0;
3092                                 cur_prio = skb->priority;
3093                         }
3094
3095                         num++;
3096
3097                         if (conn->sent < min) {
3098                                 min  = conn->sent;
3099                                 chan = tmp;
3100                         }
3101                 }
3102
3103                 if (hci_conn_num(hdev, type) == conn_num)
3104                         break;
3105         }
3106
3107         rcu_read_unlock();
3108
3109         if (!chan)
3110                 return NULL;
3111
3112         switch (chan->conn->type) {
3113         case ACL_LINK:
3114                 cnt = hdev->acl_cnt;
3115                 break;
3116         case AMP_LINK:
3117                 cnt = hdev->block_cnt;
3118                 break;
3119         case SCO_LINK:
3120         case ESCO_LINK:
3121                 cnt = hdev->sco_cnt;
3122                 break;
3123         case LE_LINK:
3124                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3125                 break;
3126         default:
3127                 cnt = 0;
3128                 BT_ERR("Unknown link type");
3129         }
3130
3131         q = cnt / num;
3132         *quote = q ? q : 1;
3133         BT_DBG("chan %p quote %d", chan, *quote);
3134         return chan;
3135 }
3136
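     /* Anti-starvation pass: after a scheduling round, promote the head
      * packet of every channel that did not get to send anything, so
      * that low-priority traffic is eventually serviced.
      */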
3137 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3138 {
3139         struct hci_conn_hash *h = &hdev->conn_hash;
3140         struct hci_conn *conn;
3141         int num = 0;
3142
3143         BT_DBG("%s", hdev->name);
3144
3145         rcu_read_lock();
3146
3147         list_for_each_entry_rcu(conn, &h->list, list) {
3148                 struct hci_chan *chan;
3149
3150                 if (conn->type != type)
3151                         continue;
3152
3153                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3154                         continue;
3155
3156                 num++;
3157
3158                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3159                         struct sk_buff *skb;
3160
3161                         if (chan->sent) {
3162                                 chan->sent = 0;
3163                                 continue;
3164                         }
3165
3166                         if (skb_queue_empty(&chan->data_q))
3167                                 continue;
3168
3169                         skb = skb_peek(&chan->data_q);
3170                         if (skb->priority >= HCI_PRIO_MAX - 1)
3171                                 continue;
3172
3173                         skb->priority = HCI_PRIO_MAX - 1;
3174
3175                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3176                                skb->priority);
3177                 }
3178
3179                 if (hci_conn_num(hdev, type) == num)
3180                         break;
3181         }
3182
3183         rcu_read_unlock();
3185 }
3186
3187 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3188 {
3189         /* Calculate count of blocks used by this packet */
3190         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3191 }
3192
3193 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3194 {
3195         if (!test_bit(HCI_RAW, &hdev->flags)) {
3196                 /* ACL tx timeout must be longer than maximum
3197                  * link supervision timeout (40.9 seconds) */
3198                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3199                                        HCI_ACL_TX_TIMEOUT))
3200                         hci_link_tx_to(hdev, ACL_LINK);
3201         }
3202 }
3203
3204 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3205 {
3206         unsigned int cnt = hdev->acl_cnt;
3207         struct hci_chan *chan;
3208         struct sk_buff *skb;
3209         int quote;
3210
3211         __check_timeout(hdev, cnt);
3212
3213         while (hdev->acl_cnt &&
3214                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3215                 u32 priority = (skb_peek(&chan->data_q))->priority;
3216                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3217                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3218                                skb->len, skb->priority);
3219
3220                         /* Stop if priority has changed */
3221                         if (skb->priority < priority)
3222                                 break;
3223
3224                         skb = skb_dequeue(&chan->data_q);
3225
3226                         hci_conn_enter_active_mode(chan->conn,
3227                                                    bt_cb(skb)->force_active);
3228
3229                         hci_send_frame(hdev, skb);
3230                         hdev->acl_last_tx = jiffies;
3231
3232                         hdev->acl_cnt--;
3233                         chan->sent++;
3234                         chan->conn->sent++;
3235                 }
3236         }
3237
3238         if (cnt != hdev->acl_cnt)
3239                 hci_prio_recalculate(hdev, ACL_LINK);
3240 }
3241
3242 static void hci_sched_acl_blk(struct hci_dev *hdev)
3243 {
3244         unsigned int cnt = hdev->block_cnt;
3245         struct hci_chan *chan;
3246         struct sk_buff *skb;
3247         int quote;
3248         u8 type;
3249
3250         __check_timeout(hdev, cnt);
3251
3252         BT_DBG("%s", hdev->name);
3253
3254         if (hdev->dev_type == HCI_AMP)
3255                 type = AMP_LINK;
3256         else
3257                 type = ACL_LINK;
3258
3259         while (hdev->block_cnt > 0 &&
3260                (chan = hci_chan_sent(hdev, type, &quote))) {
3261                 u32 priority = (skb_peek(&chan->data_q))->priority;
3262                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3263                         int blocks;
3264
3265                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3266                                skb->len, skb->priority);
3267
3268                         /* Stop if priority has changed */
3269                         if (skb->priority < priority)
3270                                 break;
3271
3272                         skb = skb_dequeue(&chan->data_q);
3273
3274                         blocks = __get_blocks(hdev, skb);
3275                         if (blocks > hdev->block_cnt)
3276                                 return;
3277
3278                         hci_conn_enter_active_mode(chan->conn,
3279                                                    bt_cb(skb)->force_active);
3280
3281                         hci_send_frame(hdev, skb);
3282                         hdev->acl_last_tx = jiffies;
3283
3284                         hdev->block_cnt -= blocks;
3285                         quote -= blocks;
3286
3287                         chan->sent += blocks;
3288                         chan->conn->sent += blocks;
3289                 }
3290         }
3291
3292         if (cnt != hdev->block_cnt)
3293                 hci_prio_recalculate(hdev, type);
3294 }
3295
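     /* Dispatch ACL traffic according to the flow control mode reported
      * by the controller: per-packet accounting, or the block-based
      * accounting used with AMP controllers.
      */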
3296 static void hci_sched_acl(struct hci_dev *hdev)
3297 {
3298         BT_DBG("%s", hdev->name);
3299
3300         /* No ACL links on a BR/EDR controller, nothing to schedule */
3301         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3302                 return;
3303
3304         /* No AMP links on an AMP controller, nothing to schedule */
3305         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3306                 return;
3307
3308         switch (hdev->flow_ctl_mode) {
3309         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3310                 hci_sched_acl_pkt(hdev);
3311                 break;
3312
3313         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3314                 hci_sched_acl_blk(hdev);
3315                 break;
3316         }
3317 }
3318
3319 /* Schedule SCO */
3320 static void hci_sched_sco(struct hci_dev *hdev)
3321 {
3322         struct hci_conn *conn;
3323         struct sk_buff *skb;
3324         int quote;
3325
3326         BT_DBG("%s", hdev->name);
3327
3328         if (!hci_conn_num(hdev, SCO_LINK))
3329                 return;
3330
3331         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3332                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3333                         BT_DBG("skb %p len %d", skb, skb->len);
3334                         hci_send_frame(hdev, skb);
3335
3336                         conn->sent++;
3337                         if (conn->sent == ~0)
3338                                 conn->sent = 0;
3339                 }
3340         }
3341 }
3342
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        /* Wrap the unsigned TX counter back to zero */
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

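/* Schedule LE. Controllers without a dedicated LE buffer pool report
 * le_pkts == 0, in which case LE traffic is accounted against the
 * shared ACL credits (acl_cnt) instead.
 */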
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

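/* TX work: the single entry point for draining all outbound queues.
 * Scheduling is skipped for devices bound to a user channel, where
 * userspace drives the device directly; raw (unknown type) packets
 * are flushed unconditionally at the end.
 */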
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send pending frames to the driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

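/* In the ACL data header the 16-bit handle field packs a 12-bit
 * connection handle together with the packet boundary and broadcast
 * flags; the handler below separates them with hci_handle() and
 * hci_flags().
 */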
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

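/* Commands queued as part of one request are marked by req.start on
 * the first command. If the head of cmd_q starts a new request, the
 * previous request has no commands left pending.
 */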
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

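/* Re-queue a clone of the last command that was sent, used to recover
 * from a spontaneous reset complete during init (see the CSR
 * workaround in hci_req_cmd_complete() below). HCI_OP_RESET itself is
 * never re-sent, since the spontaneous event already signals a
 * completed reset.
 */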
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR-based controllers generate a spontaneous
                 * reset complete event during init, so any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

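/* RX work: drains rx_q, sending a copy of each packet to the monitor
 * and, in promiscuous mode, to the HCI sockets, before dispatching it
 * to the event/ACL/SCO handlers above.
 */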
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

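/* CMD work: sends queued commands while the controller has command
 * credits (cmd_cnt is replenished from the Num_HCI_Command_Packets
 * field of Command Complete/Status events). A clone of the in-flight
 * command is kept in sent_cmd so its response can be matched, and
 * cmd_timer catches controllers that stop responding.
 */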
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        /* Cloning failed: put the command back and retry */
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

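/* Map an exported BDADDR_LE_* address type to the corresponding
 * internal ADDR_LE_DEV_* constant; anything unrecognized is treated
 * as a random address.
 */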
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}