Bluetooth: Add support for vendor specific diagnostic channel
[cascardo/linux.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
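
/* Usage sketch (illustrative, not part of this file): the "dut_mode"
 * entry created in __hci_init() below lives under debugfs, normally
 * mounted at /sys/kernel/debug. Assuming an adapter named hci0:
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' sends HCI_OP_ENABLE_DUT_MODE; writing 'N' issues
 * HCI_OP_RESET, which is how the controller leaves DUT mode.
 */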

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
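
/* Caller sketch (hypothetical driver code): sending a raw vendor
 * command and consuming the returned parameters. The opcode 0xfc01
 * below is made up for illustration; real drivers use their own
 * vendor (OGF 0x3f) opcodes.
 *
 *   struct sk_buff *skb;
 *
 *   skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *                        HCI_CMD_TIMEOUT);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   // skb->data holds the command's return parameters
 *   kfree_skb(skb);
 *
 * Note the double-underscore prefix: the caller is expected to hold
 * hdev->req_lock (see hci_req_lock() above), as dut_mode_write() does.
 */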

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
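
/* Caller sketch (illustrative): request builders such as hci_reset_req()
 * below only queue commands; hci_req_sync() runs the whole batch and
 * blocks until the final command completes or the timeout fires, e.g.:
 *
 *   err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */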

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
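
/* Worked out: the connection accept timeout is expressed in 0.625 ms
 * baseband slots, so 0x7d00 = 32000 slots * 0.625 ms = 20000 ms, which
 * is the "~20 secs" mentioned above.
 */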

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
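
/* For reference (per the Set Event Mask command in the Core Spec):
 * mask bit N enables the event with code N + 1, so events[byte] |= bit
 * corresponds to event code byte * 8 + ffs(bit). For example,
 * events[4] |= 0x02 sets bit 33 and enables Inquiry Result with RSSI
 * (event code 0x22 = 34).
 */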

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
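
/* Summary of the staged bring-up driven by __hci_init() above:
 *
 *   init1: reset plus controller identity (features, version, address)
 *   init2: transport basics (BR/EDR and/or LE setup, SSP, EIR)
 *   init3: event masks, link policy and LE capabilities
 *   init4: optional commands gated on the supported-commands bitmask
 *
 * AMP controllers stop after init2; BR/EDR/LE controllers run all four.
 */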

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
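
/* Every successful hci_dev_get() must be balanced with a hci_dev_put()
 * once the caller is done with the reference, e.g.:
 *
 *   struct hci_dev *hdev = hci_dev_get(index);
 *   if (!hdev)
 *           return -ENODEV;
 *   ...
 *   hci_dev_put(hdev);
 *
 * hci_inquiry() below follows exactly this pattern.
 */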

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
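
/* Reading of the loop above (hedged): the resolve list is kept sorted
 * by ascending |RSSI|, i.e. strongest signal first, while entries whose
 * name lookup is already NAME_PENDING keep their position. Name
 * resolution therefore starts with the devices most likely to still be
 * in range.
 */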

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
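
/* Userspace sketch (illustrative): this handler backs the HCIINQUIRY
 * ioctl on a raw HCI socket. A minimal caller looks roughly like:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info    info[8];
 *   } req = { 0 };
 *
 *   req.ir.dev_id  = 0;                  // hci0
 *   req.ir.length  = 8;                  // inquiry length, 1.28 s units
 *   req.ir.num_rsp = 8;
 *   req.ir.flags   = IREQ_CACHE_FLUSH;
 *   ioctl(sock, HCIINQUIRY, &req);
 *
 * On return, req.ir.num_rsp holds the number of inquiry_info entries
 * copied back right after the request structure.
 */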

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_notify(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, clean up */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_notify(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
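
/* Legacy bring-up path (illustrative): hci_dev_open() serves the
 * HCIDEVUP ioctl, which is what e.g. "hciconfig hci0 up" ends up
 * calling:
 *
 *   int sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *   ioctl(sock, HCIDEVUP, 0);    // 0 == hci0
 *
 * Userspace based on the management interface powers adapters on via
 * MGMT_OP_SET_POWERED instead and reaches hci_dev_do_open() through
 * the power_on work, not through this ioctl.
 */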
1540
1541 /* This function requires the caller holds hdev->lock */
1542 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1543 {
1544         struct hci_conn_params *p;
1545
1546         list_for_each_entry(p, &hdev->le_conn_params, list) {
1547                 if (p->conn) {
1548                         hci_conn_drop(p->conn);
1549                         hci_conn_put(p->conn);
1550                         p->conn = NULL;
1551                 }
1552                 list_del_init(&p->action);
1553         }
1554
1555         BT_DBG("All LE pending actions cleared");
1556 }
1557
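/* hci_dev_do_close - full power-down sequence for a controller.
 *
 * Runs the vendor shutdown hook when applicable, flushes all pending
 * work and queues, tears down discovery and connection state, sends
 * HCI_Reset when HCI_QUIRK_RESET_ON_CLOSE requires it and finally
 * calls the driver's close() callback.
 */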
1558 int hci_dev_do_close(struct hci_dev *hdev)
1559 {
1560         BT_DBG("%s %p", hdev->name, hdev);
1561
1562         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1563             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1564             test_bit(HCI_UP, &hdev->flags)) {
1565                 /* Execute vendor specific shutdown routine */
1566                 if (hdev->shutdown)
1567                         hdev->shutdown(hdev);
1568         }
1569
1570         cancel_delayed_work(&hdev->power_off);
1571
1572         hci_req_cancel(hdev, ENODEV);
1573         hci_req_lock(hdev);
1574
1575         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1576                 cancel_delayed_work_sync(&hdev->cmd_timer);
1577                 hci_req_unlock(hdev);
1578                 return 0;
1579         }
1580
1581         /* Flush RX and TX works */
1582         flush_work(&hdev->tx_work);
1583         flush_work(&hdev->rx_work);
1584
1585         if (hdev->discov_timeout > 0) {
1586                 cancel_delayed_work(&hdev->discov_off);
1587                 hdev->discov_timeout = 0;
1588                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1589                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1590         }
1591
1592         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1593                 cancel_delayed_work(&hdev->service_cache);
1594
1595         cancel_delayed_work_sync(&hdev->le_scan_disable);
1596         cancel_delayed_work_sync(&hdev->le_scan_restart);
1597
1598         if (hci_dev_test_flag(hdev, HCI_MGMT))
1599                 cancel_delayed_work_sync(&hdev->rpa_expired);
1600
1601         if (hdev->adv_instance_timeout) {
1602                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1603                 hdev->adv_instance_timeout = 0;
1604         }
1605
1606         /* Avoid potential lockdep warnings from the *_flush() calls by
1607          * ensuring the workqueue is empty up front.
1608          */
1609         drain_workqueue(hdev->workqueue);
1610
1611         hci_dev_lock(hdev);
1612
1613         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1614
1615         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1616                 if (hdev->dev_type == HCI_BREDR)
1617                         mgmt_powered(hdev, 0);
1618         }
1619
1620         hci_inquiry_cache_flush(hdev);
1621         hci_pend_le_actions_clear(hdev);
1622         hci_conn_hash_flush(hdev);
1623         hci_dev_unlock(hdev);
1624
1625         smp_unregister(hdev);
1626
1627         hci_notify(hdev, HCI_DEV_DOWN);
1628
1629         if (hdev->flush)
1630                 hdev->flush(hdev);
1631
1632         /* Reset device */
1633         skb_queue_purge(&hdev->cmd_q);
1634         atomic_set(&hdev->cmd_cnt, 1);
1635         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1636             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1637             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1638                 set_bit(HCI_INIT, &hdev->flags);
1639                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1640                 clear_bit(HCI_INIT, &hdev->flags);
1641         }
1642
1643         /* Flush cmd work */
1644         flush_work(&hdev->cmd_work);
1645
1646         /* Drop queues */
1647         skb_queue_purge(&hdev->rx_q);
1648         skb_queue_purge(&hdev->cmd_q);
1649         skb_queue_purge(&hdev->raw_q);
1650
1651         /* Drop last sent command */
1652         if (hdev->sent_cmd) {
1653                 cancel_delayed_work_sync(&hdev->cmd_timer);
1654                 kfree_skb(hdev->sent_cmd);
1655                 hdev->sent_cmd = NULL;
1656         }
1657
1658         clear_bit(HCI_RUNNING, &hdev->flags);
1659         hci_notify(hdev, HCI_DEV_CLOSE);
1660
1661         /* After this point our queues are empty and no tasks
1662          * are scheduled.
              */
1663         hdev->close(hdev);
1664
1665         /* Clear flags */
1666         hdev->flags &= BIT(HCI_RAW);
1667         hci_dev_clear_volatile_flags(hdev);
1668
1669         /* Controller radio is available but is currently powered down */
1670         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1671
1672         memset(hdev->eir, 0, sizeof(hdev->eir));
1673         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1674         bacpy(&hdev->random_addr, BDADDR_ANY);
1675
1676         hci_req_unlock(hdev);
1677
1678         hci_dev_put(hdev);
1679         return 0;
1680 }
1681
1682 int hci_dev_close(__u16 dev)
1683 {
1684         struct hci_dev *hdev;
1685         int err;
1686
1687         hdev = hci_dev_get(dev);
1688         if (!hdev)
1689                 return -ENODEV;
1690
1691         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1692                 err = -EBUSY;
1693                 goto done;
1694         }
1695
1696         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1697                 cancel_delayed_work(&hdev->power_off);
1698
1699         err = hci_dev_do_close(hdev);
1700
1701 done:
1702         hci_dev_put(hdev);
1703         return err;
1704 }
1705
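/* hci_dev_do_reset - soft-reset a running controller without closing it.
 *
 * Drops the RX and command queues, flushes the inquiry cache and the
 * connection hash, resets the flow control counters and then issues a
 * synchronous HCI_Reset while holding the request lock.
 */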
1706 static int hci_dev_do_reset(struct hci_dev *hdev)
1707 {
1708         int ret;
1709
1710         BT_DBG("%s %p", hdev->name, hdev);
1711
1712         hci_req_lock(hdev);
1713
1714         /* Drop queues */
1715         skb_queue_purge(&hdev->rx_q);
1716         skb_queue_purge(&hdev->cmd_q);
1717
1718         /* Avoid potential lockdep warnings from the *_flush() calls by
1719          * ensuring the workqueue is empty up front.
1720          */
1721         drain_workqueue(hdev->workqueue);
1722
1723         hci_dev_lock(hdev);
1724         hci_inquiry_cache_flush(hdev);
1725         hci_conn_hash_flush(hdev);
1726         hci_dev_unlock(hdev);
1727
1728         if (hdev->flush)
1729                 hdev->flush(hdev);
1730
1731         atomic_set(&hdev->cmd_cnt, 1);
1732         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1733
1734         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1735
1736         hci_req_unlock(hdev);
1737         return ret;
1738 }
1739
1740 int hci_dev_reset(__u16 dev)
1741 {
1742         struct hci_dev *hdev;
1743         int err;
1744
1745         hdev = hci_dev_get(dev);
1746         if (!hdev)
1747                 return -ENODEV;
1748
1749         if (!test_bit(HCI_UP, &hdev->flags)) {
1750                 err = -ENETDOWN;
1751                 goto done;
1752         }
1753
1754         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1755                 err = -EBUSY;
1756                 goto done;
1757         }
1758
1759         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1760                 err = -EOPNOTSUPP;
1761                 goto done;
1762         }
1763
1764         err = hci_dev_do_reset(hdev);
1765
1766 done:
1767         hci_dev_put(hdev);
1768         return err;
1769 }
1770
1771 int hci_dev_reset_stat(__u16 dev)
1772 {
1773         struct hci_dev *hdev;
1774         int ret = 0;
1775
1776         hdev = hci_dev_get(dev);
1777         if (!hdev)
1778                 return -ENODEV;
1779
1780         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1781                 ret = -EBUSY;
1782                 goto done;
1783         }
1784
1785         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1786                 ret = -EOPNOTSUPP;
1787                 goto done;
1788         }
1789
1790         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1791
1792 done:
1793         hci_dev_put(hdev);
1794         return ret;
1795 }
1796
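/* Keep the mgmt CONNECTABLE/DISCOVERABLE flags in sync with a scan
 * mode change that was made through the legacy HCISETSCAN ioctl
 * instead of the management interface, and notify userspace about
 * the resulting new settings.
 */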
1797 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1798 {
1799         bool conn_changed, discov_changed;
1800
1801         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1802
1803         if ((scan & SCAN_PAGE))
1804                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1805                                                           HCI_CONNECTABLE);
1806         else
1807                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1808                                                            HCI_CONNECTABLE);
1809
1810         if ((scan & SCAN_INQUIRY)) {
1811                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1812                                                             HCI_DISCOVERABLE);
1813         } else {
1814                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1815                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1816                                                              HCI_DISCOVERABLE);
1817         }
1818
1819         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1820                 return;
1821
1822         if (conn_changed || discov_changed) {
1823                 /* In case this was disabled through mgmt */
1824                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1825
1826                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1827                         mgmt_update_adv_data(hdev);
1828
1829                 mgmt_new_settings(hdev);
1830         }
1831 }
1832
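/* hci_dev_cmd - handle the legacy per-device ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...). Only permitted for configured BR/EDR
 * controllers that are not claimed as a user channel.
 */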
1833 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1834 {
1835         struct hci_dev *hdev;
1836         struct hci_dev_req dr;
1837         int err = 0;
1838
1839         if (copy_from_user(&dr, arg, sizeof(dr)))
1840                 return -EFAULT;
1841
1842         hdev = hci_dev_get(dr.dev_id);
1843         if (!hdev)
1844                 return -ENODEV;
1845
1846         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1847                 err = -EBUSY;
1848                 goto done;
1849         }
1850
1851         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1852                 err = -EOPNOTSUPP;
1853                 goto done;
1854         }
1855
1856         if (hdev->dev_type != HCI_BREDR) {
1857                 err = -EOPNOTSUPP;
1858                 goto done;
1859         }
1860
1861         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1862                 err = -EOPNOTSUPP;
1863                 goto done;
1864         }
1865
1866         switch (cmd) {
1867         case HCISETAUTH:
1868                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869                                    HCI_INIT_TIMEOUT);
1870                 break;
1871
1872         case HCISETENCRYPT:
1873                 if (!lmp_encrypt_capable(hdev)) {
1874                         err = -EOPNOTSUPP;
1875                         break;
1876                 }
1877
1878                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1879                         /* Auth must be enabled first */
1880                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1881                                            HCI_INIT_TIMEOUT);
1882                         if (err)
1883                                 break;
1884                 }
1885
1886                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1887                                    HCI_INIT_TIMEOUT);
1888                 break;
1889
1890         case HCISETSCAN:
1891                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1892                                    HCI_INIT_TIMEOUT);
1893
1894                 /* Ensure that the connectable and discoverable states
1895                  * get correctly modified as this was a non-mgmt change.
1896                  */
1897                 if (!err)
1898                         hci_update_scan_state(hdev, dr.dev_opt);
1899                 break;
1900
1901         case HCISETLINKPOL:
1902                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1903                                    HCI_INIT_TIMEOUT);
1904                 break;
1905
1906         case HCISETLINKMODE:
1907                 hdev->link_mode = ((__u16) dr.dev_opt) &
1908                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1909                 break;
1910
1911         case HCISETPTYPE:
1912                 hdev->pkt_type = (__u16) dr.dev_opt;
1913                 break;
1914
1915         case HCISETACLMTU:
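                /* dev_opt carries two native-endian 16-bit values here:
                 * the first __u16 is the packet count, the second is the
                 * MTU. On little-endian systems this is equivalent to
                 * dev_opt = (mtu << 16) | pkts. The same layout applies
                 * to HCISETSCOMTU below.
                 */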
1916                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1917                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1918                 break;
1919
1920         case HCISETSCOMTU:
1921                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1922                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1923                 break;
1924
1925         default:
1926                 err = -EINVAL;
1927                 break;
1928         }
1929
1930 done:
1931         hci_dev_put(hdev);
1932         return err;
1933 }
1934
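/* hci_get_dev_list - HCIGETDEVLIST ioctl helper.
 *
 * Userspace passes the number of entries it allocated in dev_num; the
 * kernel fills in at most that many (id, flags) pairs and writes the
 * count actually used back to the same buffer. A minimal userspace
 * sketch, assuming a raw HCI socket and omitting error handling:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	ioctl(hci_socket, HCIGETDEVLIST, dl);
 *	(dl->dev_num now holds the number of filled entries.)
 */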
1935 int hci_get_dev_list(void __user *arg)
1936 {
1937         struct hci_dev *hdev;
1938         struct hci_dev_list_req *dl;
1939         struct hci_dev_req *dr;
1940         int n = 0, size, err;
1941         __u16 dev_num;
1942
1943         if (get_user(dev_num, (__u16 __user *) arg))
1944                 return -EFAULT;
1945
1946         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1947                 return -EINVAL;
1948
1949         size = sizeof(*dl) + dev_num * sizeof(*dr);
1950
1951         dl = kzalloc(size, GFP_KERNEL);
1952         if (!dl)
1953                 return -ENOMEM;
1954
1955         dr = dl->dev_req;
1956
1957         read_lock(&hci_dev_list_lock);
1958         list_for_each_entry(hdev, &hci_dev_list, list) {
1959                 unsigned long flags = hdev->flags;
1960
1961                 /* When auto-off is configured it means the transport
1962                  * is running, but in that case still indicate that the
1963                  * device is actually down.
1964                  */
1965                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1966                         flags &= ~BIT(HCI_UP);
1967
1968                 (dr + n)->dev_id  = hdev->id;
1969                 (dr + n)->dev_opt = flags;
1970
1971                 if (++n >= dev_num)
1972                         break;
1973         }
1974         read_unlock(&hci_dev_list_lock);
1975
1976         dl->dev_num = n;
1977         size = sizeof(*dl) + n * sizeof(*dr);
1978
1979         err = copy_to_user(arg, dl, size);
1980         kfree(dl);
1981
1982         return err ? -EFAULT : 0;
1983 }
1984
1985 int hci_get_dev_info(void __user *arg)
1986 {
1987         struct hci_dev *hdev;
1988         struct hci_dev_info di;
1989         unsigned long flags;
1990         int err = 0;
1991
1992         if (copy_from_user(&di, arg, sizeof(di)))
1993                 return -EFAULT;
1994
1995         hdev = hci_dev_get(di.dev_id);
1996         if (!hdev)
1997                 return -ENODEV;
1998
1999         /* When auto-off is configured it means the transport
2000          * is running, but in that case still indicate that the
2001          * device is actually down.
2002          */
2003         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2004                 flags = hdev->flags & ~BIT(HCI_UP);
2005         else
2006                 flags = hdev->flags;
2007
2008         strcpy(di.name, hdev->name);
2009         di.bdaddr   = hdev->bdaddr;
2010         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2011         di.flags    = flags;
2012         di.pkt_type = hdev->pkt_type;
2013         if (lmp_bredr_capable(hdev)) {
2014                 di.acl_mtu  = hdev->acl_mtu;
2015                 di.acl_pkts = hdev->acl_pkts;
2016                 di.sco_mtu  = hdev->sco_mtu;
2017                 di.sco_pkts = hdev->sco_pkts;
2018         } else {
2019                 di.acl_mtu  = hdev->le_mtu;
2020                 di.acl_pkts = hdev->le_pkts;
2021                 di.sco_mtu  = 0;
2022                 di.sco_pkts = 0;
2023         }
2024         di.link_policy = hdev->link_policy;
2025         di.link_mode   = hdev->link_mode;
2026
2027         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2028         memcpy(&di.features, &hdev->features, sizeof(di.features));
2029
2030         if (copy_to_user(arg, &di, sizeof(di)))
2031                 err = -EFAULT;
2032
2033         hci_dev_put(hdev);
2034
2035         return err;
2036 }
2037
2038 /* ---- Interface to HCI drivers ---- */
2039
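/* rfkill callback: on block, mark the device rfkilled and power it
 * down unless it is still in setup or config; on unblock, only clear
 * the flag and leave powering back up to userspace or to the
 * auto-power logic.
 */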
2040 static int hci_rfkill_set_block(void *data, bool blocked)
2041 {
2042         struct hci_dev *hdev = data;
2043
2044         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2045
2046         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2047                 return -EBUSY;
2048
2049         if (blocked) {
2050                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2051                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2052                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2053                         hci_dev_do_close(hdev);
2054         } else {
2055                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2056         }
2057
2058         return 0;
2059 }
2060
2061 static const struct rfkill_ops hci_rfkill_ops = {
2062         .set_block = hci_rfkill_set_block,
2063 };
2064
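/* Deferred power-on handler (hdev->power_on work). Performs the actual
 * open and then re-checks the error conditions that were ignored while
 * the setup procedure was running, powering the device back off if any
 * of them still holds.
 */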
2065 static void hci_power_on(struct work_struct *work)
2066 {
2067         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2068         int err;
2069
2070         BT_DBG("%s", hdev->name);
2071
2072         err = hci_dev_do_open(hdev);
2073         if (err < 0) {
2074                 hci_dev_lock(hdev);
2075                 mgmt_set_powered_failed(hdev, err);
2076                 hci_dev_unlock(hdev);
2077                 return;
2078         }
2079
2080         /* During the HCI setup phase, a few error conditions are
2081          * ignored; they need to be checked now. If any of them still
2082          * holds, it is important to turn the device back off.
2083          */
2084         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2085             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2086             (hdev->dev_type == HCI_BREDR &&
2087              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2088              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2089                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2090                 hci_dev_do_close(hdev);
2091         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2092                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2093                                    HCI_AUTO_OFF_TIMEOUT);
2094         }
2095
2096         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2097                 /* For unconfigured devices, set the HCI_RAW flag
2098                  * so that userspace can easily identify them.
2099                  */
2100                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2101                         set_bit(HCI_RAW, &hdev->flags);
2102
2103                 /* For fully configured devices, this will send
2104                  * the Index Added event. For unconfigured devices,
2105                  * it will send the Unconfigured Index Added event.
2106                  *
2107                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2108                  * and no event will be sent.
2109                  */
2110                 mgmt_index_added(hdev);
2111         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2112                 /* Now that the controller is configured, it is
2113                  * important to clear the HCI_RAW flag.
2114                  */
2115                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2116                         clear_bit(HCI_RAW, &hdev->flags);
2117
2118                 /* Powering on the controller with HCI_CONFIG set only
2119                  * happens with the transition from unconfigured to
2120                  * configured. This will send the Index Added event.
2121                  */
2122                 mgmt_index_added(hdev);
2123         }
2124 }
2125
2126 static void hci_power_off(struct work_struct *work)
2127 {
2128         struct hci_dev *hdev = container_of(work, struct hci_dev,
2129                                             power_off.work);
2130
2131         BT_DBG("%s", hdev->name);
2132
2133         hci_dev_do_close(hdev);
2134 }
2135
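/* Hardware error recovery: let the driver handle the error code if it
 * provides a hw_error hook, then bounce the controller with a full
 * close/open cycle.
 */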
2136 static void hci_error_reset(struct work_struct *work)
2137 {
2138         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2139
2140         BT_DBG("%s", hdev->name);
2141
2142         if (hdev->hw_error)
2143                 hdev->hw_error(hdev, hdev->hw_error_code);
2144         else
2145                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2146                        hdev->hw_error_code);
2147
2148         if (hci_dev_do_close(hdev))
2149                 return;
2150
2151         hci_dev_do_open(hdev);
2152 }
2153
2154 static void hci_discov_off(struct work_struct *work)
2155 {
2156         struct hci_dev *hdev;
2157
2158         hdev = container_of(work, struct hci_dev, discov_off.work);
2159
2160         BT_DBG("%s", hdev->name);
2161
2162         mgmt_discoverable_timeout(hdev);
2163 }
2164
2165 static void hci_adv_timeout_expire(struct work_struct *work)
2166 {
2167         struct hci_dev *hdev;
2168
2169         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2170
2171         BT_DBG("%s", hdev->name);
2172
2173         mgmt_adv_timeout_expired(hdev);
2174 }
2175
2176 void hci_uuids_clear(struct hci_dev *hdev)
2177 {
2178         struct bt_uuid *uuid, *tmp;
2179
2180         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2181                 list_del(&uuid->list);
2182                 kfree(uuid);
2183         }
2184 }
2185
2186 void hci_link_keys_clear(struct hci_dev *hdev)
2187 {
2188         struct link_key *key;
2189
2190         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2191                 list_del_rcu(&key->list);
2192                 kfree_rcu(key, rcu);
2193         }
2194 }
2195
2196 void hci_smp_ltks_clear(struct hci_dev *hdev)
2197 {
2198         struct smp_ltk *k;
2199
2200         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2201                 list_del_rcu(&k->list);
2202                 kfree_rcu(k, rcu);
2203         }
2204 }
2205
2206 void hci_smp_irks_clear(struct hci_dev *hdev)
2207 {
2208         struct smp_irk *k;
2209
2210         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2211                 list_del_rcu(&k->list);
2212                 kfree_rcu(k, rcu);
2213         }
2214 }
2215
2216 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2217 {
2218         struct link_key *k;
2219
2220         rcu_read_lock();
2221         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2222                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2223                         rcu_read_unlock();
2224                         return k;
2225                 }
2226         }
2227         rcu_read_unlock();
2228
2229         return NULL;
2230 }
2231
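/* Decide whether a new BR/EDR link key should be stored persistently.
 * Debug keys and changed combination keys without a previous key are
 * never stored; beyond that the decision follows the authentication
 * requirements that were negotiated for the connection.
 */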
2232 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2233                                u8 key_type, u8 old_key_type)
2234 {
2235         /* Legacy key */
2236         if (key_type < 0x03)
2237                 return true;
2238
2239         /* Debug keys are insecure so don't store them persistently */
2240         if (key_type == HCI_LK_DEBUG_COMBINATION)
2241                 return false;
2242
2243         /* Changed combination key and there's no previous one */
2244         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2245                 return false;
2246
2247         /* Security mode 3 case */
2248         if (!conn)
2249                 return true;
2250
2251         /* BR/EDR key derived using SC from an LE link */
2252         if (conn->type == LE_LINK)
2253                 return true;
2254
2255         /* Neither the local nor the remote side requested no-bonding */
2256         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2257                 return true;
2258
2259         /* Local side had dedicated bonding as requirement */
2260         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2261                 return true;
2262
2263         /* Remote side had dedicated bonding as requirement */
2264         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2265                 return true;
2266
2267         /* If none of the above criteria match, then don't store the key
2268          * persistently.
              */
2269         return false;
2270 }
2271
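/* Map an SMP LTK type to the HCI role it is used under: SMP_LTK
 * entries belong to the master role, all other types to the slave
 * role.
 */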
2272 static u8 ltk_role(u8 type)
2273 {
2274         if (type == SMP_LTK)
2275                 return HCI_ROLE_MASTER;
2276
2277         return HCI_ROLE_SLAVE;
2278 }
2279
2280 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2281                              u8 addr_type, u8 role)
2282 {
2283         struct smp_ltk *k;
2284
2285         rcu_read_lock();
2286         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2287                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2288                         continue;
2289
2290                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2291                         rcu_read_unlock();
2292                         return k;
2293                 }
2294         }
2295         rcu_read_unlock();
2296
2297         return NULL;
2298 }
2299
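/* Look up an IRK by resolvable private address. The first pass checks
 * for an RPA that has already been resolved; the second pass runs the
 * resolution function against every stored IRK and caches a match in
 * irk->rpa for later lookups.
 */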
2300 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2301 {
2302         struct smp_irk *irk;
2303
2304         rcu_read_lock();
2305         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2306                 if (!bacmp(&irk->rpa, rpa)) {
2307                         rcu_read_unlock();
2308                         return irk;
2309                 }
2310         }
2311
2312         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2314                         bacpy(&irk->rpa, rpa);
2315                         rcu_read_unlock();
2316                         return irk;
2317                 }
2318         }
2319         rcu_read_unlock();
2320
2321         return NULL;
2322 }
2323
2324 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2325                                      u8 addr_type)
2326 {
2327         struct smp_irk *irk;
2328
2329         /* Identity Address must be public or static random */
2330         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2331                 return NULL;
2332
2333         rcu_read_lock();
2334         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2335                 if (addr_type == irk->addr_type &&
2336                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2337                         rcu_read_unlock();
2338                         return irk;
2339                 }
2340         }
2341         rcu_read_unlock();
2342
2343         return NULL;
2344 }
2345
2346 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2347                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2348                                   u8 pin_len, bool *persistent)
2349 {
2350         struct link_key *key, *old_key;
2351         u8 old_key_type;
2352
2353         old_key = hci_find_link_key(hdev, bdaddr);
2354         if (old_key) {
2355                 old_key_type = old_key->type;
2356                 key = old_key;
2357         } else {
2358                 old_key_type = conn ? conn->key_type : 0xff;
2359                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2360                 if (!key)
2361                         return NULL;
2362                 list_add_rcu(&key->list, &hdev->link_keys);
2363         }
2364
2365         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2366
2367         /* Some buggy controller combinations generate a changed
2368          * combination key for legacy pairing even when there's no
2369          * previous key.
              */
2370         if (type == HCI_LK_CHANGED_COMBINATION &&
2371             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2372                 type = HCI_LK_COMBINATION;
2373                 if (conn)
2374                         conn->key_type = type;
2375         }
2376
2377         bacpy(&key->bdaddr, bdaddr);
2378         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2379         key->pin_len = pin_len;
2380
2381         if (type == HCI_LK_CHANGED_COMBINATION)
2382                 key->type = old_key_type;
2383         else
2384                 key->type = type;
2385
2386         if (persistent)
2387                 *persistent = hci_persistent_key(hdev, conn, type,
2388                                                  old_key_type);
2389
2390         return key;
2391 }
2392
2393 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394                             u8 addr_type, u8 type, u8 authenticated,
2395                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2396 {
2397         struct smp_ltk *key, *old_key;
2398         u8 role = ltk_role(type);
2399
2400         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2401         if (old_key)
2402                 key = old_key;
2403         else {
2404                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2405                 if (!key)
2406                         return NULL;
2407                 list_add_rcu(&key->list, &hdev->long_term_keys);
2408         }
2409
2410         bacpy(&key->bdaddr, bdaddr);
2411         key->bdaddr_type = addr_type;
2412         memcpy(key->val, tk, sizeof(key->val));
2413         key->authenticated = authenticated;
2414         key->ediv = ediv;
2415         key->rand = rand;
2416         key->enc_size = enc_size;
2417         key->type = type;
2418
2419         return key;
2420 }
2421
2422 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2423                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2424 {
2425         struct smp_irk *irk;
2426
2427         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2428         if (!irk) {
2429                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2430                 if (!irk)
2431                         return NULL;
2432
2433                 bacpy(&irk->bdaddr, bdaddr);
2434                 irk->addr_type = addr_type;
2435
2436                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2437         }
2438
2439         memcpy(irk->val, val, 16);
2440         bacpy(&irk->rpa, rpa);
2441
2442         return irk;
2443 }
2444
2445 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2446 {
2447         struct link_key *key;
2448
2449         key = hci_find_link_key(hdev, bdaddr);
2450         if (!key)
2451                 return -ENOENT;
2452
2453         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2454
2455         list_del_rcu(&key->list);
2456         kfree_rcu(key, rcu);
2457
2458         return 0;
2459 }
2460
2461 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2462 {
2463         struct smp_ltk *k;
2464         int removed = 0;
2465
2466         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2467                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2468                         continue;
2469
2470                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2471
2472                 list_del_rcu(&k->list);
2473                 kfree_rcu(k, rcu);
2474                 removed++;
2475         }
2476
2477         return removed ? 0 : -ENOENT;
2478 }
2479
2480 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2481 {
2482         struct smp_irk *k;
2483
2484         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2485                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2486                         continue;
2487
2488                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2489
2490                 list_del_rcu(&k->list);
2491                 kfree_rcu(k, rcu);
2492         }
2493 }
2494
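/* Check whether any stored key material exists for the given address:
 * a link key for BR/EDR, or a long term key for LE after resolving
 * the address to its identity address through a matching IRK.
 */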
2495 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2496 {
2497         struct smp_ltk *k;
2498         struct smp_irk *irk;
2499         u8 addr_type;
2500
2501         if (type == BDADDR_BREDR) {
2502                 if (hci_find_link_key(hdev, bdaddr))
2503                         return true;
2504                 return false;
2505         }
2506
2507         /* Convert to the HCI addr type which struct smp_ltk uses */
2508         if (type == BDADDR_LE_PUBLIC)
2509                 addr_type = ADDR_LE_DEV_PUBLIC;
2510         else
2511                 addr_type = ADDR_LE_DEV_RANDOM;
2512
2513         irk = hci_get_irk(hdev, bdaddr, addr_type);
2514         if (irk) {
2515                 bdaddr = &irk->bdaddr;
2516                 addr_type = irk->addr_type;
2517         }
2518
2519         rcu_read_lock();
2520         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2521                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2522                         rcu_read_unlock();
2523                         return true;
2524                 }
2525         }
2526         rcu_read_unlock();
2527
2528         return false;
2529 }
2530
2531 /* HCI command timer function. Fires when the controller has not
      * responded to the last sent command in time; logs the opcode of
      * the stalled command and restores the command credit so that the
      * command queue can make progress again.
      */
2532 static void hci_cmd_timeout(struct work_struct *work)
2533 {
2534         struct hci_dev *hdev = container_of(work, struct hci_dev,
2535                                             cmd_timer.work);
2536
2537         if (hdev->sent_cmd) {
2538                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2539                 u16 opcode = __le16_to_cpu(sent->opcode);
2540
2541                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2542         } else {
2543                 BT_ERR("%s command tx timeout", hdev->name);
2544         }
2545
2546         atomic_set(&hdev->cmd_cnt, 1);
2547         queue_work(hdev->workqueue, &hdev->cmd_work);
2548 }
2549
2550 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2551                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2552 {
2553         struct oob_data *data;
2554
2555         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2556                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2557                         continue;
2558                 if (data->bdaddr_type != bdaddr_type)
2559                         continue;
2560                 return data;
2561         }
2562
2563         return NULL;
2564 }
2565
2566 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2567                                u8 bdaddr_type)
2568 {
2569         struct oob_data *data;
2570
2571         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2572         if (!data)
2573                 return -ENOENT;
2574
2575         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2576
2577         list_del(&data->list);
2578         kfree(data);
2579
2580         return 0;
2581 }
2582
2583 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2584 {
2585         struct oob_data *data, *n;
2586
2587         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2588                 list_del(&data->list);
2589                 kfree(data);
2590         }
2591 }
2592
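/* Store remote OOB data for a device. The present field records which
 * hash/randomizer pairs are valid: 0x01 for P-192 only, 0x02 for
 * P-256 only, 0x03 for both and 0x00 when neither pair was provided.
 */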
2593 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2594                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2595                             u8 *hash256, u8 *rand256)
2596 {
2597         struct oob_data *data;
2598
2599         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2600         if (!data) {
2601                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2602                 if (!data)
2603                         return -ENOMEM;
2604
2605                 bacpy(&data->bdaddr, bdaddr);
2606                 data->bdaddr_type = bdaddr_type;
2607                 list_add(&data->list, &hdev->remote_oob_data);
2608         }
2609
2610         if (hash192 && rand192) {
2611                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2612                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2613                 if (hash256 && rand256)
2614                         data->present = 0x03;
2615         } else {
2616                 memset(data->hash192, 0, sizeof(data->hash192));
2617                 memset(data->rand192, 0, sizeof(data->rand192));
2618                 if (hash256 && rand256)
2619                         data->present = 0x02;
2620                 else
2621                         data->present = 0x00;
2622         }
2623
2624         if (hash256 && rand256) {
2625                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2626                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2627         } else {
2628                 memset(data->hash256, 0, sizeof(data->hash256));
2629                 memset(data->rand256, 0, sizeof(data->rand256));
2630                 if (hash192 && rand192)
2631                         data->present = 0x01;
2632         }
2633
2634         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2635
2636         return 0;
2637 }
2638
2639 /* This function requires the caller holds hdev->lock */
2640 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2641 {
2642         struct adv_info *adv_instance;
2643
2644         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2645                 if (adv_instance->instance == instance)
2646                         return adv_instance;
2647         }
2648
2649         return NULL;
2650 }
2651
2652 /* This function requires the caller holds hdev->lock */
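/* Return the advertising instance that follows @instance, wrapping
 * around to the first entry after the last one so that instances can
 * be rotated round-robin.
 */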
2653 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
     {
2654         struct adv_info *cur_instance;
2655
2656         cur_instance = hci_find_adv_instance(hdev, instance);
2657         if (!cur_instance)
2658                 return NULL;
2659
2660         if (cur_instance == list_last_entry(&hdev->adv_instances,
2661                                             struct adv_info, list))
2662                 return list_first_entry(&hdev->adv_instances,
2663                                         struct adv_info, list);
2664         else
2665                 return list_next_entry(cur_instance, list);
2666 }
2667
2668 /* This function requires the caller holds hdev->lock */
2669 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2670 {
2671         struct adv_info *adv_instance;
2672
2673         adv_instance = hci_find_adv_instance(hdev, instance);
2674         if (!adv_instance)
2675                 return -ENOENT;
2676
2677         BT_DBG("%s removing instance %d", hdev->name, instance);
2678
2679         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2680                 cancel_delayed_work(&hdev->adv_instance_expire);
2681                 hdev->adv_instance_timeout = 0;
2682         }
2683
2684         list_del(&adv_instance->list);
2685         kfree(adv_instance);
2686
2687         hdev->adv_instance_cnt--;
2688
2689         return 0;
2690 }
2691
2692 /* This function requires the caller holds hdev->lock */
2693 void hci_adv_instances_clear(struct hci_dev *hdev)
2694 {
2695         struct adv_info *adv_instance, *n;
2696
2697         if (hdev->adv_instance_timeout) {
2698                 cancel_delayed_work(&hdev->adv_instance_expire);
2699                 hdev->adv_instance_timeout = 0;
2700         }
2701
2702         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2703                 list_del(&adv_instance->list);
2704                 kfree(adv_instance);
2705         }
2706
2707         hdev->adv_instance_cnt = 0;
2708 }
2709
2710 /* This function requires the caller holds hdev->lock */
2711 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2712                          u16 adv_data_len, u8 *adv_data,
2713                          u16 scan_rsp_len, u8 *scan_rsp_data,
2714                          u16 timeout, u16 duration)
2715 {
2716         struct adv_info *adv_instance;
2717
2718         adv_instance = hci_find_adv_instance(hdev, instance);
2719         if (adv_instance) {
2720                 memset(adv_instance->adv_data, 0,
2721                        sizeof(adv_instance->adv_data));
2722                 memset(adv_instance->scan_rsp_data, 0,
2723                        sizeof(adv_instance->scan_rsp_data));
2724         } else {
2725                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2726                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2727                         return -EOVERFLOW;
2728
2729                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2730                 if (!adv_instance)
2731                         return -ENOMEM;
2732
2733                 adv_instance->pending = true;
2734                 adv_instance->instance = instance;
2735                 list_add(&adv_instance->list, &hdev->adv_instances);
2736                 hdev->adv_instance_cnt++;
2737         }
2738
2739         adv_instance->flags = flags;
2740         adv_instance->adv_data_len = adv_data_len;
2741         adv_instance->scan_rsp_len = scan_rsp_len;
2742
2743         if (adv_data_len)
2744                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2745
2746         if (scan_rsp_len)
2747                 memcpy(adv_instance->scan_rsp_data,
2748                        scan_rsp_data, scan_rsp_len);
2749
2750         adv_instance->timeout = timeout;
2751         adv_instance->remaining_time = timeout;
2752
2753         if (duration == 0)
2754                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2755         else
2756                 adv_instance->duration = duration;
2757
2758         BT_DBG("%s for instance %d", hdev->name, instance);
2759
2760         return 0;
2761 }
2762
2763 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2764                                          bdaddr_t *bdaddr, u8 type)
2765 {
2766         struct bdaddr_list *b;
2767
2768         list_for_each_entry(b, bdaddr_list, list) {
2769                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2770                         return b;
2771         }
2772
2773         return NULL;
2774 }
2775
2776 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2777 {
2778         struct list_head *p, *n;
2779
2780         list_for_each_safe(p, n, bdaddr_list) {
2781                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2782
2783                 list_del(p);
2784                 kfree(b);
2785         }
2786 }
2787
2788 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2789 {
2790         struct bdaddr_list *entry;
2791
2792         if (!bacmp(bdaddr, BDADDR_ANY))
2793                 return -EBADF;
2794
2795         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2796                 return -EEXIST;
2797
2798         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2799         if (!entry)
2800                 return -ENOMEM;
2801
2802         bacpy(&entry->bdaddr, bdaddr);
2803         entry->bdaddr_type = type;
2804
2805         list_add(&entry->list, list);
2806
2807         return 0;
2808 }
2809
2810 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2811 {
2812         struct bdaddr_list *entry;
2813
2814         if (!bacmp(bdaddr, BDADDR_ANY)) {
2815                 hci_bdaddr_list_clear(list);
2816                 return 0;
2817         }
2818
2819         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2820         if (!entry)
2821                 return -ENOENT;
2822
2823         list_del(&entry->list);
2824         kfree(entry);
2825
2826         return 0;
2827 }
2828
2829 /* This function requires the caller holds hdev->lock */
2830 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2831                                                bdaddr_t *addr, u8 addr_type)
2832 {
2833         struct hci_conn_params *params;
2834
2835         list_for_each_entry(params, &hdev->le_conn_params, list) {
2836                 if (bacmp(&params->addr, addr) == 0 &&
2837                     params->addr_type == addr_type) {
2838                         return params;
2839                 }
2840         }
2841
2842         return NULL;
2843 }
2844
2845 /* This function requires the caller holds hdev->lock */
2846 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2847                                                   bdaddr_t *addr, u8 addr_type)
2848 {
2849         struct hci_conn_params *param;
2850
2851         list_for_each_entry(param, list, action) {
2852                 if (bacmp(&param->addr, addr) == 0 &&
2853                     param->addr_type == addr_type)
2854                         return param;
2855         }
2856
2857         return NULL;
2858 }
2859
2860 /* This function requires the caller holds hdev->lock */
2861 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2862                                                     bdaddr_t *addr,
2863                                                     u8 addr_type)
2864 {
2865         struct hci_conn_params *param;
2866
2867         list_for_each_entry(param, &hdev->pend_le_conns, action) {
2868                 if (bacmp(&param->addr, addr) == 0 &&
2869                     param->addr_type == addr_type &&
2870                     param->explicit_connect)
2871                         return param;
2872         }
2873
2874         list_for_each_entry(param, &hdev->pend_le_reports, action) {
2875                 if (bacmp(&param->addr, addr) == 0 &&
2876                     param->addr_type == addr_type &&
2877                     param->explicit_connect)
2878                         return param;
2879         }
2880
2881         return NULL;
2882 }
2883
2884 /* This function requires the caller holds hdev->lock */
2885 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2886                                             bdaddr_t *addr, u8 addr_type)
2887 {
2888         struct hci_conn_params *params;
2889
2890         params = hci_conn_params_lookup(hdev, addr, addr_type);
2891         if (params)
2892                 return params;
2893
2894         params = kzalloc(sizeof(*params), GFP_KERNEL);
2895         if (!params) {
2896                 BT_ERR("Out of memory");
2897                 return NULL;
2898         }
2899
2900         bacpy(&params->addr, addr);
2901         params->addr_type = addr_type;
2902
2903         list_add(&params->list, &hdev->le_conn_params);
2904         INIT_LIST_HEAD(&params->action);
2905
2906         params->conn_min_interval = hdev->le_conn_min_interval;
2907         params->conn_max_interval = hdev->le_conn_max_interval;
2908         params->conn_latency = hdev->le_conn_latency;
2909         params->supervision_timeout = hdev->le_supv_timeout;
2910         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2911
2912         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2913
2914         return params;
2915 }
2916
2917 static void hci_conn_params_free(struct hci_conn_params *params)
2918 {
2919         if (params->conn) {
2920                 hci_conn_drop(params->conn);
2921                 hci_conn_put(params->conn);
2922         }
2923
2924         list_del(&params->action);
2925         list_del(&params->list);
2926         kfree(params);
2927 }
2928
2929 /* This function requires the caller holds hdev->lock */
2930 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2931 {
2932         struct hci_conn_params *params;
2933
2934         params = hci_conn_params_lookup(hdev, addr, addr_type);
2935         if (!params)
2936                 return;
2937
2938         hci_conn_params_free(params);
2939
2940         hci_update_background_scan(hdev);
2941
2942         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2943 }
2944
2945 /* This function requires the caller holds hdev->lock */
2946 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2947 {
2948         struct hci_conn_params *params, *tmp;
2949
2950         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2951                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2952                         continue;
2953
2954                 /* If trying to establish a one-time connection to a
2955                  * disabled device, leave the params but mark them for a
2956                  * single attempt.
                      */
2957                 if (params->explicit_connect) {
2958                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2959                         continue;
2960                 }
2961
2962                 list_del(&params->list);
2963                 kfree(params);
2964         }
2965
2966         BT_DBG("All LE disabled connection parameters were removed");
2967 }
2968
2969 /* This function requires the caller holds hdev->lock */
2970 void hci_conn_params_clear_all(struct hci_dev *hdev)
2971 {
2972         struct hci_conn_params *params, *tmp;
2973
2974         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2975                 hci_conn_params_free(params);
2976
2977         hci_update_background_scan(hdev);
2978
2979         BT_DBG("All LE connection parameters were removed");
2980 }
2981
2982 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2983 {
2984         if (status) {
2985                 BT_ERR("Failed to start inquiry: status %d", status);
2986
2987                 hci_dev_lock(hdev);
2988                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2989                 hci_dev_unlock(hdev);
2990                 return;
2991         }
2992 }
2993
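/* Completion handler for the LE scan disable request. For LE-only
 * discovery this simply stops the discovery state machine. For
 * interleaved discovery it kicks off the BR/EDR inquiry phase, unless
 * the controller ran the inquiry simultaneously with the LE scan, in
 * which case only the discovery state may need updating.
 */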
2994 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2995                                           u16 opcode)
2996 {
2997         /* General inquiry access code (GIAC) */
2998         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2999         struct hci_cp_inquiry cp;
3000         int err;
3001
3002         if (status) {
3003                 BT_ERR("Failed to disable LE scanning: status %d", status);
3004                 return;
3005         }
3006
3007         hdev->discovery.scan_start = 0;
3008
3009         switch (hdev->discovery.type) {
3010         case DISCOV_TYPE_LE:
3011                 hci_dev_lock(hdev);
3012                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3013                 hci_dev_unlock(hdev);
3014                 break;
3015
3016         case DISCOV_TYPE_INTERLEAVED:
3017                 hci_dev_lock(hdev);
3018
3019                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3020                              &hdev->quirks)) {
3021                         /* If we were running an LE-only scan, change the
3022                          * discovery state. If we ran LE and BR/EDR inquiry
3023                          * simultaneously and the inquiry already finished,
3024                          * stop discovery; otherwise the BR/EDR inquiry will
3025                          * stop it when done. If we are about to resolve a
3026                          * remote device name, do not change the state.
3027                          */
3028                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3029                             hdev->discovery.state != DISCOVERY_RESOLVING)
3030                                 hci_discovery_set_state(hdev,
3031                                                         DISCOVERY_STOPPED);
3032                 } else {
3033                         struct hci_request req;
3034
3035                         hci_inquiry_cache_flush(hdev);
3036
3037                         hci_req_init(&req, hdev);
3038
3039                         memset(&cp, 0, sizeof(cp));
3040                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3041                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3042                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3043
3044                         err = hci_req_run(&req, inquiry_complete);
3045                         if (err) {
3046                                 BT_ERR("Inquiry request failed: err %d", err);
3047                                 hci_discovery_set_state(hdev,
3048                                                         DISCOVERY_STOPPED);
3049                         }
3050                 }
3051
3052                 hci_dev_unlock(hdev);
3053                 break;
3054         }
3055 }
3056
3057 static void le_scan_disable_work(struct work_struct *work)
3058 {
3059         struct hci_dev *hdev = container_of(work, struct hci_dev,
3060                                             le_scan_disable.work);
3061         struct hci_request req;
3062         int err;
3063
3064         BT_DBG("%s", hdev->name);
3065
3066         cancel_delayed_work_sync(&hdev->le_scan_restart);
3067
3068         hci_req_init(&req, hdev);
3069
3070         hci_req_add_le_scan_disable(&req);
3071
3072         err = hci_req_run(&req, le_scan_disable_work_complete);
3073         if (err)
3074                 BT_ERR("Disable LE scanning request failed: err %d", err);
3075 }
3076
3077 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3078                                           u16 opcode)
3079 {
3080         unsigned long timeout, duration, scan_start, now;
3081
3082         BT_DBG("%s", hdev->name);
3083
3084         if (status) {
3085                 BT_ERR("Failed to restart LE scan: status %d", status);
3086                 return;
3087         }
3088
3089         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3090             !hdev->discovery.scan_start)
3091                 return;
3092
3093         /* When the scan was started, hdev->le_scan_disable was queued to
3094          * run at scan_start + duration. The scan restart canceled that
3095          * work, so we need to queue it again with the proper remaining
3096          * timeout to make sure the scan does not run indefinitely.
3097          */
3098         duration = hdev->discovery.scan_duration;
3099         scan_start = hdev->discovery.scan_start;
3100         now = jiffies;
3101         if (now - scan_start <= duration) {
3102                 int elapsed;
3103
3104                 if (now >= scan_start)
3105                         elapsed = now - scan_start;
3106                 else
3107                         elapsed = ULONG_MAX - scan_start + now;
3108
3109                 timeout = duration - elapsed;
3110         } else {
3111                 timeout = 0;
3112         }
3113         queue_delayed_work(hdev->workqueue,
3114                            &hdev->le_scan_disable, timeout);
3115 }
3116
3117 static void le_scan_restart_work(struct work_struct *work)
3118 {
3119         struct hci_dev *hdev = container_of(work, struct hci_dev,
3120                                             le_scan_restart.work);
3121         struct hci_request req;
3122         struct hci_cp_le_set_scan_enable cp;
3123         int err;
3124
3125         BT_DBG("%s", hdev->name);
3126
3127         /* If controller is not scanning we are done. */
3128         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3129                 return;
3130
3131         hci_req_init(&req, hdev);
3132
3133         hci_req_add_le_scan_disable(&req);
3134
3135         memset(&cp, 0, sizeof(cp));
3136         cp.enable = LE_SCAN_ENABLE;
3137         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3138         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3139
3140         err = hci_req_run(&req, le_scan_restart_work_complete);
3141         if (err)
3142                 BT_ERR("Restart LE scan request failed: err %d", err);
3143 }
3144
3145 /* Copy the Identity Address of the controller.
3146  *
3147  * If the controller has a public BD_ADDR, then by default use that one.
3148  * If this is an LE-only controller without a public address, default to
3149  * the static random address.
3150  *
3151  * For debugging purposes it is possible to force controllers with a
3152  * public address to use the static random address instead.
3153  *
3154  * In case BR/EDR has been disabled on a dual-mode controller and
3155  * userspace has configured a static address, then that address
3156  * becomes the identity address instead of the public BR/EDR address.
3157  */
3158 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3159                                u8 *bdaddr_type)
3160 {
3161         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3162             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3163             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3164              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3165                 bacpy(bdaddr, &hdev->static_addr);
3166                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3167         } else {
3168                 bacpy(bdaddr, &hdev->bdaddr);
3169                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3170         }
3171 }
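
/* Usage sketch (hypothetical caller): fill the own-address pair for an
 * outgoing LE operation; own_addr_type comes back as either
 * ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM.
 *
 *	bdaddr_t own_addr;
 *	u8 own_addr_type;
 *
 *	hci_copy_identity_address(hdev, &own_addr, &own_addr_type);
 */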
3172
3173 /* Alloc HCI device */
3174 struct hci_dev *hci_alloc_dev(void)
3175 {
3176         struct hci_dev *hdev;
3177
3178         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3179         if (!hdev)
3180                 return NULL;
3181
3182         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3183         hdev->esco_type = (ESCO_HV1);
3184         hdev->link_mode = (HCI_LM_ACCEPT);
3185         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3186         hdev->io_capability = 0x03;     /* No Input No Output */
3187         hdev->manufacturer = 0xffff;    /* Default to internal use */
3188         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3189         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3190         hdev->adv_instance_cnt = 0;
3191         hdev->cur_adv_instance = 0x00;
3192         hdev->adv_instance_timeout = 0;
3193
3194         hdev->sniff_max_interval = 800;      /* 500 ms (0.625 ms slots) */
3195         hdev->sniff_min_interval = 80;       /* 50 ms (0.625 ms slots) */
3196
3197         hdev->le_adv_channel_map = 0x07;     /* all three advertising channels */
3198         hdev->le_adv_min_interval = 0x0800;  /* 1.28 s (0.625 ms units) */
3199         hdev->le_adv_max_interval = 0x0800;  /* 1.28 s (0.625 ms units) */
3200         hdev->le_scan_interval = 0x0060;     /* 60 ms (0.625 ms units) */
3201         hdev->le_scan_window = 0x0030;       /* 30 ms (0.625 ms units) */
3202         hdev->le_conn_min_interval = 0x0028; /* 50 ms (1.25 ms units) */
3203         hdev->le_conn_max_interval = 0x0038; /* 70 ms (1.25 ms units) */
3204         hdev->le_conn_latency = 0x0000;
3205         hdev->le_supv_timeout = 0x002a;      /* 420 ms (10 ms units) */
3206         hdev->le_def_tx_len = 0x001b;        /* 27 bytes */
3207         hdev->le_def_tx_time = 0x0148;       /* 328 us */
3208         hdev->le_max_tx_len = 0x001b;
3209         hdev->le_max_tx_time = 0x0148;
3210         hdev->le_max_rx_len = 0x001b;
3211         hdev->le_max_rx_time = 0x0148;
3212
3213         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3214         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3215         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3216         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3217
3218         mutex_init(&hdev->lock);
3219         mutex_init(&hdev->req_lock);
3220
3221         INIT_LIST_HEAD(&hdev->mgmt_pending);
3222         INIT_LIST_HEAD(&hdev->blacklist);
3223         INIT_LIST_HEAD(&hdev->whitelist);
3224         INIT_LIST_HEAD(&hdev->uuids);
3225         INIT_LIST_HEAD(&hdev->link_keys);
3226         INIT_LIST_HEAD(&hdev->long_term_keys);
3227         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3228         INIT_LIST_HEAD(&hdev->remote_oob_data);
3229         INIT_LIST_HEAD(&hdev->le_white_list);
3230         INIT_LIST_HEAD(&hdev->le_conn_params);
3231         INIT_LIST_HEAD(&hdev->pend_le_conns);
3232         INIT_LIST_HEAD(&hdev->pend_le_reports);
3233         INIT_LIST_HEAD(&hdev->conn_hash.list);
3234         INIT_LIST_HEAD(&hdev->adv_instances);
3235
3236         INIT_WORK(&hdev->rx_work, hci_rx_work);
3237         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3238         INIT_WORK(&hdev->tx_work, hci_tx_work);
3239         INIT_WORK(&hdev->power_on, hci_power_on);
3240         INIT_WORK(&hdev->error_reset, hci_error_reset);
3241
3242         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3243         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3244         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3245         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3246         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3247
3248         skb_queue_head_init(&hdev->rx_q);
3249         skb_queue_head_init(&hdev->cmd_q);
3250         skb_queue_head_init(&hdev->raw_q);
3251
3252         init_waitqueue_head(&hdev->req_wait_q);
3253
3254         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3255
3256         hci_init_sysfs(hdev);
3257         discovery_init(hdev);
3258
3259         return hdev;
3260 }
3261 EXPORT_SYMBOL(hci_alloc_dev);
3262
3263 /* Free HCI device */
3264 void hci_free_dev(struct hci_dev *hdev)
3265 {
3266         /* Will be freed via the device release callback */
3267         put_device(&hdev->dev);
3268 }
3269 EXPORT_SYMBOL(hci_free_dev);
3270
3271 /* Register HCI device */
3272 int hci_register_dev(struct hci_dev *hdev)
3273 {
3274         int id, error;
3275
3276         if (!hdev->open || !hdev->close || !hdev->send)
3277                 return -EINVAL;
3278
3279         /* Do not allow HCI_AMP devices to register at index 0,
3280          * so the index can be used as the AMP controller ID.
3281          */
3282         switch (hdev->dev_type) {
3283         case HCI_BREDR:
3284                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3285                 break;
3286         case HCI_AMP:
3287                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3288                 break;
3289         default:
3290                 return -EINVAL;
3291         }
3292
3293         if (id < 0)
3294                 return id;
3295
3296         sprintf(hdev->name, "hci%d", id);
3297         hdev->id = id;
3298
3299         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3300
3301         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3302                                           WQ_MEM_RECLAIM, 1, hdev->name);
3303         if (!hdev->workqueue) {
3304                 error = -ENOMEM;
3305                 goto err;
3306         }
3307
3308         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3309                                               WQ_MEM_RECLAIM, 1, hdev->name);
3310         if (!hdev->req_workqueue) {
3311                 destroy_workqueue(hdev->workqueue);
3312                 error = -ENOMEM;
3313                 goto err;
3314         }
3315
3316         if (!IS_ERR_OR_NULL(bt_debugfs))
3317                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3318
3319         dev_set_name(&hdev->dev, "%s", hdev->name);
3320
3321         error = device_add(&hdev->dev);
3322         if (error < 0)
3323                 goto err_wqueue;
3324
3325         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3326                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3327                                     hdev);
3328         if (hdev->rfkill) {
3329                 if (rfkill_register(hdev->rfkill) < 0) {
3330                         rfkill_destroy(hdev->rfkill);
3331                         hdev->rfkill = NULL;
3332                 }
3333         }
3334
3335         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3336                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3337
3338         hci_dev_set_flag(hdev, HCI_SETUP);
3339         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3340
3341         if (hdev->dev_type == HCI_BREDR) {
3342                 /* Assume BR/EDR support until proven otherwise (such as
3343                  * through reading supported features during init).
3344                  */
3345                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3346         }
3347
3348         write_lock(&hci_dev_list_lock);
3349         list_add(&hdev->list, &hci_dev_list);
3350         write_unlock(&hci_dev_list_lock);
3351
3352         /* Devices that are marked for raw-only usage are unconfigured
3353          * and should not be included in normal operation.
3354          */
3355         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3356                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3357
3358         hci_notify(hdev, HCI_DEV_REG);
3359         hci_dev_hold(hdev);
3360
3361         queue_work(hdev->req_workqueue, &hdev->power_on);
3362
3363         return id;
3364
3365 err_wqueue:
3366         destroy_workqueue(hdev->workqueue);
3367         destroy_workqueue(hdev->req_workqueue);
3368 err:
3369         ida_simple_remove(&hci_index_ida, hdev->id);
3370
3371         return error;
3372 }
3373 EXPORT_SYMBOL(hci_register_dev);
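
/* Minimal transport driver sketch (hypothetical my_driver_* callbacks):
 * allocate an hci_dev, fill in the mandatory open/close/send hooks
 * checked above, and register it. On failure the caller still owns the
 * device and must free it.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_driver_open;
 *	hdev->close = my_driver_close;
 *	hdev->send  = my_driver_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */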
3374
3375 /* Unregister HCI device */
3376 void hci_unregister_dev(struct hci_dev *hdev)
3377 {
3378         int id;
3379
3380         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3381
3382         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3383
3384         id = hdev->id;
3385
3386         write_lock(&hci_dev_list_lock);
3387         list_del(&hdev->list);
3388         write_unlock(&hci_dev_list_lock);
3389
3390         hci_dev_do_close(hdev);
3391
3392         cancel_work_sync(&hdev->power_on);
3393
3394         if (!test_bit(HCI_INIT, &hdev->flags) &&
3395             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3396             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3397                 hci_dev_lock(hdev);
3398                 mgmt_index_removed(hdev);
3399                 hci_dev_unlock(hdev);
3400         }
3401
3402         /* mgmt_index_removed should take care of emptying the
3403          * pending list */
3404         BUG_ON(!list_empty(&hdev->mgmt_pending));
3405
3406         hci_notify(hdev, HCI_DEV_UNREG);
3407
3408         if (hdev->rfkill) {
3409                 rfkill_unregister(hdev->rfkill);
3410                 rfkill_destroy(hdev->rfkill);
3411         }
3412
3413         device_del(&hdev->dev);
3414
3415         debugfs_remove_recursive(hdev->debugfs);
3416
3417         destroy_workqueue(hdev->workqueue);
3418         destroy_workqueue(hdev->req_workqueue);
3419
3420         hci_dev_lock(hdev);
3421         hci_bdaddr_list_clear(&hdev->blacklist);
3422         hci_bdaddr_list_clear(&hdev->whitelist);
3423         hci_uuids_clear(hdev);
3424         hci_link_keys_clear(hdev);
3425         hci_smp_ltks_clear(hdev);
3426         hci_smp_irks_clear(hdev);
3427         hci_remote_oob_data_clear(hdev);
3428         hci_adv_instances_clear(hdev);
3429         hci_bdaddr_list_clear(&hdev->le_white_list);
3430         hci_conn_params_clear_all(hdev);
3431         hci_discovery_filter_clear(hdev);
3432         hci_dev_unlock(hdev);
3433
3434         hci_dev_put(hdev);
3435
3436         ida_simple_remove(&hci_index_ida, id);
3437 }
3438 EXPORT_SYMBOL(hci_unregister_dev);
3439
3440 /* Suspend HCI device */
3441 int hci_suspend_dev(struct hci_dev *hdev)
3442 {
3443         hci_notify(hdev, HCI_DEV_SUSPEND);
3444         return 0;
3445 }
3446 EXPORT_SYMBOL(hci_suspend_dev);
3447
3448 /* Resume HCI device */
3449 int hci_resume_dev(struct hci_dev *hdev)
3450 {
3451         hci_notify(hdev, HCI_DEV_RESUME);
3452         return 0;
3453 }
3454 EXPORT_SYMBOL(hci_resume_dev);
3455
3456 /* Reset HCI device */
3457 int hci_reset_dev(struct hci_dev *hdev)
3458 {
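        /* Synthesized HCI Hardware Error event: event code, a parameter
         * length of one byte and a hardware code of 0x00.
         */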
3459         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3460         struct sk_buff *skb;
3461
3462         skb = bt_skb_alloc(3, GFP_ATOMIC);
3463         if (!skb)
3464                 return -ENOMEM;
3465
3466         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3467         memcpy(skb_put(skb, 3), hw_err, 3);
3468
3469         /* Send Hardware Error to upper stack */
3470         return hci_recv_frame(hdev, skb);
3471 }
3472 EXPORT_SYMBOL(hci_reset_dev);
3473
3474 /* Receive frame from HCI drivers */
3475 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3476 {
3477         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3478                       !test_bit(HCI_INIT, &hdev->flags))) {
3479                 kfree_skb(skb);
3480                 return -ENXIO;
3481         }
3482
3483         /* Incoming skb */
3484         bt_cb(skb)->incoming = 1;
3485
3486         /* Time stamp */
3487         __net_timestamp(skb);
3488
3489         skb_queue_tail(&hdev->rx_q, skb);
3490         queue_work(hdev->workqueue, &hdev->rx_work);
3491
3492         return 0;
3493 }
3494 EXPORT_SYMBOL(hci_recv_frame);
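
/* Receive path sketch for a transport driver (hypothetical helper): the
 * driver sets the packet type before handing the frame to the core,
 * which queues it for hci_rx_work().
 *
 *	static int my_driver_rx_event(struct hci_dev *hdev,
 *				      struct sk_buff *skb)
 *	{
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		return hci_recv_frame(hdev, skb);
 *	}
 */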
3495
3496 /* Receive diagnostic message from HCI drivers */
3497 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3498 {
3499         /* Time stamp */
3500         __net_timestamp(skb);
3501
3502         /* Mark as diagnostic packet and send to monitor */
3503         bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3504         hci_send_to_monitor(hdev, skb);
3505
3506         kfree_skb(skb);
3507         return 0;
3508 }
3509 EXPORT_SYMBOL(hci_recv_diag);
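
/* Usage sketch (hypothetical driver): frames from a vendor specific
 * diagnostic channel are forwarded here instead of hci_recv_frame(),
 * so they reach the monitor socket without entering the normal RX path.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, count), buf, count);
 *	hci_recv_diag(hdev, skb);
 */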
3510
3511 /* ---- Interface to upper protocols ---- */
3512
3513 int hci_register_cb(struct hci_cb *cb)
3514 {
3515         BT_DBG("%p name %s", cb, cb->name);
3516
3517         mutex_lock(&hci_cb_list_lock);
3518         list_add_tail(&cb->list, &hci_cb_list);
3519         mutex_unlock(&hci_cb_list_lock);
3520
3521         return 0;
3522 }
3523 EXPORT_SYMBOL(hci_register_cb);
3524
3525 int hci_unregister_cb(struct hci_cb *cb)
3526 {
3527         BT_DBG("%p name %s", cb, cb->name);
3528
3529         mutex_lock(&hci_cb_list_lock);
3530         list_del(&cb->list);
3531         mutex_unlock(&hci_cb_list_lock);
3532
3533         return 0;
3534 }
3535 EXPORT_SYMBOL(hci_unregister_cb);
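
/* Usage sketch (hypothetical upper protocol): register a callback block
 * that the core invokes on connection events.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *		.disconn_cfm	= my_proto_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */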
3536
3537 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3538 {
3539         int err;
3540
3541         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3542
3543         /* Time stamp */
3544         __net_timestamp(skb);
3545
3546         /* Send copy to monitor */
3547         hci_send_to_monitor(hdev, skb);
3548
3549         if (atomic_read(&hdev->promisc)) {
3550                 /* Send copy to the sockets */
3551                 hci_send_to_sock(hdev, skb);
3552         }
3553
3554         /* Get rid of skb owner, prior to sending to the driver. */
3555         skb_orphan(skb);
3556
3557         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3558                 kfree_skb(skb);
3559                 return;
3560         }
3561
3562         err = hdev->send(hdev, skb);
3563         if (err < 0) {
3564                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3565                 kfree_skb(skb);
3566         }
3567 }
3568
3569 /* Send HCI command */
3570 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3571                  const void *param)
3572 {
3573         struct sk_buff *skb;
3574
3575         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3576
3577         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3578         if (!skb) {
3579                 BT_ERR("%s no memory for command", hdev->name);
3580                 return -ENOMEM;
3581         }
3582
3583         /* Stand-alone HCI commands must be flagged as
3584          * single-command requests.
3585          */
3586         bt_cb(skb)->req.start = true;
3587
3588         skb_queue_tail(&hdev->cmd_q, skb);
3589         queue_work(hdev->workqueue, &hdev->cmd_work);
3590
3591         return 0;
3592 }
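
/* Usage sketch: send a stand-alone Write Scan Enable command. The call
 * is fire-and-forget; the result arrives later as a Command Complete
 * event.
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */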
3593
3594 /* Get data from the previously sent command */
3595 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3596 {
3597         struct hci_command_hdr *hdr;
3598
3599         if (!hdev->sent_cmd)
3600                 return NULL;
3601
3602         hdr = (void *) hdev->sent_cmd->data;
3603
3604         if (hdr->opcode != cpu_to_le16(opcode))
3605                 return NULL;
3606
3607         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3608
3609         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3610 }
3611
3612 /* Send HCI command and wait for command complete event */
3613 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3614                              const void *param, u32 timeout)
3615 {
3616         struct sk_buff *skb;
3617
3618         if (!test_bit(HCI_UP, &hdev->flags))
3619                 return ERR_PTR(-ENETDOWN);
3620
3621         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3622
3623         hci_req_lock(hdev);
3624         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3625         hci_req_unlock(hdev);
3626
3627         return skb;
3628 }
3629 EXPORT_SYMBOL(hci_cmd_sync);
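
/* Usage sketch: synchronously issue a command and consume the returned
 * Command Complete parameters; the caller owns the skb and must
 * kfree_skb() it when done.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 */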
3630
3631 /* Send ACL data */
3632 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3633 {
3634         struct hci_acl_hdr *hdr;
3635         int len = skb->len;
3636
3637         skb_push(skb, HCI_ACL_HDR_SIZE);
3638         skb_reset_transport_header(skb);
3639         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3640         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3641         hdr->dlen   = cpu_to_le16(len);
3642 }
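
/* The 16-bit handle field packs the 12-bit connection handle together
 * with the packet boundary and broadcast flags in the upper bits;
 * hci_handle_pack(0x0001, ACL_START) yields 0x2001, for example.
 */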
3643
3644 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3645                           struct sk_buff *skb, __u16 flags)
3646 {
3647         struct hci_conn *conn = chan->conn;
3648         struct hci_dev *hdev = conn->hdev;
3649         struct sk_buff *list;
3650
3651         skb->len = skb_headlen(skb);
3652         skb->data_len = 0;
3653
3654         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3655
3656         switch (hdev->dev_type) {
3657         case HCI_BREDR:
3658                 hci_add_acl_hdr(skb, conn->handle, flags);
3659                 break;
3660         case HCI_AMP:
3661                 hci_add_acl_hdr(skb, chan->handle, flags);
3662                 break;
3663         default:
3664                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3665                 return;
3666         }
3667
3668         list = skb_shinfo(skb)->frag_list;
3669         if (!list) {
3670                 /* Non-fragmented */
3671                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3672
3673                 skb_queue_tail(queue, skb);
3674         } else {
3675                 /* Fragmented */
3676                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3677
3678                 skb_shinfo(skb)->frag_list = NULL;
3679
3680                 /* Queue all fragments atomically. We need to use spin_lock_bh
3681                  * here because of 6LoWPAN links, as there this function is
3682                  * called from softirq and using normal spin lock could cause
3683                  * deadlocks.
3684                  */
3685                 spin_lock_bh(&queue->lock);
3686
3687                 __skb_queue_tail(queue, skb);
3688
3689                 flags &= ~ACL_START;
3690                 flags |= ACL_CONT;
3691                 do {
3692                         skb = list; list = list->next;
3693
3694                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3695                         hci_add_acl_hdr(skb, conn->handle, flags);
3696
3697                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3698
3699                         __skb_queue_tail(queue, skb);
3700                 } while (list);
3701
3702                 spin_unlock_bh(&queue->lock);
3703         }
3704 }
3705
3706 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3707 {
3708         struct hci_dev *hdev = chan->conn->hdev;
3709
3710         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3711
3712         hci_queue_acl(chan, &chan->data_q, skb, flags);
3713
3714         queue_work(hdev->workqueue, &hdev->tx_work);
3715 }
3716
3717 /* Send SCO data */
3718 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3719 {
3720         struct hci_dev *hdev = conn->hdev;
3721         struct hci_sco_hdr hdr;
3722
3723         BT_DBG("%s len %d", hdev->name, skb->len);
3724
3725         hdr.handle = cpu_to_le16(conn->handle);
3726         hdr.dlen   = skb->len;
3727
3728         skb_push(skb, HCI_SCO_HDR_SIZE);
3729         skb_reset_transport_header(skb);
3730         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3731
3732         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3733
3734         skb_queue_tail(&conn->data_q, skb);
3735         queue_work(hdev->workqueue, &hdev->tx_work);
3736 }
3737
3738 /* ---- HCI TX task (outgoing data) ---- */
3739
3740 /* HCI Connection scheduler */
3741 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3742                                      int *quote)
3743 {
3744         struct hci_conn_hash *h = &hdev->conn_hash;
3745         struct hci_conn *conn = NULL, *c;
3746         unsigned int num = 0, min = ~0;
3747
3748         /* We don't have to lock device here. Connections are always
3749          * added and removed with TX task disabled. */
3750
3751         rcu_read_lock();
3752
3753         list_for_each_entry_rcu(c, &h->list, list) {
3754                 if (c->type != type || skb_queue_empty(&c->data_q))
3755                         continue;
3756
3757                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3758                         continue;
3759
3760                 num++;
3761
3762                 if (c->sent < min) {
3763                         min  = c->sent;
3764                         conn = c;
3765                 }
3766
3767                 if (hci_conn_num(hdev, type) == num)
3768                         break;
3769         }
3770
3771         rcu_read_unlock();
3772
3773         if (conn) {
3774                 int cnt, q;
3775
3776                 switch (conn->type) {
3777                 case ACL_LINK:
3778                         cnt = hdev->acl_cnt;
3779                         break;
3780                 case SCO_LINK:
3781                 case ESCO_LINK:
3782                         cnt = hdev->sco_cnt;
3783                         break;
3784                 case LE_LINK:
3785                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3786                         break;
3787                 default:
3788                         cnt = 0;
3789                         BT_ERR("Unknown link type");
3790                 }
3791
3792                 q = cnt / num;
3793                 *quote = q ? q : 1;
3794         } else
3795                 *quote = 0;
3796
3797         BT_DBG("conn %p quote %d", conn, *quote);
3798         return conn;
3799 }
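
/* Worked example of the quota above: with cnt = 8 free controller
 * buffers shared by num = 3 eligible connections, the selected (least
 * serviced) connection is granted q = 8 / 3 = 2 packets this round;
 * when the division yields 0, a minimum quote of 1 keeps traffic
 * moving.
 */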
3800
3801 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3802 {
3803         struct hci_conn_hash *h = &hdev->conn_hash;
3804         struct hci_conn *c;
3805
3806         BT_ERR("%s link tx timeout", hdev->name);
3807
3808         rcu_read_lock();
3809
3810         /* Kill stalled connections */
3811         list_for_each_entry_rcu(c, &h->list, list) {
3812                 if (c->type == type && c->sent) {
3813                         BT_ERR("%s killing stalled connection %pMR",
3814                                hdev->name, &c->dst);
3815                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3816                 }
3817         }
3818
3819         rcu_read_unlock();
3820 }
3821
3822 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3823                                       int *quote)
3824 {
3825         struct hci_conn_hash *h = &hdev->conn_hash;
3826         struct hci_chan *chan = NULL;
3827         unsigned int num = 0, min = ~0, cur_prio = 0;
3828         struct hci_conn *conn;
3829         int cnt, q, conn_num = 0;
3830
3831         BT_DBG("%s", hdev->name);
3832
3833         rcu_read_lock();
3834
3835         list_for_each_entry_rcu(conn, &h->list, list) {
3836                 struct hci_chan *tmp;
3837
3838                 if (conn->type != type)
3839                         continue;
3840
3841                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3842                         continue;
3843
3844                 conn_num++;
3845
3846                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3847                         struct sk_buff *skb;
3848
3849                         if (skb_queue_empty(&tmp->data_q))
3850                                 continue;
3851
3852                         skb = skb_peek(&tmp->data_q);
3853                         if (skb->priority < cur_prio)
3854                                 continue;
3855
3856                         if (skb->priority > cur_prio) {
3857                                 num = 0;
3858                                 min = ~0;
3859                                 cur_prio = skb->priority;
3860                         }
3861
3862                         num++;
3863
3864                         if (conn->sent < min) {
3865                                 min  = conn->sent;
3866                                 chan = tmp;
3867                         }
3868                 }
3869
3870                 if (hci_conn_num(hdev, type) == conn_num)
3871                         break;
3872         }
3873
3874         rcu_read_unlock();
3875
3876         if (!chan)
3877                 return NULL;
3878
3879         switch (chan->conn->type) {
3880         case ACL_LINK:
3881                 cnt = hdev->acl_cnt;
3882                 break;
3883         case AMP_LINK:
3884                 cnt = hdev->block_cnt;
3885                 break;
3886         case SCO_LINK:
3887         case ESCO_LINK:
3888                 cnt = hdev->sco_cnt;
3889                 break;
3890         case LE_LINK:
3891                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3892                 break;
3893         default:
3894                 cnt = 0;
3895                 BT_ERR("Unknown link type");
3896         }
3897
3898         q = cnt / num;
3899         *quote = q ? q : 1;
3900         BT_DBG("chan %p quote %d", chan, *quote);
3901         return chan;
3902 }
3903
3904 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3905 {
3906         struct hci_conn_hash *h = &hdev->conn_hash;
3907         struct hci_conn *conn;
3908         int num = 0;
3909
3910         BT_DBG("%s", hdev->name);
3911
3912         rcu_read_lock();
3913
3914         list_for_each_entry_rcu(conn, &h->list, list) {
3915                 struct hci_chan *chan;
3916
3917                 if (conn->type != type)
3918                         continue;
3919
3920                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3921                         continue;
3922
3923                 num++;
3924
3925                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3926                         struct sk_buff *skb;
3927
3928                         if (chan->sent) {
3929                                 chan->sent = 0;
3930                                 continue;
3931                         }
3932
3933                         if (skb_queue_empty(&chan->data_q))
3934                                 continue;
3935
3936                         skb = skb_peek(&chan->data_q);
3937                         if (skb->priority >= HCI_PRIO_MAX - 1)
3938                                 continue;
3939
3940                         skb->priority = HCI_PRIO_MAX - 1;
3941
3942                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3943                                skb->priority);
3944                 }
3945
3946                 if (hci_conn_num(hdev, type) == num)
3947                         break;
3948         }
3949
3950         rcu_read_unlock();
3952 }
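
/* Starvation handling: a channel that has queued data but sent nothing
 * during the last round gets its head packet promoted to
 * HCI_PRIO_MAX - 1, so the next hci_chan_sent() pass prefers it over
 * channels that already transmitted.
 */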
3953
3954 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3955 {
3956         /* Calculate count of blocks used by this packet */
3957         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3958 }
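
/* Worked example: with block_len = 64 and an skb carrying a 4 byte ACL
 * header plus 100 bytes of payload, DIV_ROUND_UP(100, 64) = 2 blocks
 * are charged against hdev->block_cnt.
 */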
3959
3960 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3961 {
3962         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3963                 /* ACL tx timeout must be longer than maximum
3964                  * link supervision timeout (40.9 seconds) */
3965                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3966                                        HCI_ACL_TX_TIMEOUT))
3967                         hci_link_tx_to(hdev, ACL_LINK);
3968         }
3969 }
3970
3971 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3972 {
3973         unsigned int cnt = hdev->acl_cnt;
3974         struct hci_chan *chan;
3975         struct sk_buff *skb;
3976         int quote;
3977
3978         __check_timeout(hdev, cnt);
3979
3980         while (hdev->acl_cnt &&
3981                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3982                 u32 priority = (skb_peek(&chan->data_q))->priority;
3983                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3984                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3985                                skb->len, skb->priority);
3986
3987                         /* Stop if priority has changed */
3988                         if (skb->priority < priority)
3989                                 break;
3990
3991                         skb = skb_dequeue(&chan->data_q);
3992
3993                         hci_conn_enter_active_mode(chan->conn,
3994                                                    bt_cb(skb)->force_active);
3995
3996                         hci_send_frame(hdev, skb);
3997                         hdev->acl_last_tx = jiffies;
3998
3999                         hdev->acl_cnt--;
4000                         chan->sent++;
4001                         chan->conn->sent++;
4002                 }
4003         }
4004
4005         if (cnt != hdev->acl_cnt)
4006                 hci_prio_recalculate(hdev, ACL_LINK);
4007 }
4008
4009 static void hci_sched_acl_blk(struct hci_dev *hdev)
4010 {
4011         unsigned int cnt = hdev->block_cnt;
4012         struct hci_chan *chan;
4013         struct sk_buff *skb;
4014         int quote;
4015         u8 type;
4016
4017         __check_timeout(hdev, cnt);
4018
4019         BT_DBG("%s", hdev->name);
4020
4021         if (hdev->dev_type == HCI_AMP)
4022                 type = AMP_LINK;
4023         else
4024                 type = ACL_LINK;
4025
4026         while (hdev->block_cnt > 0 &&
4027                (chan = hci_chan_sent(hdev, type, &quote))) {
4028                 u32 priority = (skb_peek(&chan->data_q))->priority;
4029                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4030                         int blocks;
4031
4032                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4033                                skb->len, skb->priority);
4034
4035                         /* Stop if priority has changed */
4036                         if (skb->priority < priority)
4037                                 break;
4038
4039                         skb = skb_dequeue(&chan->data_q);
4040
4041                         blocks = __get_blocks(hdev, skb);
4042                         if (blocks > hdev->block_cnt)
4043                                 return;
4044
4045                         hci_conn_enter_active_mode(chan->conn,
4046                                                    bt_cb(skb)->force_active);
4047
4048                         hci_send_frame(hdev, skb);
4049                         hdev->acl_last_tx = jiffies;
4050
4051                         hdev->block_cnt -= blocks;
4052                         quote -= blocks;
4053
4054                         chan->sent += blocks;
4055                         chan->conn->sent += blocks;
4056                 }
4057         }
4058
4059         if (cnt != hdev->block_cnt)
4060                 hci_prio_recalculate(hdev, type);
4061 }
4062
4063 static void hci_sched_acl(struct hci_dev *hdev)
4064 {
4065         BT_DBG("%s", hdev->name);
4066
4067         /* No ACL links on a BR/EDR controller, nothing to schedule */
4068         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4069                 return;
4070
4071         /* No AMP links on an AMP controller, nothing to schedule */
4072         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4073                 return;
4074
4075         switch (hdev->flow_ctl_mode) {
4076         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4077                 hci_sched_acl_pkt(hdev);
4078                 break;
4079
4080         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4081                 hci_sched_acl_blk(hdev);
4082                 break;
4083         }
4084 }
4085
4086 /* Schedule SCO */
4087 static void hci_sched_sco(struct hci_dev *hdev)
4088 {
4089         struct hci_conn *conn;
4090         struct sk_buff *skb;
4091         int quote;
4092
4093         BT_DBG("%s", hdev->name);
4094
4095         if (!hci_conn_num(hdev, SCO_LINK))
4096                 return;
4097
4098         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4099                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4100                         BT_DBG("skb %p len %d", skb, skb->len);
4101                         hci_send_frame(hdev, skb);
4102
4103                         conn->sent++;
4104                         if (conn->sent == ~0)
4105                                 conn->sent = 0;
4106                 }
4107         }
4108 }
4109
4110 static void hci_sched_esco(struct hci_dev *hdev)
4111 {
4112         struct hci_conn *conn;
4113         struct sk_buff *skb;
4114         int quote;
4115
4116         BT_DBG("%s", hdev->name);
4117
4118         if (!hci_conn_num(hdev, ESCO_LINK))
4119                 return;
4120
4121         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4122                                                      &quote))) {
4123                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4124                         BT_DBG("skb %p len %d", skb, skb->len);
4125                         hci_send_frame(hdev, skb);
4126
4127                         conn->sent++;
4128                         if (conn->sent == ~0)
4129                                 conn->sent = 0;
4130                 }
4131         }
4132 }
4133
4134 static void hci_sched_le(struct hci_dev *hdev)
4135 {
4136         struct hci_chan *chan;
4137         struct sk_buff *skb;
4138         int quote, cnt, tmp;
4139
4140         BT_DBG("%s", hdev->name);
4141
4142         if (!hci_conn_num(hdev, LE_LINK))
4143                 return;
4144
4145         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4146                 /* LE tx timeout must be longer than maximum
4147                  * link supervision timeout (40.9 seconds) */
4148                 if (!hdev->le_cnt && hdev->le_pkts &&
4149                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4150                         hci_link_tx_to(hdev, LE_LINK);
4151         }
4152
4153         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4154         tmp = cnt;
4155         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4156                 u32 priority = (skb_peek(&chan->data_q))->priority;
4157                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4158                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4159                                skb->len, skb->priority);
4160
4161                         /* Stop if priority has changed */
4162                         if (skb->priority < priority)
4163                                 break;
4164
4165                         skb = skb_dequeue(&chan->data_q);
4166
4167                         hci_send_frame(hdev, skb);
4168                         hdev->le_last_tx = jiffies;
4169
4170                         cnt--;
4171                         chan->sent++;
4172                         chan->conn->sent++;
4173                 }
4174         }
4175
4176         if (hdev->le_pkts)
4177                 hdev->le_cnt = cnt;
4178         else
4179                 hdev->acl_cnt = cnt;
4180
4181         if (cnt != tmp)
4182                 hci_prio_recalculate(hdev, LE_LINK);
4183 }
4184
4185 static void hci_tx_work(struct work_struct *work)
4186 {
4187         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4188         struct sk_buff *skb;
4189
4190         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4191                hdev->sco_cnt, hdev->le_cnt);
4192
4193         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4194                 /* Schedule queues and send stuff to HCI driver */
4195                 hci_sched_acl(hdev);
4196                 hci_sched_sco(hdev);
4197                 hci_sched_esco(hdev);
4198                 hci_sched_le(hdev);
4199         }
4200
4201         /* Send next queued raw (unknown type) packet */
4202         while ((skb = skb_dequeue(&hdev->raw_q)))
4203                 hci_send_frame(hdev, skb);
4204 }
4205
4206 /* ----- HCI RX task (incoming data processing) ----- */
4207
4208 /* ACL data packet */
4209 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4210 {
4211         struct hci_acl_hdr *hdr = (void *) skb->data;
4212         struct hci_conn *conn;
4213         __u16 handle, flags;
4214
4215         skb_pull(skb, HCI_ACL_HDR_SIZE);
4216
4217         handle = __le16_to_cpu(hdr->handle);
4218         flags  = hci_flags(handle);
4219         handle = hci_handle(handle);
4220
4221         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4222                handle, flags);
4223
4224         hdev->stat.acl_rx++;
4225
4226         hci_dev_lock(hdev);
4227         conn = hci_conn_hash_lookup_handle(hdev, handle);
4228         hci_dev_unlock(hdev);
4229
4230         if (conn) {
4231                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4232
4233                 /* Send to upper protocol */
4234                 l2cap_recv_acldata(conn, skb, flags);
4235                 return;
4236         } else {
4237                 BT_ERR("%s ACL packet for unknown connection handle %d",
4238                        hdev->name, handle);
4239         }
4240
4241         kfree_skb(skb);
4242 }
4243
4244 /* SCO data packet */
4245 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4246 {
4247         struct hci_sco_hdr *hdr = (void *) skb->data;
4248         struct hci_conn *conn;
4249         __u16 handle;
4250
4251         skb_pull(skb, HCI_SCO_HDR_SIZE);
4252
4253         handle = __le16_to_cpu(hdr->handle);
4254
4255         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4256
4257         hdev->stat.sco_rx++;
4258
4259         hci_dev_lock(hdev);
4260         conn = hci_conn_hash_lookup_handle(hdev, handle);
4261         hci_dev_unlock(hdev);
4262
4263         if (conn) {
4264                 /* Send to upper protocol */
4265                 sco_recv_scodata(conn, skb);
4266                 return;
4267         } else {
4268                 BT_ERR("%s SCO packet for unknown connection handle %d",
4269                        hdev->name, handle);
4270         }
4271
4272         kfree_skb(skb);
4273 }
4274
4275 static bool hci_req_is_complete(struct hci_dev *hdev)
4276 {
4277         struct sk_buff *skb;
4278
4279         skb = skb_peek(&hdev->cmd_q);
4280         if (!skb)
4281                 return true;
4282
4283         return bt_cb(skb)->req.start;
4284 }
4285
4286 static void hci_resend_last(struct hci_dev *hdev)
4287 {
4288         struct hci_command_hdr *sent;
4289         struct sk_buff *skb;
4290         u16 opcode;
4291
4292         if (!hdev->sent_cmd)
4293                 return;
4294
4295         sent = (void *) hdev->sent_cmd->data;
4296         opcode = __le16_to_cpu(sent->opcode);
4297         if (opcode == HCI_OP_RESET)
4298                 return;
4299
4300         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4301         if (!skb)
4302                 return;
4303
4304         skb_queue_head(&hdev->cmd_q, skb);
4305         queue_work(hdev->workqueue, &hdev->cmd_work);
4306 }
4307
4308 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4309                           hci_req_complete_t *req_complete,
4310                           hci_req_complete_skb_t *req_complete_skb)
4311 {
4312         struct sk_buff *skb;
4313         unsigned long flags;
4314
4315         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4316
4317         /* If the completed command doesn't match the last one that was
4318          * sent, we need to do special handling of it.
4319          */
4320         if (!hci_sent_cmd_data(hdev, opcode)) {
4321                 /* Some CSR based controllers generate a spontaneous
4322                  * reset complete event during init and any pending
4323                  * command will never be completed. In such a case we
4324                  * need to resend whatever was the last sent
4325                  * command.
4326                  */
4327                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4328                         hci_resend_last(hdev);
4329
4330                 return;
4331         }
4332
4333         /* If the command succeeded and there are still more commands in
4334          * this request, the request is not yet complete.
4335          */
4336         if (!status && !hci_req_is_complete(hdev))
4337                 return;
4338
4339         /* If this was the last command in a request, the complete
4340          * callback would be found in hdev->sent_cmd instead of the
4341          * command queue (hdev->cmd_q).
4342          */
4343         if (bt_cb(hdev->sent_cmd)->req.complete) {
4344                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4345                 return;
4346         }
4347
4348         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4349                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4350                 return;
4351         }
4352
4353         /* Remove all pending commands belonging to this request */
4354         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4355         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4356                 if (bt_cb(skb)->req.start) {
4357                         __skb_queue_head(&hdev->cmd_q, skb);
4358                         break;
4359                 }
4360
4361                 *req_complete = bt_cb(skb)->req.complete;
4362                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4363                 kfree_skb(skb);
4364         }
4365         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4366 }
4367
4368 static void hci_rx_work(struct work_struct *work)
4369 {
4370         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4371         struct sk_buff *skb;
4372
4373         BT_DBG("%s", hdev->name);
4374
4375         while ((skb = skb_dequeue(&hdev->rx_q))) {
4376                 /* Send copy to monitor */
4377                 hci_send_to_monitor(hdev, skb);
4378
4379                 if (atomic_read(&hdev->promisc)) {
4380                         /* Send copy to the sockets */
4381                         hci_send_to_sock(hdev, skb);
4382                 }
4383
4384                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4385                         kfree_skb(skb);
4386                         continue;
4387                 }
4388
4389                 if (test_bit(HCI_INIT, &hdev->flags)) {
4390                         /* Don't process data packets in this state. */
4391                         switch (bt_cb(skb)->pkt_type) {
4392                         case HCI_ACLDATA_PKT:
4393                         case HCI_SCODATA_PKT:
4394                                 kfree_skb(skb);
4395                                 continue;
4396                         }
4397                 }
4398
4399                 /* Process frame */
4400                 switch (bt_cb(skb)->pkt_type) {
4401                 case HCI_EVENT_PKT:
4402                         BT_DBG("%s Event packet", hdev->name);
4403                         hci_event_packet(hdev, skb);
4404                         break;
4405
4406                 case HCI_ACLDATA_PKT:
4407                         BT_DBG("%s ACL data packet", hdev->name);
4408                         hci_acldata_packet(hdev, skb);
4409                         break;
4410
4411                 case HCI_SCODATA_PKT:
4412                         BT_DBG("%s SCO data packet", hdev->name);
4413                         hci_scodata_packet(hdev, skb);
4414                         break;
4415
4416                 default:
4417                         kfree_skb(skb);
4418                         break;
4419                 }
4420         }
4421 }
4422
4423 static void hci_cmd_work(struct work_struct *work)
4424 {
4425         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4426         struct sk_buff *skb;
4427
4428         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4429                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4430
4431         /* Send queued commands */
4432         if (atomic_read(&hdev->cmd_cnt)) {
4433                 skb = skb_dequeue(&hdev->cmd_q);
4434                 if (!skb)
4435                         return;
4436
4437                 kfree_skb(hdev->sent_cmd);
4438
4439                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4440                 if (hdev->sent_cmd) {
4441                         atomic_dec(&hdev->cmd_cnt);
4442                         hci_send_frame(hdev, skb);
4443                         if (test_bit(HCI_RESET, &hdev->flags))
4444                                 cancel_delayed_work(&hdev->cmd_timer);
4445                         else
4446                                 schedule_delayed_work(&hdev->cmd_timer,
4447                                                       HCI_CMD_TIMEOUT);
4448                 } else {
4449                         skb_queue_head(&hdev->cmd_q, skb);
4450                         queue_work(hdev->workqueue, &hdev->cmd_work);
4451                 }
4452         }
4453 }
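
/* Command flow control in short: cmd_cnt mirrors how many commands the
 * controller is currently willing to accept (replenished from the
 * Command Complete/Status events). Each transmitted command decrements
 * it and arms cmd_timer so a stalled controller is caught by the
 * hci_cmd_timeout() work after HCI_CMD_TIMEOUT.
 */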