/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
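
/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the
 * dut_mode entry registered in hci_debugfs_create_basic() below for hci0
 * lives at
 *
 *      /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * and writing "Y"/"1" or "N"/"0" (the forms strtobool() accepts) toggles
 * Device Under Test mode, provided the device is up.
 */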

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;
        /* When the diagnostic flags are not persistent and the transport
         * is not active, there is no need for the vendor callback.
         *
         * Instead, just store the desired value. If needed, the setting
         * will be programmed when the controller is powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
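
/* Both entries are created under hdev->debugfs, i.e. the per-controller
 * bluetooth/hciX debugfs directory, and go away together with that
 * directory when the controller is unregistered.
 */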

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this, so it's placed conditionally in the second
         * stage of init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 = 32000 baseband
         * slots of 0.625 ms each, i.e. 20000 ms)
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
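
/* Mask layout sketch: the eight bytes above form a little-endian 64-bit
 * mask in which bit (n - 1) enables the event with code n.  For example,
 * the events[5] |= 0x20 line sets bit 45, i.e. event code 0x2e, which is
 * the Sniff Subrating event.
 */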

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However, some controllers report
                 * max_page as 0 as long as SSP has not been enabled. To
                 * get proper debugging output, force max_page to at
                 * least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        return 0;
}

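/* Init stage summary: hci_init1_req resets the controller and reads basic
 * identity information, hci_init2_req does transport-specific setup,
 * hci_init3_req programs the event masks and LE configuration, and
 * hci_init4_req enables optional features.  As enforced below, only
 * HCI_BREDR type controllers run stages three and four.
 */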
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * a configured state. When the controller is marked as
         * unconfigured, this initialization procedure is not run.
         *
         * This means that a controller may run through its setup phase
         * and then discover missing settings. In that case, this function
         * is not called here; it will only be called later, during the
         * config phase.
         *
         * So create the debugfs entries and register the SMP channels
         * only when in the setup phase or the config phase.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans: scan is a bitmask of SCAN_INQUIRY (0x01)
         * and SCAN_PAGE (0x02); SCAN_DISABLED (0x00) turns both off.
         */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}
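
/* hci_scan_req, hci_auth_req, hci_encrypt_req and hci_linkpol_req back the
 * HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL ioctls
 * respectively; each one just queues a single HCI command.
 */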

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
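
/* A typical caller balances the implicit hci_dev_hold() with hci_dev_put():
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              ...
 *              hci_dev_put(hdev);
 *      }
 */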

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
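
/* The resolve list is thus kept roughly sorted by signal strength
 * (smallest |RSSI|, i.e. strongest signal, first), so remote name
 * resolution is attempted for the closest devices first; entries whose
 * name lookup is already pending keep their position.
 */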

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
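
/* Userspace sketch (hypothetical values): this path is driven by the
 * HCIINQUIRY ioctl on a raw HCI socket, with the inquiry_info results
 * placed directly after the request header, e.g.
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } buf = {
 *              .ir = {
 *                      .dev_id  = 0,
 *                      .lap     = { 0x33, 0x8b, 0x9e },  // GIAC 0x9e8b33
 *                      .length  = 8,                     // 8 * 1.28 s
 *                      .num_rsp = 8,
 *              },
 *      };
 *
 *      ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */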

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure has
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_BREDR) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as a user channel. Trying to bring them up as normal devices
         * will result in a failure; only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the
         * HCI_USER_CHANNEL flag will be set first, before attempting to
         * open the device.
         */
1456         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1457             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1458                 err = -EOPNOTSUPP;
1459                 goto done;
1460         }
1461
1462         /* We need to ensure that no other power on/off work is pending
1463          * before proceeding to call hci_dev_do_open. This is
1464          * particularly important if the setup procedure has not yet
1465          * completed.
1466          */
1467         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1468                 cancel_delayed_work(&hdev->power_off);
1469
1470         /* After this call it is guaranteed that the setup procedure
1471          * has finished. This means that error conditions like RFKILL
1472          * or no valid public or static random address apply.
1473          */
1474         flush_workqueue(hdev->req_workqueue);
1475
1476         /* For controllers not using the management interface and that
1477          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1478          * so that pairing works for them. Once the management interface
1479          * is in use this bit will be cleared again and userspace has
1480          * to explicitly enable it.
1481          */
1482         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1483             !hci_dev_test_flag(hdev, HCI_MGMT))
1484                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1485
1486         err = hci_dev_do_open(hdev);
1487
1488 done:
1489         hci_dev_put(hdev);
1490         return err;
1491 }
1492
1493 /* This function requires the caller holds hdev->lock */
1494 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1495 {
1496         struct hci_conn_params *p;
1497
1498         list_for_each_entry(p, &hdev->le_conn_params, list) {
1499                 if (p->conn) {
1500                         hci_conn_drop(p->conn);
1501                         hci_conn_put(p->conn);
1502                         p->conn = NULL;
1503                 }
1504                 list_del_init(&p->action);
1505         }
1506
1507         BT_DBG("All LE pending actions cleared");
1508 }
1509
1510 int hci_dev_do_close(struct hci_dev *hdev)
1511 {
1512         bool auto_off;
1513
1514         BT_DBG("%s %p", hdev->name, hdev);
1515
1516         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1517             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1518             test_bit(HCI_UP, &hdev->flags)) {
1519                 /* Execute vendor specific shutdown routine */
1520                 if (hdev->shutdown)
1521                         hdev->shutdown(hdev);
1522         }
1523
1524         cancel_delayed_work(&hdev->power_off);
1525
1526         hci_request_cancel_all(hdev);
1527         hci_req_sync_lock(hdev);
1528
1529         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1530                 cancel_delayed_work_sync(&hdev->cmd_timer);
1531                 hci_req_sync_unlock(hdev);
1532                 return 0;
1533         }
1534
1535         /* Flush the RX and TX work items */
1536         flush_work(&hdev->tx_work);
1537         flush_work(&hdev->rx_work);
1538
1539         if (hdev->discov_timeout > 0) {
1540                 hdev->discov_timeout = 0;
1541                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1542                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1543         }
1544
1545         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1546                 cancel_delayed_work(&hdev->service_cache);
1547
1548         if (hci_dev_test_flag(hdev, HCI_MGMT))
1549                 cancel_delayed_work_sync(&hdev->rpa_expired);
1550
1551         /* Avoid potential lockdep warnings from the *_flush() calls by
1552          * ensuring the workqueue is empty up front.
1553          */
1554         drain_workqueue(hdev->workqueue);
1555
1556         hci_dev_lock(hdev);
1557
1558         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1559
1560         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1561
1562         if (!auto_off && hdev->dev_type == HCI_BREDR &&
1563             hci_dev_test_flag(hdev, HCI_MGMT))
1564                 __mgmt_power_off(hdev);
1565
1566         hci_inquiry_cache_flush(hdev);
1567         hci_pend_le_actions_clear(hdev);
1568         hci_conn_hash_flush(hdev);
1569         hci_dev_unlock(hdev);
1570
1571         smp_unregister(hdev);
1572
1573         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1574
1575         if (hdev->flush)
1576                 hdev->flush(hdev);
1577
1578         /* Reset device */
1579         skb_queue_purge(&hdev->cmd_q);
1580         atomic_set(&hdev->cmd_cnt, 1);
1581         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1582             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1583                 set_bit(HCI_INIT, &hdev->flags);
1584                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1585                 clear_bit(HCI_INIT, &hdev->flags);
1586         }
1587
1588         /* Flush the cmd work item */
1589         flush_work(&hdev->cmd_work);
1590
1591         /* Drop queues */
1592         skb_queue_purge(&hdev->rx_q);
1593         skb_queue_purge(&hdev->cmd_q);
1594         skb_queue_purge(&hdev->raw_q);
1595
1596         /* Drop last sent command */
1597         if (hdev->sent_cmd) {
1598                 cancel_delayed_work_sync(&hdev->cmd_timer);
1599                 kfree_skb(hdev->sent_cmd);
1600                 hdev->sent_cmd = NULL;
1601         }
1602
1603         clear_bit(HCI_RUNNING, &hdev->flags);
1604         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1605
1606         /* After this point our queues are empty
1607          * and no tasks are scheduled. */
1608         hdev->close(hdev);
1609
1610         /* Clear flags */
1611         hdev->flags &= BIT(HCI_RAW);
1612         hci_dev_clear_volatile_flags(hdev);
1613
1614         /* Controller radio is available but is currently powered down */
1615         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1616
1617         memset(hdev->eir, 0, sizeof(hdev->eir));
1618         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1619         bacpy(&hdev->random_addr, BDADDR_ANY);
1620
1621         hci_req_sync_unlock(hdev);
1622
1623         hci_dev_put(hdev);
1624         return 0;
1625 }
1626
1627 int hci_dev_close(__u16 dev)
1628 {
1629         struct hci_dev *hdev;
1630         int err;
1631
1632         hdev = hci_dev_get(dev);
1633         if (!hdev)
1634                 return -ENODEV;
1635
1636         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1637                 err = -EBUSY;
1638                 goto done;
1639         }
1640
1641         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1642                 cancel_delayed_work(&hdev->power_off);
1643
1644         err = hci_dev_do_close(hdev);
1645
1646 done:
1647         hci_dev_put(hdev);
1648         return err;
1649 }
1650
1651 static int hci_dev_do_reset(struct hci_dev *hdev)
1652 {
1653         int ret;
1654
1655         BT_DBG("%s %p", hdev->name, hdev);
1656
1657         hci_req_sync_lock(hdev);
1658
1659         /* Drop queues */
1660         skb_queue_purge(&hdev->rx_q);
1661         skb_queue_purge(&hdev->cmd_q);
1662
1663         /* Avoid potential lockdep warnings from the *_flush() calls by
1664          * ensuring the workqueue is empty up front.
1665          */
1666         drain_workqueue(hdev->workqueue);
1667
1668         hci_dev_lock(hdev);
1669         hci_inquiry_cache_flush(hdev);
1670         hci_conn_hash_flush(hdev);
1671         hci_dev_unlock(hdev);
1672
1673         if (hdev->flush)
1674                 hdev->flush(hdev);
1675
1676         atomic_set(&hdev->cmd_cnt, 1);
1677         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1678
1679         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1680
1681         hci_req_sync_unlock(hdev);
1682         return ret;
1683 }
1684
1685 int hci_dev_reset(__u16 dev)
1686 {
1687         struct hci_dev *hdev;
1688         int err;
1689
1690         hdev = hci_dev_get(dev);
1691         if (!hdev)
1692                 return -ENODEV;
1693
1694         if (!test_bit(HCI_UP, &hdev->flags)) {
1695                 err = -ENETDOWN;
1696                 goto done;
1697         }
1698
1699         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1700                 err = -EBUSY;
1701                 goto done;
1702         }
1703
1704         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1705                 err = -EOPNOTSUPP;
1706                 goto done;
1707         }
1708
1709         err = hci_dev_do_reset(hdev);
1710
1711 done:
1712         hci_dev_put(hdev);
1713         return err;
1714 }
1715
1716 int hci_dev_reset_stat(__u16 dev)
1717 {
1718         struct hci_dev *hdev;
1719         int ret = 0;
1720
1721         hdev = hci_dev_get(dev);
1722         if (!hdev)
1723                 return -ENODEV;
1724
1725         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1726                 ret = -EBUSY;
1727                 goto done;
1728         }
1729
1730         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1731                 ret = -EOPNOTSUPP;
1732                 goto done;
1733         }
1734
1735         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1736
1737 done:
1738         hci_dev_put(hdev);
1739         return ret;
1740 }
1741
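/* Keep the mgmt CONNECTABLE/DISCOVERABLE flags in sync with a scan mode
 * written through the legacy HCISETSCAN ioctl: SCAN_PAGE maps to
 * connectable and SCAN_INQUIRY to discoverable.
 */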
1742 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1743 {
1744         bool conn_changed, discov_changed;
1745
1746         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1747
1748         if ((scan & SCAN_PAGE))
1749                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1750                                                           HCI_CONNECTABLE);
1751         else
1752                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1753                                                            HCI_CONNECTABLE);
1754
1755         if ((scan & SCAN_INQUIRY)) {
1756                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1757                                                             HCI_DISCOVERABLE);
1758         } else {
1759                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1760                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1761                                                              HCI_DISCOVERABLE);
1762         }
1763
1764         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1765                 return;
1766
1767         if (conn_changed || discov_changed) {
1768                 /* In case this was disabled through mgmt */
1769                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1770
1771                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1772                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1773
1774                 mgmt_new_settings(hdev);
1775         }
1776 }
1777
1778 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1779 {
1780         struct hci_dev *hdev;
1781         struct hci_dev_req dr;
1782         int err = 0;
1783
1784         if (copy_from_user(&dr, arg, sizeof(dr)))
1785                 return -EFAULT;
1786
1787         hdev = hci_dev_get(dr.dev_id);
1788         if (!hdev)
1789                 return -ENODEV;
1790
1791         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1792                 err = -EBUSY;
1793                 goto done;
1794         }
1795
1796         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1797                 err = -EOPNOTSUPP;
1798                 goto done;
1799         }
1800
1801         if (hdev->dev_type != HCI_BREDR) {
1802                 err = -EOPNOTSUPP;
1803                 goto done;
1804         }
1805
1806         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1807                 err = -EOPNOTSUPP;
1808                 goto done;
1809         }
1810
1811         switch (cmd) {
1812         case HCISETAUTH:
1813                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1814                                    HCI_INIT_TIMEOUT, NULL);
1815                 break;
1816
1817         case HCISETENCRYPT:
1818                 if (!lmp_encrypt_capable(hdev)) {
1819                         err = -EOPNOTSUPP;
1820                         break;
1821                 }
1822
1823                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1824                         /* Auth must be enabled first */
1825                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1826                                            HCI_INIT_TIMEOUT, NULL);
1827                         if (err)
1828                                 break;
1829                 }
1830
1831                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1832                                    HCI_INIT_TIMEOUT, NULL);
1833                 break;
1834
1835         case HCISETSCAN:
1836                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1837                                    HCI_INIT_TIMEOUT, NULL);
1838
1839                 /* Ensure that the connectable and discoverable states
1840                  * get correctly modified as this was a non-mgmt change.
1841                  */
1842                 if (!err)
1843                         hci_update_scan_state(hdev, dr.dev_opt);
1844                 break;
1845
1846         case HCISETLINKPOL:
1847                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1848                                    HCI_INIT_TIMEOUT, NULL);
1849                 break;
1850
1851         case HCISETLINKMODE:
1852                 hdev->link_mode = ((__u16) dr.dev_opt) &
1853                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1854                 break;
1855
1856         case HCISETPTYPE:
1857                 hdev->pkt_type = (__u16) dr.dev_opt;
1858                 break;
1859
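        /* For the two MTU ioctls below, dev_opt packs two consecutive
         * __u16 values: the first is the packet count and the second
         * the MTU, hence the pointer arithmetic in the case bodies.
         */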
1860         case HCISETACLMTU:
1861                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1862                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1863                 break;
1864
1865         case HCISETSCOMTU:
1866                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1867                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1868                 break;
1869
1870         default:
1871                 err = -EINVAL;
1872                 break;
1873         }
1874
1875 done:
1876         hci_dev_put(hdev);
1877         return err;
1878 }
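/* Illustrative only (not part of this file): user space reaches the
 * ioctl helpers above through a raw HCI socket. A minimal sketch,
 * assuming BlueZ-style user-space headers, to enable page and inquiry
 * scan on hci0:
 *
 *   #include <sys/ioctl.h>
 *   #include <sys/socket.h>
 *   #include <bluetooth/bluetooth.h>
 *   #include <bluetooth/hci.h>
 *
 *   struct hci_dev_req dr = {
 *           .dev_id  = 0,                        // hci0
 *           .dev_opt = SCAN_PAGE | SCAN_INQUIRY, // connectable + discoverable
 *   };
 *   int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *   if (sk >= 0 && ioctl(sk, HCISETSCAN, (unsigned long) &dr) < 0)
 *           perror("HCISETSCAN");
 */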
1879
1880 int hci_get_dev_list(void __user *arg)
1881 {
1882         struct hci_dev *hdev;
1883         struct hci_dev_list_req *dl;
1884         struct hci_dev_req *dr;
1885         int n = 0, size, err;
1886         __u16 dev_num;
1887
1888         if (get_user(dev_num, (__u16 __user *) arg))
1889                 return -EFAULT;
1890
1891         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1892                 return -EINVAL;
1893
1894         size = sizeof(*dl) + dev_num * sizeof(*dr);
1895
1896         dl = kzalloc(size, GFP_KERNEL);
1897         if (!dl)
1898                 return -ENOMEM;
1899
1900         dr = dl->dev_req;
1901
1902         read_lock(&hci_dev_list_lock);
1903         list_for_each_entry(hdev, &hci_dev_list, list) {
1904                 unsigned long flags = hdev->flags;
1905
1906                 /* When auto-off is configured, the transport is
1907                  * running, but the device should still be reported
1908                  * as down.
1909                  */
1910                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1911                         flags &= ~BIT(HCI_UP);
1912
1913                 (dr + n)->dev_id  = hdev->id;
1914                 (dr + n)->dev_opt = flags;
1915
1916                 if (++n >= dev_num)
1917                         break;
1918         }
1919         read_unlock(&hci_dev_list_lock);
1920
1921         dl->dev_num = n;
1922         size = sizeof(*dl) + n * sizeof(*dr);
1923
1924         err = copy_to_user(arg, dl, size);
1925         kfree(dl);
1926
1927         return err ? -EFAULT : 0;
1928 }
1929
1930 int hci_get_dev_info(void __user *arg)
1931 {
1932         struct hci_dev *hdev;
1933         struct hci_dev_info di;
1934         unsigned long flags;
1935         int err = 0;
1936
1937         if (copy_from_user(&di, arg, sizeof(di)))
1938                 return -EFAULT;
1939
1940         hdev = hci_dev_get(di.dev_id);
1941         if (!hdev)
1942                 return -ENODEV;
1943
1944         /* When auto-off is configured, the transport is
1945          * running, but the device should still be reported
1946          * as down.
1947          */
1948         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1949                 flags = hdev->flags & ~BIT(HCI_UP);
1950         else
1951                 flags = hdev->flags;
1952
1953         strcpy(di.name, hdev->name);
1954         di.bdaddr   = hdev->bdaddr;
1955         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1956         di.flags    = flags;
1957         di.pkt_type = hdev->pkt_type;
1958         if (lmp_bredr_capable(hdev)) {
1959                 di.acl_mtu  = hdev->acl_mtu;
1960                 di.acl_pkts = hdev->acl_pkts;
1961                 di.sco_mtu  = hdev->sco_mtu;
1962                 di.sco_pkts = hdev->sco_pkts;
1963         } else {
1964                 di.acl_mtu  = hdev->le_mtu;
1965                 di.acl_pkts = hdev->le_pkts;
1966                 di.sco_mtu  = 0;
1967                 di.sco_pkts = 0;
1968         }
1969         di.link_policy = hdev->link_policy;
1970         di.link_mode   = hdev->link_mode;
1971
1972         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1973         memcpy(&di.features, &hdev->features, sizeof(di.features));
1974
1975         if (copy_to_user(arg, &di, sizeof(di)))
1976                 err = -EFAULT;
1977
1978         hci_dev_put(hdev);
1979
1980         return err;
1981 }
1982
1983 /* ---- Interface to HCI drivers ---- */
1984
1985 static int hci_rfkill_set_block(void *data, bool blocked)
1986 {
1987         struct hci_dev *hdev = data;
1988
1989         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1990
1991         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1992                 return -EBUSY;
1993
1994         if (blocked) {
1995                 hci_dev_set_flag(hdev, HCI_RFKILLED);
1996                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1997                     !hci_dev_test_flag(hdev, HCI_CONFIG))
1998                         hci_dev_do_close(hdev);
1999         } else {
2000                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2001         }
2002
2003         return 0;
2004 }
2005
2006 static const struct rfkill_ops hci_rfkill_ops = {
2007         .set_block = hci_rfkill_set_block,
2008 };
2009
2010 static void hci_power_on(struct work_struct *work)
2011 {
2012         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2013         int err;
2014
2015         BT_DBG("%s", hdev->name);
2016
2017         if (test_bit(HCI_UP, &hdev->flags) &&
2018             hci_dev_test_flag(hdev, HCI_MGMT) &&
2019             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2020                 hci_req_sync_lock(hdev);
2021                 err = __hci_req_hci_power_on(hdev);
2022                 hci_req_sync_unlock(hdev);
2023                 mgmt_power_on(hdev, err);
2024                 return;
2025         }
2026
2027         err = hci_dev_do_open(hdev);
2028         if (err < 0) {
2029                 hci_dev_lock(hdev);
2030                 mgmt_set_powered_failed(hdev, err);
2031                 hci_dev_unlock(hdev);
2032                 return;
2033         }
2034
2035         /* During the HCI setup phase, a few error conditions are
2036          * ignored and they need to be checked now. If they are still
2037          * valid, it is important to turn the device back off.
2038          */
2039         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2040             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2041             (hdev->dev_type == HCI_BREDR &&
2042              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2043              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2044                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2045                 hci_dev_do_close(hdev);
2046         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2047                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2048                                    HCI_AUTO_OFF_TIMEOUT);
2049         }
2050
2051         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2052                 /* For unconfigured devices, set the HCI_RAW flag
2053                  * so that userspace can easily identify them.
2054                  */
2055                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2056                         set_bit(HCI_RAW, &hdev->flags);
2057
2058                 /* For fully configured devices, this will send
2059                  * the Index Added event. For unconfigured devices,
2060                  * it will send an Unconfigured Index Added event.
2061                  *
2062                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2063                  * and no event will be sent.
2064                  */
2065                 mgmt_index_added(hdev);
2066         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2067                 /* Now that the controller is configured, it is
2068                  * important to clear the HCI_RAW flag.
2069                  */
2070                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2071                         clear_bit(HCI_RAW, &hdev->flags);
2072
2073                 /* Powering on the controller with HCI_CONFIG set only
2074                  * happens with the transition from unconfigured to
2075                  * configured. This will send the Index Added event.
2076                  */
2077                 mgmt_index_added(hdev);
2078         }
2079 }
2080
2081 static void hci_power_off(struct work_struct *work)
2082 {
2083         struct hci_dev *hdev = container_of(work, struct hci_dev,
2084                                             power_off.work);
2085
2086         BT_DBG("%s", hdev->name);
2087
2088         hci_dev_do_close(hdev);
2089 }
2090
2091 static void hci_error_reset(struct work_struct *work)
2092 {
2093         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2094
2095         BT_DBG("%s", hdev->name);
2096
2097         if (hdev->hw_error)
2098                 hdev->hw_error(hdev, hdev->hw_error_code);
2099         else
2100                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2101                        hdev->hw_error_code);
2102
2103         if (hci_dev_do_close(hdev))
2104                 return;
2105
2106         hci_dev_do_open(hdev);
2107 }
2108
2109 void hci_uuids_clear(struct hci_dev *hdev)
2110 {
2111         struct bt_uuid *uuid, *tmp;
2112
2113         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2114                 list_del(&uuid->list);
2115                 kfree(uuid);
2116         }
2117 }
2118
2119 void hci_link_keys_clear(struct hci_dev *hdev)
2120 {
2121         struct link_key *key;
2122
2123         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2124                 list_del_rcu(&key->list);
2125                 kfree_rcu(key, rcu);
2126         }
2127 }
2128
2129 void hci_smp_ltks_clear(struct hci_dev *hdev)
2130 {
2131         struct smp_ltk *k;
2132
2133         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2134                 list_del_rcu(&k->list);
2135                 kfree_rcu(k, rcu);
2136         }
2137 }
2138
2139 void hci_smp_irks_clear(struct hci_dev *hdev)
2140 {
2141         struct smp_irk *k;
2142
2143         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2144                 list_del_rcu(&k->list);
2145                 kfree_rcu(k, rcu);
2146         }
2147 }
2148
2149 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2150 {
2151         struct link_key *k;
2152
2153         rcu_read_lock();
2154         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2155                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2156                         rcu_read_unlock();
2157                         return k;
2158                 }
2159         }
2160         rcu_read_unlock();
2161
2162         return NULL;
2163 }
2164
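/* Decide whether a BR/EDR link key should be stored persistently.
 * Legacy keys and keys derived over a secure LE link are kept; debug
 * keys and pure no-bonding cases are not.
 */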
2165 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2166                                u8 key_type, u8 old_key_type)
2167 {
2168         /* Legacy key */
2169         if (key_type < 0x03)
2170                 return true;
2171
2172         /* Debug keys are insecure so don't store them persistently */
2173         if (key_type == HCI_LK_DEBUG_COMBINATION)
2174                 return false;
2175
2176         /* Changed combination key and there's no previous one */
2177         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2178                 return false;
2179
2180         /* Security mode 3 case */
2181         if (!conn)
2182                 return true;
2183
2184         /* BR/EDR key derived using SC from an LE link */
2185         if (conn->type == LE_LINK)
2186                 return true;
2187
2188         /* Neither local nor remote side had no-bonding as requirement */
2189         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2190                 return true;
2191
2192         /* Local side had dedicated bonding as requirement */
2193         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2194                 return true;
2195
2196         /* Remote side had dedicated bonding as requirement */
2197         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2198                 return true;
2199
2200         /* If none of the above criteria match, then don't store the key
2201          * persistently */
2202         return false;
2203 }
2204
2205 static u8 ltk_role(u8 type)
2206 {
2207         if (type == SMP_LTK)
2208                 return HCI_ROLE_MASTER;
2209
2210         return HCI_ROLE_SLAVE;
2211 }
2212
2213 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2214                              u8 addr_type, u8 role)
2215 {
2216         struct smp_ltk *k;
2217
2218         rcu_read_lock();
2219         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2220                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2221                         continue;
2222
2223                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2224                         rcu_read_unlock();
2225                         return k;
2226                 }
2227         }
2228         rcu_read_unlock();
2229
2230         return NULL;
2231 }
2232
2233 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2234 {
2235         struct smp_irk *irk;
2236
2237         rcu_read_lock();
2238         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2239                 if (!bacmp(&irk->rpa, rpa)) {
2240                         rcu_read_unlock();
2241                         return irk;
2242                 }
2243         }
2244
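        /* No cached RPA matched; try to resolve the RPA against each
         * IRK and cache the result on a successful match.
         */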
2245         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2246                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2247                         bacpy(&irk->rpa, rpa);
2248                         rcu_read_unlock();
2249                         return irk;
2250                 }
2251         }
2252         rcu_read_unlock();
2253
2254         return NULL;
2255 }
2256
2257 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2258                                      u8 addr_type)
2259 {
2260         struct smp_irk *irk;
2261
2262         /* Identity Address must be public or static random */
2263         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2264                 return NULL;
2265
2266         rcu_read_lock();
2267         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2268                 if (addr_type == irk->addr_type &&
2269                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2270                         rcu_read_unlock();
2271                         return irk;
2272                 }
2273         }
2274         rcu_read_unlock();
2275
2276         return NULL;
2277 }
2278
2279 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2280                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2281                                   u8 pin_len, bool *persistent)
2282 {
2283         struct link_key *key, *old_key;
2284         u8 old_key_type;
2285
2286         old_key = hci_find_link_key(hdev, bdaddr);
2287         if (old_key) {
2288                 old_key_type = old_key->type;
2289                 key = old_key;
2290         } else {
2291                 old_key_type = conn ? conn->key_type : 0xff;
2292                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2293                 if (!key)
2294                         return NULL;
2295                 list_add_rcu(&key->list, &hdev->link_keys);
2296         }
2297
2298         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2299
2300         /* Some buggy controller combinations generate a changed
2301          * combination key for legacy pairing even when there's no
2302          * previous key */
2303         if (type == HCI_LK_CHANGED_COMBINATION &&
2304             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2305                 type = HCI_LK_COMBINATION;
2306                 if (conn)
2307                         conn->key_type = type;
2308         }
2309
2310         bacpy(&key->bdaddr, bdaddr);
2311         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2312         key->pin_len = pin_len;
2313
2314         if (type == HCI_LK_CHANGED_COMBINATION)
2315                 key->type = old_key_type;
2316         else
2317                 key->type = type;
2318
2319         if (persistent)
2320                 *persistent = hci_persistent_key(hdev, conn, type,
2321                                                  old_key_type);
2322
2323         return key;
2324 }
2325
2326 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2327                             u8 addr_type, u8 type, u8 authenticated,
2328                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2329 {
2330         struct smp_ltk *key, *old_key;
2331         u8 role = ltk_role(type);
2332
2333         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2334         if (old_key)
2335                 key = old_key;
2336         else {
2337                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2338                 if (!key)
2339                         return NULL;
2340                 list_add_rcu(&key->list, &hdev->long_term_keys);
2341         }
2342
2343         bacpy(&key->bdaddr, bdaddr);
2344         key->bdaddr_type = addr_type;
2345         memcpy(key->val, tk, sizeof(key->val));
2346         key->authenticated = authenticated;
2347         key->ediv = ediv;
2348         key->rand = rand;
2349         key->enc_size = enc_size;
2350         key->type = type;
2351
2352         return key;
2353 }
2354
2355 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2356                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2357 {
2358         struct smp_irk *irk;
2359
2360         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2361         if (!irk) {
2362                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2363                 if (!irk)
2364                         return NULL;
2365
2366                 bacpy(&irk->bdaddr, bdaddr);
2367                 irk->addr_type = addr_type;
2368
2369                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2370         }
2371
2372         memcpy(irk->val, val, 16);
2373         bacpy(&irk->rpa, rpa);
2374
2375         return irk;
2376 }
2377
2378 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2379 {
2380         struct link_key *key;
2381
2382         key = hci_find_link_key(hdev, bdaddr);
2383         if (!key)
2384                 return -ENOENT;
2385
2386         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2387
2388         list_del_rcu(&key->list);
2389         kfree_rcu(key, rcu);
2390
2391         return 0;
2392 }
2393
2394 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2395 {
2396         struct smp_ltk *k;
2397         int removed = 0;
2398
2399         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2400                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2401                         continue;
2402
2403                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2404
2405                 list_del_rcu(&k->list);
2406                 kfree_rcu(k, rcu);
2407                 removed++;
2408         }
2409
2410         return removed ? 0 : -ENOENT;
2411 }
2412
2413 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2414 {
2415         struct smp_irk *k;
2416
2417         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2418                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2419                         continue;
2420
2421                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2422
2423                 list_del_rcu(&k->list);
2424                 kfree_rcu(k, rcu);
2425         }
2426 }
2427
2428 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2429 {
2430         struct smp_ltk *k;
2431         struct smp_irk *irk;
2432         u8 addr_type;
2433
2434         if (type == BDADDR_BREDR) {
2435                 if (hci_find_link_key(hdev, bdaddr))
2436                         return true;
2437                 return false;
2438         }
2439
2440         /* Convert to HCI addr type which struct smp_ltk uses */
2441         if (type == BDADDR_LE_PUBLIC)
2442                 addr_type = ADDR_LE_DEV_PUBLIC;
2443         else
2444                 addr_type = ADDR_LE_DEV_RANDOM;
2445
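        /* If the address resolves through a stored IRK, continue the
         * lookup with the peer's identity address instead.
         */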
2446         irk = hci_get_irk(hdev, bdaddr, addr_type);
2447         if (irk) {
2448                 bdaddr = &irk->bdaddr;
2449                 addr_type = irk->addr_type;
2450         }
2451
2452         rcu_read_lock();
2453         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2454                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2455                         rcu_read_unlock();
2456                         return true;
2457                 }
2458         }
2459         rcu_read_unlock();
2460
2461         return false;
2462 }
2463
2464 /* HCI command timer function */
2465 static void hci_cmd_timeout(struct work_struct *work)
2466 {
2467         struct hci_dev *hdev = container_of(work, struct hci_dev,
2468                                             cmd_timer.work);
2469
2470         if (hdev->sent_cmd) {
2471                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2472                 u16 opcode = __le16_to_cpu(sent->opcode);
2473
2474                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2475         } else {
2476                 BT_ERR("%s command tx timeout", hdev->name);
2477         }
2478
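        /* Restore one command credit and kick the command work item so
         * that queued commands can be sent again.
         */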
2479         atomic_set(&hdev->cmd_cnt, 1);
2480         queue_work(hdev->workqueue, &hdev->cmd_work);
2481 }
2482
2483 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2484                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2485 {
2486         struct oob_data *data;
2487
2488         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2489                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2490                         continue;
2491                 if (data->bdaddr_type != bdaddr_type)
2492                         continue;
2493                 return data;
2494         }
2495
2496         return NULL;
2497 }
2498
2499 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2500                                u8 bdaddr_type)
2501 {
2502         struct oob_data *data;
2503
2504         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2505         if (!data)
2506                 return -ENOENT;
2507
2508         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2509
2510         list_del(&data->list);
2511         kfree(data);
2512
2513         return 0;
2514 }
2515
2516 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2517 {
2518         struct oob_data *data, *n;
2519
2520         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2521                 list_del(&data->list);
2522                 kfree(data);
2523         }
2524 }
2525
2526 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2527                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2528                             u8 *hash256, u8 *rand256)
2529 {
2530         struct oob_data *data;
2531
2532         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2533         if (!data) {
2534                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2535                 if (!data)
2536                         return -ENOMEM;
2537
2538                 bacpy(&data->bdaddr, bdaddr);
2539                 data->bdaddr_type = bdaddr_type;
2540                 list_add(&data->list, &hdev->remote_oob_data);
2541         }
2542
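        /* data->present is a bitmask derived below: bit 0 is set when
         * valid P-192 values are stored, bit 1 when valid P-256 values
         * are stored.
         */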
2543         if (hash192 && rand192) {
2544                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2545                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2546                 if (hash256 && rand256)
2547                         data->present = 0x03;
2548         } else {
2549                 memset(data->hash192, 0, sizeof(data->hash192));
2550                 memset(data->rand192, 0, sizeof(data->rand192));
2551                 if (hash256 && rand256)
2552                         data->present = 0x02;
2553                 else
2554                         data->present = 0x00;
2555         }
2556
2557         if (hash256 && rand256) {
2558                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2559                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2560         } else {
2561                 memset(data->hash256, 0, sizeof(data->hash256));
2562                 memset(data->rand256, 0, sizeof(data->rand256));
2563                 if (hash192 && rand192)
2564                         data->present = 0x01;
2565         }
2566
2567         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2568
2569         return 0;
2570 }
2571
2572 /* This function requires the caller holds hdev->lock */
2573 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2574 {
2575         struct adv_info *adv_instance;
2576
2577         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2578                 if (adv_instance->instance == instance)
2579                         return adv_instance;
2580         }
2581
2582         return NULL;
2583 }
2584
2585 /* This function requires the caller holds hdev->lock */
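/* Return the advertising instance following the given one, wrapping
 * around from the last instance back to the first (round-robin).
 */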
2586 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2587 {
2588         struct adv_info *cur_instance;
2589
2590         cur_instance = hci_find_adv_instance(hdev, instance);
2591         if (!cur_instance)
2592                 return NULL;
2593
2594         if (cur_instance == list_last_entry(&hdev->adv_instances,
2595                                             struct adv_info, list))
2596                 return list_first_entry(&hdev->adv_instances,
2597                                                  struct adv_info, list);
2598         else
2599                 return list_next_entry(cur_instance, list);
2600 }
2601
2602 /* This function requires the caller holds hdev->lock */
2603 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2604 {
2605         struct adv_info *adv_instance;
2606
2607         adv_instance = hci_find_adv_instance(hdev, instance);
2608         if (!adv_instance)
2609                 return -ENOENT;
2610
2611         BT_DBG("%s removing instance %u", hdev->name, instance);
2612
2613         if (hdev->cur_adv_instance == instance) {
2614                 if (hdev->adv_instance_timeout) {
2615                         cancel_delayed_work(&hdev->adv_instance_expire);
2616                         hdev->adv_instance_timeout = 0;
2617                 }
2618                 hdev->cur_adv_instance = 0x00;
2619         }
2620
2621         list_del(&adv_instance->list);
2622         kfree(adv_instance);
2623
2624         hdev->adv_instance_cnt--;
2625
2626         return 0;
2627 }
2628
2629 /* This function requires the caller holds hdev->lock */
2630 void hci_adv_instances_clear(struct hci_dev *hdev)
2631 {
2632         struct adv_info *adv_instance, *n;
2633
2634         if (hdev->adv_instance_timeout) {
2635                 cancel_delayed_work(&hdev->adv_instance_expire);
2636                 hdev->adv_instance_timeout = 0;
2637         }
2638
2639         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2640                 list_del(&adv_instance->list);
2641                 kfree(adv_instance);
2642         }
2643
2644         hdev->adv_instance_cnt = 0;
2645         hdev->cur_adv_instance = 0x00;
2646 }
2647
2648 /* This function requires the caller holds hdev->lock */
2649 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2650                          u16 adv_data_len, u8 *adv_data,
2651                          u16 scan_rsp_len, u8 *scan_rsp_data,
2652                          u16 timeout, u16 duration)
2653 {
2654         struct adv_info *adv_instance;
2655
2656         adv_instance = hci_find_adv_instance(hdev, instance);
2657         if (adv_instance) {
2658                 memset(adv_instance->adv_data, 0,
2659                        sizeof(adv_instance->adv_data));
2660                 memset(adv_instance->scan_rsp_data, 0,
2661                        sizeof(adv_instance->scan_rsp_data));
2662         } else {
2663                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2664                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2665                         return -EOVERFLOW;
2666
2667                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2668                 if (!adv_instance)
2669                         return -ENOMEM;
2670
2671                 adv_instance->pending = true;
2672                 adv_instance->instance = instance;
2673                 list_add(&adv_instance->list, &hdev->adv_instances);
2674                 hdev->adv_instance_cnt++;
2675         }
2676
2677         adv_instance->flags = flags;
2678         adv_instance->adv_data_len = adv_data_len;
2679         adv_instance->scan_rsp_len = scan_rsp_len;
2680
2681         if (adv_data_len)
2682                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2683
2684         if (scan_rsp_len)
2685                 memcpy(adv_instance->scan_rsp_data,
2686                        scan_rsp_data, scan_rsp_len);
2687
2688         adv_instance->timeout = timeout;
2689         adv_instance->remaining_time = timeout;
2690
2691         if (duration == 0)
2692                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2693         else
2694                 adv_instance->duration = duration;
2695
2696         BT_DBG("%s for instance %u", hdev->name, instance);
2697
2698         return 0;
2699 }
2700
2701 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2702                                          bdaddr_t *bdaddr, u8 type)
2703 {
2704         struct bdaddr_list *b;
2705
2706         list_for_each_entry(b, bdaddr_list, list) {
2707                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2708                         return b;
2709         }
2710
2711         return NULL;
2712 }
2713
2714 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2715 {
2716         struct bdaddr_list *b, *n;
2717
2718         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2719                 list_del(&b->list);
2720                 kfree(b);
2721         }
2722 }
2723
2724 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2725 {
2726         struct bdaddr_list *entry;
2727
2728         if (!bacmp(bdaddr, BDADDR_ANY))
2729                 return -EBADF;
2730
2731         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2732                 return -EEXIST;
2733
2734         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2735         if (!entry)
2736                 return -ENOMEM;
2737
2738         bacpy(&entry->bdaddr, bdaddr);
2739         entry->bdaddr_type = type;
2740
2741         list_add(&entry->list, list);
2742
2743         return 0;
2744 }
2745
2746 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2747 {
2748         struct bdaddr_list *entry;
2749
2750         if (!bacmp(bdaddr, BDADDR_ANY)) {
2751                 hci_bdaddr_list_clear(list);
2752                 return 0;
2753         }
2754
2755         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2756         if (!entry)
2757                 return -ENOENT;
2758
2759         list_del(&entry->list);
2760         kfree(entry);
2761
2762         return 0;
2763 }
2764
2765 /* This function requires the caller holds hdev->lock */
2766 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2767                                                bdaddr_t *addr, u8 addr_type)
2768 {
2769         struct hci_conn_params *params;
2770
2771         list_for_each_entry(params, &hdev->le_conn_params, list) {
2772                 if (bacmp(&params->addr, addr) == 0 &&
2773                     params->addr_type == addr_type) {
2774                         return params;
2775                 }
2776         }
2777
2778         return NULL;
2779 }
2780
2781 /* This function requires the caller holds hdev->lock */
2782 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2783                                                   bdaddr_t *addr, u8 addr_type)
2784 {
2785         struct hci_conn_params *param;
2786
2787         list_for_each_entry(param, list, action) {
2788                 if (bacmp(&param->addr, addr) == 0 &&
2789                     param->addr_type == addr_type)
2790                         return param;
2791         }
2792
2793         return NULL;
2794 }
2795
2796 /* This function requires the caller holds hdev->lock */
2797 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2798                                             bdaddr_t *addr, u8 addr_type)
2799 {
2800         struct hci_conn_params *params;
2801
2802         params = hci_conn_params_lookup(hdev, addr, addr_type);
2803         if (params)
2804                 return params;
2805
2806         params = kzalloc(sizeof(*params), GFP_KERNEL);
2807         if (!params) {
2808                 BT_ERR("Out of memory");
2809                 return NULL;
2810         }
2811
2812         bacpy(&params->addr, addr);
2813         params->addr_type = addr_type;
2814
2815         list_add(&params->list, &hdev->le_conn_params);
2816         INIT_LIST_HEAD(&params->action);
2817
2818         params->conn_min_interval = hdev->le_conn_min_interval;
2819         params->conn_max_interval = hdev->le_conn_max_interval;
2820         params->conn_latency = hdev->le_conn_latency;
2821         params->supervision_timeout = hdev->le_supv_timeout;
2822         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2823
2824         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2825
2826         return params;
2827 }
2828
2829 static void hci_conn_params_free(struct hci_conn_params *params)
2830 {
2831         if (params->conn) {
2832                 hci_conn_drop(params->conn);
2833                 hci_conn_put(params->conn);
2834         }
2835
2836         list_del(&params->action);
2837         list_del(&params->list);
2838         kfree(params);
2839 }
2840
2841 /* This function requires the caller holds hdev->lock */
2842 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2843 {
2844         struct hci_conn_params *params;
2845
2846         params = hci_conn_params_lookup(hdev, addr, addr_type);
2847         if (!params)
2848                 return;
2849
2850         hci_conn_params_free(params);
2851
2852         hci_update_background_scan(hdev);
2853
2854         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2855 }
2856
2857 /* This function requires the caller holds hdev->lock */
2858 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2859 {
2860         struct hci_conn_params *params, *tmp;
2861
2862         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2863                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2864                         continue;
2865
2866                 /* If trying to establish a one-time connection to a disabled
2867                  * device, leave the params but mark them as explicit connect.
2868                  */
2869                 if (params->explicit_connect) {
2870                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2871                         continue;
2872                 }
2873
2874                 list_del(&params->list);
2875                 kfree(params);
2876         }
2877
2878         BT_DBG("All LE disabled connection parameters were removed");
2879 }
2880
2881 /* This function requires the caller holds hdev->lock */
2882 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2883 {
2884         struct hci_conn_params *params, *tmp;
2885
2886         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2887                 hci_conn_params_free(params);
2888
2889         BT_DBG("All LE connection parameters were removed");
2890 }
2891
2892 /* Copy the Identity Address of the controller.
2893  *
2894  * If the controller has a public BD_ADDR, then by default use that one.
2895  * If this is an LE-only controller without a public address, default to
2896  * the static random address.
2897  *
2898  * For debugging purposes it is possible to force controllers with a
2899  * public address to use the static random address instead.
2900  *
2901  * In case BR/EDR has been disabled on a dual-mode controller and
2902  * userspace has configured a static address, then that address
2903  * becomes the identity address instead of the public BR/EDR address.
2904  */
2905 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2906                                u8 *bdaddr_type)
2907 {
2908         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2909             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2910             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2911              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2912                 bacpy(bdaddr, &hdev->static_addr);
2913                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2914         } else {
2915                 bacpy(bdaddr, &hdev->bdaddr);
2916                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2917         }
2918 }
2919
2920 /* Alloc HCI device */
2921 struct hci_dev *hci_alloc_dev(void)
2922 {
2923         struct hci_dev *hdev;
2924
2925         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2926         if (!hdev)
2927                 return NULL;
2928
2929         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2930         hdev->esco_type = (ESCO_HV1);
2931         hdev->link_mode = (HCI_LM_ACCEPT);
2932         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2933         hdev->io_capability = 0x03;     /* No Input No Output */
2934         hdev->manufacturer = 0xffff;    /* Default to internal use */
2935         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2936         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2937         hdev->adv_instance_cnt = 0;
2938         hdev->cur_adv_instance = 0x00;
2939         hdev->adv_instance_timeout = 0;
2940
2941         hdev->sniff_max_interval = 800;
2942         hdev->sniff_min_interval = 80;
2943
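        /* The LE defaults below use Bluetooth Core Specification units:
         * advertising and scan intervals are in 0.625 ms units
         * (0x0800 = 1.28 s, 0x0060 = 60 ms), connection intervals in
         * 1.25 ms units (0x0028 = 50 ms, 0x0038 = 70 ms) and the
         * supervision timeout in 10 ms units (0x002a = 420 ms).
         */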
2944         hdev->le_adv_channel_map = 0x07;
2945         hdev->le_adv_min_interval = 0x0800;
2946         hdev->le_adv_max_interval = 0x0800;
2947         hdev->le_scan_interval = 0x0060;
2948         hdev->le_scan_window = 0x0030;
2949         hdev->le_conn_min_interval = 0x0028;
2950         hdev->le_conn_max_interval = 0x0038;
2951         hdev->le_conn_latency = 0x0000;
2952         hdev->le_supv_timeout = 0x002a;
2953         hdev->le_def_tx_len = 0x001b;
2954         hdev->le_def_tx_time = 0x0148;
2955         hdev->le_max_tx_len = 0x001b;
2956         hdev->le_max_tx_time = 0x0148;
2957         hdev->le_max_rx_len = 0x001b;
2958         hdev->le_max_rx_time = 0x0148;
2959
2960         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2961         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2962         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2963         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2964
2965         mutex_init(&hdev->lock);
2966         mutex_init(&hdev->req_lock);
2967
2968         INIT_LIST_HEAD(&hdev->mgmt_pending);
2969         INIT_LIST_HEAD(&hdev->blacklist);
2970         INIT_LIST_HEAD(&hdev->whitelist);
2971         INIT_LIST_HEAD(&hdev->uuids);
2972         INIT_LIST_HEAD(&hdev->link_keys);
2973         INIT_LIST_HEAD(&hdev->long_term_keys);
2974         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2975         INIT_LIST_HEAD(&hdev->remote_oob_data);
2976         INIT_LIST_HEAD(&hdev->le_white_list);
2977         INIT_LIST_HEAD(&hdev->le_conn_params);
2978         INIT_LIST_HEAD(&hdev->pend_le_conns);
2979         INIT_LIST_HEAD(&hdev->pend_le_reports);
2980         INIT_LIST_HEAD(&hdev->conn_hash.list);
2981         INIT_LIST_HEAD(&hdev->adv_instances);
2982
2983         INIT_WORK(&hdev->rx_work, hci_rx_work);
2984         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2985         INIT_WORK(&hdev->tx_work, hci_tx_work);
2986         INIT_WORK(&hdev->power_on, hci_power_on);
2987         INIT_WORK(&hdev->error_reset, hci_error_reset);
2988
2989         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2990
2991         skb_queue_head_init(&hdev->rx_q);
2992         skb_queue_head_init(&hdev->cmd_q);
2993         skb_queue_head_init(&hdev->raw_q);
2994
2995         init_waitqueue_head(&hdev->req_wait_q);
2996
2997         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2998
2999         hci_request_setup(hdev);
3000
3001         hci_init_sysfs(hdev);
3002         discovery_init(hdev);
3003
3004         return hdev;
3005 }
3006 EXPORT_SYMBOL(hci_alloc_dev);
3007
3008 /* Free HCI device */
3009 void hci_free_dev(struct hci_dev *hdev)
3010 {
3011         /* Will be freed via the device release callback */
3012         put_device(&hdev->dev);
3013 }
3014 EXPORT_SYMBOL(hci_free_dev);
3015
3016 /* Register HCI device */
3017 int hci_register_dev(struct hci_dev *hdev)
3018 {
3019         int id, error;
3020
3021         if (!hdev->open || !hdev->close || !hdev->send)
3022                 return -EINVAL;
3023
3024         /* Do not allow HCI_AMP devices to register at index 0,
3025          * so the index can be used as the AMP controller ID.
3026          */
3027         switch (hdev->dev_type) {
3028         case HCI_BREDR:
3029                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3030                 break;
3031         case HCI_AMP:
3032                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3033                 break;
3034         default:
3035                 return -EINVAL;
3036         }
3037
3038         if (id < 0)
3039                 return id;
3040
3041         sprintf(hdev->name, "hci%d", id);
3042         hdev->id = id;
3043
3044         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3045
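        /* Two workqueues: hdev->workqueue runs the rx/tx/cmd work
         * items, while req_workqueue serializes longer-running request
         * work such as power on/off.
         */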
3046         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3047                                           WQ_MEM_RECLAIM, 1, hdev->name);
3048         if (!hdev->workqueue) {
3049                 error = -ENOMEM;
3050                 goto err;
3051         }
3052
3053         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3054                                               WQ_MEM_RECLAIM, 1, hdev->name);
3055         if (!hdev->req_workqueue) {
3056                 destroy_workqueue(hdev->workqueue);
3057                 error = -ENOMEM;
3058                 goto err;
3059         }
3060
3061         if (!IS_ERR_OR_NULL(bt_debugfs))
3062                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3063
3064         dev_set_name(&hdev->dev, "%s", hdev->name);
3065
3066         error = device_add(&hdev->dev);
3067         if (error < 0)
3068                 goto err_wqueue;
3069
3070         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3071                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3072                                     hdev);
3073         if (hdev->rfkill) {
3074                 if (rfkill_register(hdev->rfkill) < 0) {
3075                         rfkill_destroy(hdev->rfkill);
3076                         hdev->rfkill = NULL;
3077                 }
3078         }
3079
3080         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3081                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3082
3083         hci_dev_set_flag(hdev, HCI_SETUP);
3084         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3085
3086         if (hdev->dev_type == HCI_BREDR) {
3087                 /* Assume BR/EDR support until proven otherwise (such as
3088                  * through reading supported features during init).
3089                  */
3090                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3091         }
3092
3093         write_lock(&hci_dev_list_lock);
3094         list_add(&hdev->list, &hci_dev_list);
3095         write_unlock(&hci_dev_list_lock);
3096
3097         /* Devices that are marked for raw-only usage are unconfigured
3098          * and should not be included in normal operation.
3099          */
3100         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3101                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3102
3103         hci_sock_dev_event(hdev, HCI_DEV_REG);
3104         hci_dev_hold(hdev);
3105
3106         queue_work(hdev->req_workqueue, &hdev->power_on);
3107
3108         return id;
3109
3110 err_wqueue:
3111         destroy_workqueue(hdev->workqueue);
3112         destroy_workqueue(hdev->req_workqueue);
3113 err:
3114         ida_simple_remove(&hci_index_ida, hdev->id);
3115
3116         return error;
3117 }
3118 EXPORT_SYMBOL(hci_register_dev);
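
/* Example (illustrative sketch, not taken from this file): a minimal driver
 * registration path. The mandatory open/close/send callbacks checked at the
 * top of hci_register_dev() must be set before registering; foo_probe,
 * foo_open, foo_close, foo_send and struct foo_device are hypothetical
 * driver-side names.
 *
 *	static int foo_probe(struct foo_device *fdev)
 *	{
 *		struct hci_dev *hdev;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_USB;
 *		hdev->open  = foo_open;
 *		hdev->close = foo_close;
 *		hdev->send  = foo_send;
 *
 *		if (hci_register_dev(hdev) < 0) {
 *			hci_free_dev(hdev);
 *			return -ENODEV;
 *		}
 *
 *		return 0;
 *	}
 */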
3119
3120 /* Unregister HCI device */
3121 void hci_unregister_dev(struct hci_dev *hdev)
3122 {
3123         int id;
3124
3125         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3126
3127         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3128
3129         id = hdev->id;
3130
3131         write_lock(&hci_dev_list_lock);
3132         list_del(&hdev->list);
3133         write_unlock(&hci_dev_list_lock);
3134
3135         hci_dev_do_close(hdev);
3136
3137         cancel_work_sync(&hdev->power_on);
3138
3139         if (!test_bit(HCI_INIT, &hdev->flags) &&
3140             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3141             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3142                 hci_dev_lock(hdev);
3143                 mgmt_index_removed(hdev);
3144                 hci_dev_unlock(hdev);
3145         }
3146
3147         /* mgmt_index_removed should take care of emptying the
3148          * pending list */
3149         BUG_ON(!list_empty(&hdev->mgmt_pending));
3150
3151         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3152
3153         if (hdev->rfkill) {
3154                 rfkill_unregister(hdev->rfkill);
3155                 rfkill_destroy(hdev->rfkill);
3156         }
3157
3158         device_del(&hdev->dev);
3159
3160         debugfs_remove_recursive(hdev->debugfs);
3161
3162         destroy_workqueue(hdev->workqueue);
3163         destroy_workqueue(hdev->req_workqueue);
3164
3165         hci_dev_lock(hdev);
3166         hci_bdaddr_list_clear(&hdev->blacklist);
3167         hci_bdaddr_list_clear(&hdev->whitelist);
3168         hci_uuids_clear(hdev);
3169         hci_link_keys_clear(hdev);
3170         hci_smp_ltks_clear(hdev);
3171         hci_smp_irks_clear(hdev);
3172         hci_remote_oob_data_clear(hdev);
3173         hci_adv_instances_clear(hdev);
3174         hci_bdaddr_list_clear(&hdev->le_white_list);
3175         hci_conn_params_clear_all(hdev);
3176         hci_discovery_filter_clear(hdev);
3177         hci_dev_unlock(hdev);
3178
3179         hci_dev_put(hdev);
3180
3181         ida_simple_remove(&hci_index_ida, id);
3182 }
3183 EXPORT_SYMBOL(hci_unregister_dev);
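
/* Example (sketch, continuing the hypothetical foo driver above): the
 * matching remove path unregisters the device and then releases it.
 *
 *	static void foo_remove(struct foo_device *fdev)
 *	{
 *		struct hci_dev *hdev = fdev->hdev;
 *
 *		hci_unregister_dev(hdev);
 *		hci_free_dev(hdev);
 *	}
 */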
3184
3185 /* Suspend HCI device */
3186 int hci_suspend_dev(struct hci_dev *hdev)
3187 {
3188         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3189         return 0;
3190 }
3191 EXPORT_SYMBOL(hci_suspend_dev);
3192
3193 /* Resume HCI device */
3194 int hci_resume_dev(struct hci_dev *hdev)
3195 {
3196         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3197         return 0;
3198 }
3199 EXPORT_SYMBOL(hci_resume_dev);
3200
3201 /* Reset HCI device */
3202 int hci_reset_dev(struct hci_dev *hdev)
3203 {
3204         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3205         struct sk_buff *skb;
3206
3207         skb = bt_skb_alloc(3, GFP_ATOMIC);
3208         if (!skb)
3209                 return -ENOMEM;
3210
3211         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3212         memcpy(skb_put(skb, 3), hw_err, 3);
3213
3214         /* Send Hardware Error to upper stack */
3215         return hci_recv_frame(hdev, skb);
3216 }
3217 EXPORT_SYMBOL(hci_reset_dev);
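
/* Note: the three bytes injected above form a complete HCI event packet:
 * event code HCI_EV_HARDWARE_ERROR (0x10), a parameter length of 0x01 and
 * a hardware error code of 0x00. Feeding it through hci_recv_frame() lets
 * the normal event path handle a driver-requested reset exactly like a
 * controller-reported hardware error.
 */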
3218
3219 /* Receive frame from HCI drivers */
3220 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3221 {
3222         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3223                       !test_bit(HCI_INIT, &hdev->flags))) {
3224                 kfree_skb(skb);
3225                 return -ENXIO;
3226         }
3227
3228         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3229             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3230             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3231                 kfree_skb(skb);
3232                 return -EINVAL;
3233         }
3234
3235         /* Incoming skb */
3236         bt_cb(skb)->incoming = 1;
3237
3238         /* Time stamp */
3239         __net_timestamp(skb);
3240
3241         skb_queue_tail(&hdev->rx_q, skb);
3242         queue_work(hdev->workqueue, &hdev->rx_work);
3243
3244         return 0;
3245 }
3246 EXPORT_SYMBOL(hci_recv_frame);
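
/* Example (illustrative): a driver's interrupt or URB completion handler
 * typically builds an skb, tags the packet type and hands the frame to the
 * core; "data" and "len" here stand in for the bytes received from the
 * hardware.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), data, len);
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */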
3247
3248 /* Receive diagnostic message from HCI drivers */
3249 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3250 {
3251         /* Mark as diagnostic packet */
3252         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3253
3254         /* Time stamp */
3255         __net_timestamp(skb);
3256
3257         skb_queue_tail(&hdev->rx_q, skb);
3258         queue_work(hdev->workqueue, &hdev->rx_work);
3259
3260         return 0;
3261 }
3262 EXPORT_SYMBOL(hci_recv_diag);
3263
3264 /* ---- Interface to upper protocols ---- */
3265
3266 int hci_register_cb(struct hci_cb *cb)
3267 {
3268         BT_DBG("%p name %s", cb, cb->name);
3269
3270         mutex_lock(&hci_cb_list_lock);
3271         list_add_tail(&cb->list, &hci_cb_list);
3272         mutex_unlock(&hci_cb_list_lock);
3273
3274         return 0;
3275 }
3276 EXPORT_SYMBOL(hci_register_cb);
3277
3278 int hci_unregister_cb(struct hci_cb *cb)
3279 {
3280         BT_DBG("%p name %s", cb, cb->name);
3281
3282         mutex_lock(&hci_cb_list_lock);
3283         list_del(&cb->list);
3284         mutex_unlock(&hci_cb_list_lock);
3285
3286         return 0;
3287 }
3288 EXPORT_SYMBOL(hci_unregister_cb);
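
/* Example (sketch, modelled on how the L2CAP and SCO layers hook in): an
 * upper protocol fills in a named callback table and registers it once at
 * module init; foo_connect_cfm is a hypothetical confirmation handler.
 *
 *	static struct hci_cb foo_cb = {
 *		.name        = "foo",
 *		.connect_cfm = foo_connect_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 */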
3289
3290 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3291 {
3292         int err;
3293
3294         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3295                skb->len);
3296
3297         /* Time stamp */
3298         __net_timestamp(skb);
3299
3300         /* Send copy to monitor */
3301         hci_send_to_monitor(hdev, skb);
3302
3303         if (atomic_read(&hdev->promisc)) {
3304                 /* Send copy to the sockets */
3305                 hci_send_to_sock(hdev, skb);
3306         }
3307
3308         /* Get rid of skb owner, prior to sending to the driver. */
3309         skb_orphan(skb);
3310
3311         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3312                 kfree_skb(skb);
3313                 return;
3314         }
3315
3316         err = hdev->send(hdev, skb);
3317         if (err < 0) {
3318                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3319                 kfree_skb(skb);
3320         }
3321 }
3322
3323 /* Send HCI command */
3324 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3325                  const void *param)
3326 {
3327         struct sk_buff *skb;
3328
3329         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3330
3331         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3332         if (!skb) {
3333                 BT_ERR("%s no memory for command", hdev->name);
3334                 return -ENOMEM;
3335         }
3336
3337         /* Stand-alone HCI commands must be flagged as
3338          * single-command requests.
3339          */
3340         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3341
3342         skb_queue_tail(&hdev->cmd_q, skb);
3343         queue_work(hdev->workqueue, &hdev->cmd_work);
3344
3345         return 0;
3346 }
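
/* Example (illustrative): queueing a parameterless HCI Reset. The command
 * is transmitted asynchronously by hci_cmd_work() once the controller has
 * a free command credit.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */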
3347
3348 /* Get data from the previously sent command */
3349 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3350 {
3351         struct hci_command_hdr *hdr;
3352
3353         if (!hdev->sent_cmd)
3354                 return NULL;
3355
3356         hdr = (void *) hdev->sent_cmd->data;
3357
3358         if (hdr->opcode != cpu_to_le16(opcode))
3359                 return NULL;
3360
3361         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3362
3363         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3364 }
3365
3366 /* Send HCI command and wait for Command Complete event */
3367 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3368                              const void *param, u32 timeout)
3369 {
3370         struct sk_buff *skb;
3371
3372         if (!test_bit(HCI_UP, &hdev->flags))
3373                 return ERR_PTR(-ENETDOWN);
3374
3375         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3376
3377         hci_req_sync_lock(hdev);
3378         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3379         hci_req_sync_unlock(hdev);
3380
3381         return skb;
3382 }
3383 EXPORT_SYMBOL(hci_cmd_sync);
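
/* Example (illustrative): reading the local version information
 * synchronously. The returned skb carries the Command Complete parameters
 * and must be freed by the caller.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data ...
 *
 *	kfree_skb(skb);
 */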
3384
3385 /* Send ACL data */
3386 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3387 {
3388         struct hci_acl_hdr *hdr;
3389         int len = skb->len;
3390
3391         skb_push(skb, HCI_ACL_HDR_SIZE);
3392         skb_reset_transport_header(skb);
3393         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3394         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3395         hdr->dlen   = cpu_to_le16(len);
3396 }
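
/* Note: hci_handle_pack() folds the 12-bit connection handle and the 2-bit
 * packet boundary and broadcast flags into a single 16-bit field,
 * effectively (handle & 0x0fff) | (flags << 12), matching the ACL data
 * packet header layout defined by the HCI specification.
 */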
3397
3398 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3399                           struct sk_buff *skb, __u16 flags)
3400 {
3401         struct hci_conn *conn = chan->conn;
3402         struct hci_dev *hdev = conn->hdev;
3403         struct sk_buff *list;
3404
3405         skb->len = skb_headlen(skb);
3406         skb->data_len = 0;
3407
3408         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3409
3410         switch (hdev->dev_type) {
3411         case HCI_BREDR:
3412                 hci_add_acl_hdr(skb, conn->handle, flags);
3413                 break;
3414         case HCI_AMP:
3415                 hci_add_acl_hdr(skb, chan->handle, flags);
3416                 break;
3417         default:
3418                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3419                 return;
3420         }
3421
3422         list = skb_shinfo(skb)->frag_list;
3423         if (!list) {
3424                 /* Non-fragmented */
3425                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3426
3427                 skb_queue_tail(queue, skb);
3428         } else {
3429                 /* Fragmented */
3430                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3431
3432                 skb_shinfo(skb)->frag_list = NULL;
3433
3434                 /* Queue all fragments atomically. We need to use spin_lock_bh
3435                  * here because with 6LoWPAN links this function can be
3436                  * called from softirq context, where taking a normal spin
3437                  * lock could cause deadlocks.
3438                  */
3439                 spin_lock_bh(&queue->lock);
3440
3441                 __skb_queue_tail(queue, skb);
3442
3443                 flags &= ~ACL_START;
3444                 flags |= ACL_CONT;
3445                 do {
3446                         skb = list; list = list->next;
3447
3448                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3449                         hci_add_acl_hdr(skb, conn->handle, flags);
3450
3451                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3452
3453                         __skb_queue_tail(queue, skb);
3454                 } while (list);
3455
3456                 spin_unlock_bh(&queue->lock);
3457         }
3458 }
3459
3460 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3461 {
3462         struct hci_dev *hdev = chan->conn->hdev;
3463
3464         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3465
3466         hci_queue_acl(chan, &chan->data_q, skb, flags);
3467
3468         queue_work(hdev->workqueue, &hdev->tx_work);
3469 }
3470
3471 /* Send SCO data */
3472 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3473 {
3474         struct hci_dev *hdev = conn->hdev;
3475         struct hci_sco_hdr hdr;
3476
3477         BT_DBG("%s len %d", hdev->name, skb->len);
3478
3479         hdr.handle = cpu_to_le16(conn->handle);
3480         hdr.dlen   = skb->len;
3481
3482         skb_push(skb, HCI_SCO_HDR_SIZE);
3483         skb_reset_transport_header(skb);
3484         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3485
3486         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3487
3488         skb_queue_tail(&conn->data_q, skb);
3489         queue_work(hdev->workqueue, &hdev->tx_work);
3490 }
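
/* Note: unlike the ACL header, struct hci_sco_hdr carries only a handle
 * and an 8-bit length, which is why SCO payloads are capped at 255 bytes
 * per packet.
 */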
3491
3492 /* ---- HCI TX task (outgoing data) ---- */
3493
3494 /* HCI Connection scheduler */
3495 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3496                                      int *quote)
3497 {
3498         struct hci_conn_hash *h = &hdev->conn_hash;
3499         struct hci_conn *conn = NULL, *c;
3500         unsigned int num = 0, min = ~0;
3501
3502         /* We don't have to lock the device here. Connections are always
3503          * added and removed with the TX task disabled. */
3504
3505         rcu_read_lock();
3506
3507         list_for_each_entry_rcu(c, &h->list, list) {
3508                 if (c->type != type || skb_queue_empty(&c->data_q))
3509                         continue;
3510
3511                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3512                         continue;
3513
3514                 num++;
3515
3516                 if (c->sent < min) {
3517                         min  = c->sent;
3518                         conn = c;
3519                 }
3520
3521                 if (hci_conn_num(hdev, type) == num)
3522                         break;
3523         }
3524
3525         rcu_read_unlock();
3526
3527         if (conn) {
3528                 int cnt, q;
3529
3530                 switch (conn->type) {
3531                 case ACL_LINK:
3532                         cnt = hdev->acl_cnt;
3533                         break;
3534                 case SCO_LINK:
3535                 case ESCO_LINK:
3536                         cnt = hdev->sco_cnt;
3537                         break;
3538                 case LE_LINK:
3539                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3540                         break;
3541                 default:
3542                         cnt = 0;
3543                         BT_ERR("Unknown link type");
3544                 }
3545
3546                 q = cnt / num;
3547                 *quote = q ? q : 1;
3548         } else
3549                 *quote = 0;
3550
3551         BT_DBG("conn %p quote %d", conn, *quote);
3552         return conn;
3553 }
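
/* Example: with hdev->acl_cnt == 8 and three ACL connections that all have
 * queued data, the connection with the fewest in-flight packets is selected
 * and granted a quote of 8 / 3 = 2 packets for this scheduling round.
 */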
3554
3555 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3556 {
3557         struct hci_conn_hash *h = &hdev->conn_hash;
3558         struct hci_conn *c;
3559
3560         BT_ERR("%s link tx timeout", hdev->name);
3561
3562         rcu_read_lock();
3563
3564         /* Kill stalled connections */
3565         list_for_each_entry_rcu(c, &h->list, list) {
3566                 if (c->type == type && c->sent) {
3567                         BT_ERR("%s killing stalled connection %pMR",
3568                                hdev->name, &c->dst);
3569                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3570                 }
3571         }
3572
3573         rcu_read_unlock();
3574 }
3575
3576 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3577                                       int *quote)
3578 {
3579         struct hci_conn_hash *h = &hdev->conn_hash;
3580         struct hci_chan *chan = NULL;
3581         unsigned int num = 0, min = ~0, cur_prio = 0;
3582         struct hci_conn *conn;
3583         int cnt, q, conn_num = 0;
3584
3585         BT_DBG("%s", hdev->name);
3586
3587         rcu_read_lock();
3588
3589         list_for_each_entry_rcu(conn, &h->list, list) {
3590                 struct hci_chan *tmp;
3591
3592                 if (conn->type != type)
3593                         continue;
3594
3595                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3596                         continue;
3597
3598                 conn_num++;
3599
3600                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3601                         struct sk_buff *skb;
3602
3603                         if (skb_queue_empty(&tmp->data_q))
3604                                 continue;
3605
3606                         skb = skb_peek(&tmp->data_q);
3607                         if (skb->priority < cur_prio)
3608                                 continue;
3609
3610                         if (skb->priority > cur_prio) {
3611                                 num = 0;
3612                                 min = ~0;
3613                                 cur_prio = skb->priority;
3614                         }
3615
3616                         num++;
3617
3618                         if (conn->sent < min) {
3619                                 min  = conn->sent;
3620                                 chan = tmp;
3621                         }
3622                 }
3623
3624                 if (hci_conn_num(hdev, type) == conn_num)
3625                         break;
3626         }
3627
3628         rcu_read_unlock();
3629
3630         if (!chan)
3631                 return NULL;
3632
3633         switch (chan->conn->type) {
3634         case ACL_LINK:
3635                 cnt = hdev->acl_cnt;
3636                 break;
3637         case AMP_LINK:
3638                 cnt = hdev->block_cnt;
3639                 break;
3640         case SCO_LINK:
3641         case ESCO_LINK:
3642                 cnt = hdev->sco_cnt;
3643                 break;
3644         case LE_LINK:
3645                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3646                 break;
3647         default:
3648                 cnt = 0;
3649                 BT_ERR("Unknown link type");
3650         }
3651
3652         q = cnt / num;
3653         *quote = q ? q : 1;
3654         BT_DBG("chan %p quote %d", chan, *quote);
3655         return chan;
3656 }
3657
3658 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3659 {
3660         struct hci_conn_hash *h = &hdev->conn_hash;
3661         struct hci_conn *conn;
3662         int num = 0;
3663
3664         BT_DBG("%s", hdev->name);
3665
3666         rcu_read_lock();
3667
3668         list_for_each_entry_rcu(conn, &h->list, list) {
3669                 struct hci_chan *chan;
3670
3671                 if (conn->type != type)
3672                         continue;
3673
3674                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3675                         continue;
3676
3677                 num++;
3678
3679                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3680                         struct sk_buff *skb;
3681
3682                         if (chan->sent) {
3683                                 chan->sent = 0;
3684                                 continue;
3685                         }
3686
3687                         if (skb_queue_empty(&chan->data_q))
3688                                 continue;
3689
3690                         skb = skb_peek(&chan->data_q);
3691                         if (skb->priority >= HCI_PRIO_MAX - 1)
3692                                 continue;
3693
3694                         skb->priority = HCI_PRIO_MAX - 1;
3695
3696                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3697                                skb->priority);
3698                 }
3699
3700                 if (hci_conn_num(hdev, type) == num)
3701                         break;
3702         }
3703
3704         rcu_read_unlock();
3705
3706 }
3707
3708 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3709 {
3710         /* Calculate count of blocks used by this packet */
3711         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3712 }
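
/* Example: with hdev->block_len == 256, a 1024-byte ACL packet (4-byte ACL
 * header included) occupies DIV_ROUND_UP(1024 - 4, 256) = 4 of the
 * controller's data blocks.
 */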
3713
3714 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3715 {
3716         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3717                 /* ACL tx timeout must be longer than maximum
3718                  * link supervision timeout (40.9 seconds) */
3719                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3720                                        HCI_ACL_TX_TIMEOUT))
3721                         hci_link_tx_to(hdev, ACL_LINK);
3722         }
3723 }
3724
3725 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3726 {
3727         unsigned int cnt = hdev->acl_cnt;
3728         struct hci_chan *chan;
3729         struct sk_buff *skb;
3730         int quote;
3731
3732         __check_timeout(hdev, cnt);
3733
3734         while (hdev->acl_cnt &&
3735                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3736                 u32 priority = (skb_peek(&chan->data_q))->priority;
3737                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3738                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3739                                skb->len, skb->priority);
3740
3741                         /* Stop if priority has changed */
3742                         if (skb->priority < priority)
3743                                 break;
3744
3745                         skb = skb_dequeue(&chan->data_q);
3746
3747                         hci_conn_enter_active_mode(chan->conn,
3748                                                    bt_cb(skb)->force_active);
3749
3750                         hci_send_frame(hdev, skb);
3751                         hdev->acl_last_tx = jiffies;
3752
3753                         hdev->acl_cnt--;
3754                         chan->sent++;
3755                         chan->conn->sent++;
3756                 }
3757         }
3758
3759         if (cnt != hdev->acl_cnt)
3760                 hci_prio_recalculate(hdev, ACL_LINK);
3761 }
3762
3763 static void hci_sched_acl_blk(struct hci_dev *hdev)
3764 {
3765         unsigned int cnt = hdev->block_cnt;
3766         struct hci_chan *chan;
3767         struct sk_buff *skb;
3768         int quote;
3769         u8 type;
3770
3771         __check_timeout(hdev, cnt);
3772
3773         BT_DBG("%s", hdev->name);
3774
3775         if (hdev->dev_type == HCI_AMP)
3776                 type = AMP_LINK;
3777         else
3778                 type = ACL_LINK;
3779
3780         while (hdev->block_cnt > 0 &&
3781                (chan = hci_chan_sent(hdev, type, &quote))) {
3782                 u32 priority = (skb_peek(&chan->data_q))->priority;
3783                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3784                         int blocks;
3785
3786                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3787                                skb->len, skb->priority);
3788
3789                         /* Stop if priority has changed */
3790                         if (skb->priority < priority)
3791                                 break;
3792
3793                         skb = skb_dequeue(&chan->data_q);
3794
3795                         blocks = __get_blocks(hdev, skb);
3796                         if (blocks > hdev->block_cnt)
3797                                 return;
3798
3799                         hci_conn_enter_active_mode(chan->conn,
3800                                                    bt_cb(skb)->force_active);
3801
3802                         hci_send_frame(hdev, skb);
3803                         hdev->acl_last_tx = jiffies;
3804
3805                         hdev->block_cnt -= blocks;
3806                         quote -= blocks;
3807
3808                         chan->sent += blocks;
3809                         chan->conn->sent += blocks;
3810                 }
3811         }
3812
3813         if (cnt != hdev->block_cnt)
3814                 hci_prio_recalculate(hdev, type);
3815 }
3816
3817 static void hci_sched_acl(struct hci_dev *hdev)
3818 {
3819         BT_DBG("%s", hdev->name);
3820
3821         /* No ACL link over BR/EDR controller */
3822         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3823                 return;
3824
3825         /* No AMP link over AMP controller */
3826         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3827                 return;
3828
3829         switch (hdev->flow_ctl_mode) {
3830         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3831                 hci_sched_acl_pkt(hdev);
3832                 break;
3833
3834         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3835                 hci_sched_acl_blk(hdev);
3836                 break;
3837         }
3838 }
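
/* Note: packet-based flow control charges whole packets against
 * hdev->acl_cnt, while block-based (AMP-style) flow control charges
 * hdev->block_cnt in units of hdev->block_len bytes, as computed by
 * __get_blocks() above.
 */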
3839
3840 /* Schedule SCO */
3841 static void hci_sched_sco(struct hci_dev *hdev)
3842 {
3843         struct hci_conn *conn;
3844         struct sk_buff *skb;
3845         int quote;
3846
3847         BT_DBG("%s", hdev->name);
3848
3849         if (!hci_conn_num(hdev, SCO_LINK))
3850                 return;
3851
3852         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3853                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3854                         BT_DBG("skb %p len %d", skb, skb->len);
3855                         hci_send_frame(hdev, skb);
3856
3857                         conn->sent++;
3858                         if (conn->sent == ~0)
3859                                 conn->sent = 0;
3860                 }
3861         }
3862 }
3863
3864 static void hci_sched_esco(struct hci_dev *hdev)
3865 {
3866         struct hci_conn *conn;
3867         struct sk_buff *skb;
3868         int quote;
3869
3870         BT_DBG("%s", hdev->name);
3871
3872         if (!hci_conn_num(hdev, ESCO_LINK))
3873                 return;
3874
3875         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3876                                                      &quote))) {
3877                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3878                         BT_DBG("skb %p len %d", skb, skb->len);
3879                         hci_send_frame(hdev, skb);
3880
3881                         conn->sent++;
3882                         if (conn->sent == ~0)
3883                                 conn->sent = 0;
3884                 }
3885         }
3886 }
3887
3888 static void hci_sched_le(struct hci_dev *hdev)
3889 {
3890         struct hci_chan *chan;
3891         struct sk_buff *skb;
3892         int quote, cnt, tmp;
3893
3894         BT_DBG("%s", hdev->name);
3895
3896         if (!hci_conn_num(hdev, LE_LINK))
3897                 return;
3898
3899         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3900                 /* LE tx timeout must be longer than maximum
3901                  * link supervision timeout (40.9 seconds) */
3902                 if (!hdev->le_cnt && hdev->le_pkts &&
3903                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3904                         hci_link_tx_to(hdev, LE_LINK);
3905         }
3906
3907         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3908         tmp = cnt;
3909         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3910                 u32 priority = (skb_peek(&chan->data_q))->priority;
3911                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3912                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3913                                skb->len, skb->priority);
3914
3915                         /* Stop if priority has changed */
3916                         if (skb->priority < priority)
3917                                 break;
3918
3919                         skb = skb_dequeue(&chan->data_q);
3920
3921                         hci_send_frame(hdev, skb);
3922                         hdev->le_last_tx = jiffies;
3923
3924                         cnt--;
3925                         chan->sent++;
3926                         chan->conn->sent++;
3927                 }
3928         }
3929
3930         if (hdev->le_pkts)
3931                 hdev->le_cnt = cnt;
3932         else
3933                 hdev->acl_cnt = cnt;
3934
3935         if (cnt != tmp)
3936                 hci_prio_recalculate(hdev, LE_LINK);
3937 }
3938
3939 static void hci_tx_work(struct work_struct *work)
3940 {
3941         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3942         struct sk_buff *skb;
3943
3944         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3945                hdev->sco_cnt, hdev->le_cnt);
3946
3947         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3948                 /* Schedule queues and send stuff to HCI driver */
3949                 hci_sched_acl(hdev);
3950                 hci_sched_sco(hdev);
3951                 hci_sched_esco(hdev);
3952                 hci_sched_le(hdev);
3953         }
3954
3955         /* Send next queued raw (unknown type) packet */
3956         while ((skb = skb_dequeue(&hdev->raw_q)))
3957                 hci_send_frame(hdev, skb);
3958 }
3959
3960 /* ----- HCI RX task (incoming data processing) ----- */
3961
3962 /* ACL data packet */
3963 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3964 {
3965         struct hci_acl_hdr *hdr = (void *) skb->data;
3966         struct hci_conn *conn;
3967         __u16 handle, flags;
3968
3969         skb_pull(skb, HCI_ACL_HDR_SIZE);
3970
3971         handle = __le16_to_cpu(hdr->handle);
3972         flags  = hci_flags(handle);
3973         handle = hci_handle(handle);
3974
3975         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3976                handle, flags);
3977
3978         hdev->stat.acl_rx++;
3979
3980         hci_dev_lock(hdev);
3981         conn = hci_conn_hash_lookup_handle(hdev, handle);
3982         hci_dev_unlock(hdev);
3983
3984         if (conn) {
3985                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3986
3987                 /* Send to upper protocol */
3988                 l2cap_recv_acldata(conn, skb, flags);
3989                 return;
3990         } else {
3991                 BT_ERR("%s ACL packet for unknown connection handle %d",
3992                        hdev->name, handle);
3993         }
3994
3995         kfree_skb(skb);
3996 }
3997
3998 /* SCO data packet */
3999 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4000 {
4001         struct hci_sco_hdr *hdr = (void *) skb->data;
4002         struct hci_conn *conn;
4003         __u16 handle;
4004
4005         skb_pull(skb, HCI_SCO_HDR_SIZE);
4006
4007         handle = __le16_to_cpu(hdr->handle);
4008
4009         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4010
4011         hdev->stat.sco_rx++;
4012
4013         hci_dev_lock(hdev);
4014         conn = hci_conn_hash_lookup_handle(hdev, handle);
4015         hci_dev_unlock(hdev);
4016
4017         if (conn) {
4018                 /* Send to upper protocol */
4019                 sco_recv_scodata(conn, skb);
4020                 return;
4021         } else {
4022                 BT_ERR("%s SCO packet for unknown connection handle %d",
4023                        hdev->name, handle);
4024         }
4025
4026         kfree_skb(skb);
4027 }
4028
4029 static bool hci_req_is_complete(struct hci_dev *hdev)
4030 {
4031         struct sk_buff *skb;
4032
4033         skb = skb_peek(&hdev->cmd_q);
4034         if (!skb)
4035                 return true;
4036
4037         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4038 }
4039
4040 static void hci_resend_last(struct hci_dev *hdev)
4041 {
4042         struct hci_command_hdr *sent;
4043         struct sk_buff *skb;
4044         u16 opcode;
4045
4046         if (!hdev->sent_cmd)
4047                 return;
4048
4049         sent = (void *) hdev->sent_cmd->data;
4050         opcode = __le16_to_cpu(sent->opcode);
4051         if (opcode == HCI_OP_RESET)
4052                 return;
4053
4054         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4055         if (!skb)
4056                 return;
4057
4058         skb_queue_head(&hdev->cmd_q, skb);
4059         queue_work(hdev->workqueue, &hdev->cmd_work);
4060 }
4061
4062 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4063                           hci_req_complete_t *req_complete,
4064                           hci_req_complete_skb_t *req_complete_skb)
4065 {
4066         struct sk_buff *skb;
4067         unsigned long flags;
4068
4069         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4070
4071         /* If the completed command doesn't match the last one that was
4072          * sent we need to do special handling of it.
4073          */
4074         if (!hci_sent_cmd_data(hdev, opcode)) {
4075                 /* Some CSR-based controllers generate a spontaneous
4076                  * reset complete event during init and any pending
4077                  * command will never be completed. In such a case we
4078                  * need to resend whatever was the last sent
4079                  * command.
4080                  */
4081                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4082                         hci_resend_last(hdev);
4083
4084                 return;
4085         }
4086
4087         /* If the command succeeded and there's still more commands in
4088          * this request the request is not yet complete.
4089          */
4090         if (!status && !hci_req_is_complete(hdev))
4091                 return;
4092
4093         /* If this was the last command in a request the complete
4094          * callback would be found in hdev->sent_cmd instead of the
4095          * command queue (hdev->cmd_q).
4096          */
4097         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4098                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4099                 return;
4100         }
4101
4102         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4103                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4104                 return;
4105         }
4106
4107         /* Remove all pending commands belonging to this request */
4108         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4109         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4110                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4111                         __skb_queue_head(&hdev->cmd_q, skb);
4112                         break;
4113                 }
4114
4115                 *req_complete = bt_cb(skb)->hci.req_complete;
4116                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4117                 kfree_skb(skb);
4118         }
4119         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4120 }
4121
4122 static void hci_rx_work(struct work_struct *work)
4123 {
4124         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4125         struct sk_buff *skb;
4126
4127         BT_DBG("%s", hdev->name);
4128
4129         while ((skb = skb_dequeue(&hdev->rx_q))) {
4130                 /* Send copy to monitor */
4131                 hci_send_to_monitor(hdev, skb);
4132
4133                 if (atomic_read(&hdev->promisc)) {
4134                         /* Send copy to the sockets */
4135                         hci_send_to_sock(hdev, skb);
4136                 }
4137
4138                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4139                         kfree_skb(skb);
4140                         continue;
4141                 }
4142
4143                 if (test_bit(HCI_INIT, &hdev->flags)) {
4144                         /* Don't process data packets in this state. */
4145                         switch (hci_skb_pkt_type(skb)) {
4146                         case HCI_ACLDATA_PKT:
4147                         case HCI_SCODATA_PKT:
4148                                 kfree_skb(skb);
4149                                 continue;
4150                         }
4151                 }
4152
4153                 /* Process frame */
4154                 switch (hci_skb_pkt_type(skb)) {
4155                 case HCI_EVENT_PKT:
4156                         BT_DBG("%s Event packet", hdev->name);
4157                         hci_event_packet(hdev, skb);
4158                         break;
4159
4160                 case HCI_ACLDATA_PKT:
4161                         BT_DBG("%s ACL data packet", hdev->name);
4162                         hci_acldata_packet(hdev, skb);
4163                         break;
4164
4165                 case HCI_SCODATA_PKT:
4166                         BT_DBG("%s SCO data packet", hdev->name);
4167                         hci_scodata_packet(hdev, skb);
4168                         break;
4169
4170                 default:
4171                         kfree_skb(skb);
4172                         break;
4173                 }
4174         }
4175 }
4176
4177 static void hci_cmd_work(struct work_struct *work)
4178 {
4179         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4180         struct sk_buff *skb;
4181
4182         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4183                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4184
4185         /* Send queued commands */
4186         if (atomic_read(&hdev->cmd_cnt)) {
4187                 skb = skb_dequeue(&hdev->cmd_q);
4188                 if (!skb)
4189                         return;
4190
4191                 kfree_skb(hdev->sent_cmd);
4192
4193                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4194                 if (hdev->sent_cmd) {
4195                         atomic_dec(&hdev->cmd_cnt);
4196                         hci_send_frame(hdev, skb);
4197                         if (test_bit(HCI_RESET, &hdev->flags))
4198                                 cancel_delayed_work(&hdev->cmd_timer);
4199                         else
4200                                 schedule_delayed_work(&hdev->cmd_timer,
4201                                                       HCI_CMD_TIMEOUT);
4202                 } else {
4203                         skb_queue_head(&hdev->cmd_q, skb);
4204                         queue_work(hdev->workqueue, &hdev->cmd_work);
4205                 }
4206         }
4207 }