/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed, the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
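
/*
 * These files appear under the controller's debugfs directory, typically
 * /sys/kernel/debug/bluetooth/hciX when debugfs is mounted in the default
 * location. A minimal usage sketch from a hypothetical shell session,
 * assuming a controller named hci0:
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode      (prints Y or N)
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writes accept any strtobool() style input ("Y", "N", "1", "0", ...);
 * dut_mode_write() returns -EALREADY when the requested state is already
 * active.
 */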

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs: the parameter is in units of
         * 0.625 ms baseband slots, so 0x7d00 (32000) * 0.625 ms = 20 s.
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
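
/*
 * Note: events[] above is the 64-bit parameter of the HCI Set Event Mask
 * command, least significant octet first; each set bit enables one event,
 * with the byte/bit assignments defined by the Set Event Mask command in
 * the Bluetooth Core Specification. The inline comments name the event
 * that each bit enables.
 */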

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To get proper debugging output, force max_page to at
                 * least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * This means a controller may run through its setup phase and then
         * discover missing settings. In that case this function will not
         * be called at that point; it will only be called later, during
         * the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
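
/*
 * A minimal usage sketch (hypothetical caller): every successful
 * hci_dev_get() must be balanced by hci_dev_put() once the device
 * reference is no longer needed.
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              ... use hdev ...
 *              hci_dev_put(hdev);
 *      }
 */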

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

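/* Re-insert the entry so that the resolve list stays ordered by ascending
 * |RSSI|, presumably so that the names of the strongest (closest) devices
 * are resolved first. Entries with a name request already pending are
 * skipped when choosing the insertion point.
 */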
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

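/* Dump up to num entries from the inquiry cache into buf as consecutive
 * struct inquiry_info records. The caller is expected to hold hdev->lock;
 * this cannot sleep, which is why hci_inquiry() below dumps into a
 * temporary kernel buffer before copying the results to user space.
 */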
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
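
/*
 * hci_inquiry() backs the HCIINQUIRY ioctl on HCI sockets. A rough
 * userspace sketch (hypothetical, error handling omitted), assuming a raw
 * HCI socket dd and the GIAC LAP 0x9e8b33 stored LSB first:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } buf = { 0 };
 *
 *      buf.ir.dev_id  = 0;
 *      buf.ir.flags   = IREQ_CACHE_FLUSH;
 *      buf.ir.lap[0]  = 0x33;
 *      buf.ir.lap[1]  = 0x8b;
 *      buf.ir.lap[2]  = 0x9e;
 *      buf.ir.length  = 8;
 *      buf.ir.num_rsp = 8;
 *
 *      ioctl(dd, HCIINQUIRY, &buf);
 */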

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_BREDR) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
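
/*
 * hci_dev_open() serves the HCIDEVUP ioctl. A userspace sketch
 * (hypothetical, assuming a raw HCI control socket ctl and device id 0):
 *
 *      if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *              perror("bringing up hci0 failed");
 */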
1494
1495 /* This function requires the caller holds hdev->lock */
1496 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1497 {
1498         struct hci_conn_params *p;
1499
1500         list_for_each_entry(p, &hdev->le_conn_params, list) {
1501                 if (p->conn) {
1502                         hci_conn_drop(p->conn);
1503                         hci_conn_put(p->conn);
1504                         p->conn = NULL;
1505                 }
1506                 list_del_init(&p->action);
1507         }
1508
1509         BT_DBG("All LE pending actions cleared");
1510 }
1511
1512 int hci_dev_do_close(struct hci_dev *hdev)
1513 {
1514         bool auto_off;
1515
1516         BT_DBG("%s %p", hdev->name, hdev);
1517
1518         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1519             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1520             test_bit(HCI_UP, &hdev->flags)) {
1521                 /* Execute vendor specific shutdown routine */
1522                 if (hdev->shutdown)
1523                         hdev->shutdown(hdev);
1524         }
1525
1526         cancel_delayed_work(&hdev->power_off);
1527
1528         hci_request_cancel_all(hdev);
1529         hci_req_sync_lock(hdev);
1530
1531         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1532                 cancel_delayed_work_sync(&hdev->cmd_timer);
1533                 hci_req_sync_unlock(hdev);
1534                 return 0;
1535         }
1536
1537         hci_leds_update_powered(hdev, false);
1538
1539         /* Flush RX and TX works */
1540         flush_work(&hdev->tx_work);
1541         flush_work(&hdev->rx_work);
1542
1543         if (hdev->discov_timeout > 0) {
1544                 hdev->discov_timeout = 0;
1545                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1546                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1547         }
1548
1549         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1550                 cancel_delayed_work(&hdev->service_cache);
1551
1552         if (hci_dev_test_flag(hdev, HCI_MGMT))
1553                 cancel_delayed_work_sync(&hdev->rpa_expired);
1554
1555         /* Avoid potential lockdep warnings from the *_flush() calls by
1556          * ensuring the workqueue is empty up front.
1557          */
1558         drain_workqueue(hdev->workqueue);
1559
1560         hci_dev_lock(hdev);
1561
1562         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1563
1564         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1565
1566         if (!auto_off && hdev->dev_type == HCI_BREDR &&
1567             hci_dev_test_flag(hdev, HCI_MGMT))
1568                 __mgmt_power_off(hdev);
1569
1570         hci_inquiry_cache_flush(hdev);
1571         hci_pend_le_actions_clear(hdev);
1572         hci_conn_hash_flush(hdev);
1573         hci_dev_unlock(hdev);
1574
1575         smp_unregister(hdev);
1576
1577         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1578
1579         if (hdev->flush)
1580                 hdev->flush(hdev);
1581
1582         /* Reset device */
1583         skb_queue_purge(&hdev->cmd_q);
1584         atomic_set(&hdev->cmd_cnt, 1);
1585         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1586             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1587                 set_bit(HCI_INIT, &hdev->flags);
1588                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1589                 clear_bit(HCI_INIT, &hdev->flags);
1590         }
1591
1592         /* Flush cmd work */
1593         flush_work(&hdev->cmd_work);
1594
1595         /* Drop queues */
1596         skb_queue_purge(&hdev->rx_q);
1597         skb_queue_purge(&hdev->cmd_q);
1598         skb_queue_purge(&hdev->raw_q);
1599
1600         /* Drop last sent command */
1601         if (hdev->sent_cmd) {
1602                 cancel_delayed_work_sync(&hdev->cmd_timer);
1603                 kfree_skb(hdev->sent_cmd);
1604                 hdev->sent_cmd = NULL;
1605         }
1606
1607         clear_bit(HCI_RUNNING, &hdev->flags);
1608         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1609
1610         /* After this point our queues are empty
1611          * and no tasks are scheduled. */
1612         hdev->close(hdev);
1613
1614         /* Clear flags */
1615         hdev->flags &= BIT(HCI_RAW);
1616         hci_dev_clear_volatile_flags(hdev);
1617
1618         /* Controller radio is available but is currently powered down */
1619         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1620
1621         memset(hdev->eir, 0, sizeof(hdev->eir));
1622         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1623         bacpy(&hdev->random_addr, BDADDR_ANY);
1624
1625         hci_req_sync_unlock(hdev);
1626
1627         hci_dev_put(hdev);
1628         return 0;
1629 }
1630
1631 int hci_dev_close(__u16 dev)
1632 {
1633         struct hci_dev *hdev;
1634         int err;
1635
1636         hdev = hci_dev_get(dev);
1637         if (!hdev)
1638                 return -ENODEV;
1639
1640         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1641                 err = -EBUSY;
1642                 goto done;
1643         }
1644
1645         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1646                 cancel_delayed_work(&hdev->power_off);
1647
1648         err = hci_dev_do_close(hdev);
1649
1650 done:
1651         hci_dev_put(hdev);
1652         return err;
1653 }
1654
1655 static int hci_dev_do_reset(struct hci_dev *hdev)
1656 {
1657         int ret;
1658
1659         BT_DBG("%s %p", hdev->name, hdev);
1660
1661         hci_req_sync_lock(hdev);
1662
1663         /* Drop queues */
1664         skb_queue_purge(&hdev->rx_q);
1665         skb_queue_purge(&hdev->cmd_q);
1666
1667         /* Avoid potential lockdep warnings from the *_flush() calls by
1668          * ensuring the workqueue is empty up front.
1669          */
1670         drain_workqueue(hdev->workqueue);
1671
1672         hci_dev_lock(hdev);
1673         hci_inquiry_cache_flush(hdev);
1674         hci_conn_hash_flush(hdev);
1675         hci_dev_unlock(hdev);
1676
1677         if (hdev->flush)
1678                 hdev->flush(hdev);
1679
1680         atomic_set(&hdev->cmd_cnt, 1);
1681         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1682
1683         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1684
1685         hci_req_sync_unlock(hdev);
1686         return ret;
1687 }
1688
1689 int hci_dev_reset(__u16 dev)
1690 {
1691         struct hci_dev *hdev;
1692         int err;
1693
1694         hdev = hci_dev_get(dev);
1695         if (!hdev)
1696                 return -ENODEV;
1697
1698         if (!test_bit(HCI_UP, &hdev->flags)) {
1699                 err = -ENETDOWN;
1700                 goto done;
1701         }
1702
1703         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1704                 err = -EBUSY;
1705                 goto done;
1706         }
1707
1708         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1709                 err = -EOPNOTSUPP;
1710                 goto done;
1711         }
1712
1713         err = hci_dev_do_reset(hdev);
1714
1715 done:
1716         hci_dev_put(hdev);
1717         return err;
1718 }
1719
1720 int hci_dev_reset_stat(__u16 dev)
1721 {
1722         struct hci_dev *hdev;
1723         int ret = 0;
1724
1725         hdev = hci_dev_get(dev);
1726         if (!hdev)
1727                 return -ENODEV;
1728
1729         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1730                 ret = -EBUSY;
1731                 goto done;
1732         }
1733
1734         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1735                 ret = -EOPNOTSUPP;
1736                 goto done;
1737         }
1738
1739         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1740
1741 done:
1742         hci_dev_put(hdev);
1743         return ret;
1744 }
1745
1746 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1747 {
1748         bool conn_changed, discov_changed;
1749
1750         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1751
1752         if ((scan & SCAN_PAGE))
1753                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1754                                                           HCI_CONNECTABLE);
1755         else
1756                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1757                                                            HCI_CONNECTABLE);
1758
1759         if ((scan & SCAN_INQUIRY)) {
1760                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1761                                                             HCI_DISCOVERABLE);
1762         } else {
1763                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1764                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1765                                                              HCI_DISCOVERABLE);
1766         }
1767
1768         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1769                 return;
1770
1771         if (conn_changed || discov_changed) {
1772                 /* In case this was disabled through mgmt */
1773                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1774
1775                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1776                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1777
1778                 mgmt_new_settings(hdev);
1779         }
1780 }
1781
1782 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1783 {
1784         struct hci_dev *hdev;
1785         struct hci_dev_req dr;
1786         int err = 0;
1787
1788         if (copy_from_user(&dr, arg, sizeof(dr)))
1789                 return -EFAULT;
1790
1791         hdev = hci_dev_get(dr.dev_id);
1792         if (!hdev)
1793                 return -ENODEV;
1794
1795         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1796                 err = -EBUSY;
1797                 goto done;
1798         }
1799
1800         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1801                 err = -EOPNOTSUPP;
1802                 goto done;
1803         }
1804
1805         if (hdev->dev_type != HCI_BREDR) {
1806                 err = -EOPNOTSUPP;
1807                 goto done;
1808         }
1809
1810         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1811                 err = -EOPNOTSUPP;
1812                 goto done;
1813         }
1814
1815         switch (cmd) {
1816         case HCISETAUTH:
1817                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1818                                    HCI_INIT_TIMEOUT, NULL);
1819                 break;
1820
1821         case HCISETENCRYPT:
1822                 if (!lmp_encrypt_capable(hdev)) {
1823                         err = -EOPNOTSUPP;
1824                         break;
1825                 }
1826
1827                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1828                         /* Auth must be enabled first */
1829                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1830                                            HCI_INIT_TIMEOUT, NULL);
1831                         if (err)
1832                                 break;
1833                 }
1834
1835                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1836                                    HCI_INIT_TIMEOUT, NULL);
1837                 break;
1838
1839         case HCISETSCAN:
1840                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1841                                    HCI_INIT_TIMEOUT, NULL);
1842
1843                 /* Ensure that the connectable and discoverable states
1844                  * get correctly modified as this was a non-mgmt change.
1845                  */
1846                 if (!err)
1847                         hci_update_scan_state(hdev, dr.dev_opt);
1848                 break;
1849
1850         case HCISETLINKPOL:
1851                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1852                                    HCI_INIT_TIMEOUT, NULL);
1853                 break;
1854
1855         case HCISETLINKMODE:
1856                 hdev->link_mode = ((__u16) dr.dev_opt) &
1857                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1858                 break;
1859
1860         case HCISETPTYPE:
1861                 hdev->pkt_type = (__u16) dr.dev_opt;
1862                 break;
1863
1864         case HCISETACLMTU:
1865                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1866                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1867                 break;
1868
1869         case HCISETSCOMTU:
1870                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1871                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1872                 break;
1873
1874         default:
1875                 err = -EINVAL;
1876                 break;
1877         }
1878
1879 done:
1880         hci_dev_put(hdev);
1881         return err;
1882 }
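
/*
 * Illustrative userspace sketch (not part of this file; assumes BlueZ's
 * <bluetooth/hci.h> for struct hci_dev_req and the SCAN_* constants, and
 * that dd is an AF_BLUETOOTH/SOCK_RAW/BTPROTO_HCI socket): driving the
 * HCISETSCAN case above the way "hciconfig hci0 piscan" does, which in
 * turn triggers hci_update_scan_state().
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,				// hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	// connectable + discoverable
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */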
1883
1884 int hci_get_dev_list(void __user *arg)
1885 {
1886         struct hci_dev *hdev;
1887         struct hci_dev_list_req *dl;
1888         struct hci_dev_req *dr;
1889         int n = 0, size, err;
1890         __u16 dev_num;
1891
1892         if (get_user(dev_num, (__u16 __user *) arg))
1893                 return -EFAULT;
1894
1895         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1896                 return -EINVAL;
1897
1898         size = sizeof(*dl) + dev_num * sizeof(*dr);
1899
1900         dl = kzalloc(size, GFP_KERNEL);
1901         if (!dl)
1902                 return -ENOMEM;
1903
1904         dr = dl->dev_req;
1905
1906         read_lock(&hci_dev_list_lock);
1907         list_for_each_entry(hdev, &hci_dev_list, list) {
1908                 unsigned long flags = hdev->flags;
1909
1910                 /* When auto-off is configured the transport is actually
1911                  * running, but the device should still be reported as
1912                  * down.
1913                  */
1914                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1915                         flags &= ~BIT(HCI_UP);
1916
1917                 (dr + n)->dev_id  = hdev->id;
1918                 (dr + n)->dev_opt = flags;
1919
1920                 if (++n >= dev_num)
1921                         break;
1922         }
1923         read_unlock(&hci_dev_list_lock);
1924
1925         dl->dev_num = n;
1926         size = sizeof(*dl) + n * sizeof(*dr);
1927
1928         err = copy_to_user(arg, dl, size);
1929         kfree(dl);
1930
1931         return err ? -EFAULT : 0;
1932 }
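
/*
 * Illustrative userspace sketch (not part of this file; assumes BlueZ's
 * <bluetooth/hci.h> for struct hci_dev_list_req, HCIGETDEVLIST and
 * HCI_MAX_DEV, with dd being a raw HCI socket as in the earlier sketch):
 * enumerating controllers through the ioctl handled above.
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	if (!dl)
 *		return -ENOMEM;
 *
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(dd, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%lx\n", dl->dev_req[i].dev_id,
 *			       (unsigned long) dl->dev_req[i].dev_opt);
 *	free(dl);
 */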
1933
1934 int hci_get_dev_info(void __user *arg)
1935 {
1936         struct hci_dev *hdev;
1937         struct hci_dev_info di;
1938         unsigned long flags;
1939         int err = 0;
1940
1941         if (copy_from_user(&di, arg, sizeof(di)))
1942                 return -EFAULT;
1943
1944         hdev = hci_dev_get(di.dev_id);
1945         if (!hdev)
1946                 return -ENODEV;
1947
1948         /* When auto-off is configured the transport is actually
1949          * running, but the device should still be reported as
1950          * down.
1951          */
1952         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1953                 flags = hdev->flags & ~BIT(HCI_UP);
1954         else
1955                 flags = hdev->flags;
1956
1957         strcpy(di.name, hdev->name);
1958         di.bdaddr   = hdev->bdaddr;
1959         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1960         di.flags    = flags;
1961         di.pkt_type = hdev->pkt_type;
1962         if (lmp_bredr_capable(hdev)) {
1963                 di.acl_mtu  = hdev->acl_mtu;
1964                 di.acl_pkts = hdev->acl_pkts;
1965                 di.sco_mtu  = hdev->sco_mtu;
1966                 di.sco_pkts = hdev->sco_pkts;
1967         } else {
1968                 di.acl_mtu  = hdev->le_mtu;
1969                 di.acl_pkts = hdev->le_pkts;
1970                 di.sco_mtu  = 0;
1971                 di.sco_pkts = 0;
1972         }
1973         di.link_policy = hdev->link_policy;
1974         di.link_mode   = hdev->link_mode;
1975
1976         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1977         memcpy(&di.features, &hdev->features, sizeof(di.features));
1978
1979         if (copy_to_user(arg, &di, sizeof(di)))
1980                 err = -EFAULT;
1981
1982         hci_dev_put(hdev);
1983
1984         return err;
1985 }
1986
1987 /* ---- Interface to HCI drivers ---- */
1988
1989 static int hci_rfkill_set_block(void *data, bool blocked)
1990 {
1991         struct hci_dev *hdev = data;
1992
1993         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1994
1995         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1996                 return -EBUSY;
1997
1998         if (blocked) {
1999                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2000                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2001                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2002                         hci_dev_do_close(hdev);
2003         } else {
2004                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2005         }
2006
2007         return 0;
2008 }
2009
2010 static const struct rfkill_ops hci_rfkill_ops = {
2011         .set_block = hci_rfkill_set_block,
2012 };
2013
2014 static void hci_power_on(struct work_struct *work)
2015 {
2016         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2017         int err;
2018
2019         BT_DBG("%s", hdev->name);
2020
2021         if (test_bit(HCI_UP, &hdev->flags) &&
2022             hci_dev_test_flag(hdev, HCI_MGMT) &&
2023             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2024                 cancel_delayed_work(&hdev->power_off);
2025                 hci_req_sync_lock(hdev);
2026                 err = __hci_req_hci_power_on(hdev);
2027                 hci_req_sync_unlock(hdev);
2028                 mgmt_power_on(hdev, err);
2029                 return;
2030         }
2031
2032         err = hci_dev_do_open(hdev);
2033         if (err < 0) {
2034                 hci_dev_lock(hdev);
2035                 mgmt_set_powered_failed(hdev, err);
2036                 hci_dev_unlock(hdev);
2037                 return;
2038         }
2039
2040         /* During the HCI setup phase, a few error conditions are
2041          * ignored and they need to be checked now. If they are still
2042          * valid, it is important to turn the device back off.
2043          */
2044         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2045             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2046             (hdev->dev_type == HCI_BREDR &&
2047              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2048              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2049                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2050                 hci_dev_do_close(hdev);
2051         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2052                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2053                                    HCI_AUTO_OFF_TIMEOUT);
2054         }
2055
2056         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2057                 /* For unconfigured devices, set the HCI_RAW flag
2058                  * so that userspace can easily identify them.
2059                  */
2060                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2061                         set_bit(HCI_RAW, &hdev->flags);
2062
2063                 /* For fully configured devices, this will send
2064                  * the Index Added event. For unconfigured devices,
2065                  * it will send an Unconfigured Index Added event.
2066                  *
2067                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2068                  * and no event will be sent.
2069                  */
2070                 mgmt_index_added(hdev);
2071         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2072                 /* Now that the controller is configured, it is
2073                  * important to clear the HCI_RAW flag.
2074                  */
2075                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2076                         clear_bit(HCI_RAW, &hdev->flags);
2077
2078                 /* Powering on the controller with HCI_CONFIG set only
2079                  * happens with the transition from unconfigured to
2080                  * configured. This will send the Index Added event.
2081                  */
2082                 mgmt_index_added(hdev);
2083         }
2084 }
2085
2086 static void hci_power_off(struct work_struct *work)
2087 {
2088         struct hci_dev *hdev = container_of(work, struct hci_dev,
2089                                             power_off.work);
2090
2091         BT_DBG("%s", hdev->name);
2092
2093         hci_dev_do_close(hdev);
2094 }
2095
2096 static void hci_error_reset(struct work_struct *work)
2097 {
2098         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2099
2100         BT_DBG("%s", hdev->name);
2101
2102         if (hdev->hw_error)
2103                 hdev->hw_error(hdev, hdev->hw_error_code);
2104         else
2105                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2106                        hdev->hw_error_code);
2107
2108         if (hci_dev_do_close(hdev))
2109                 return;
2110
2111         hci_dev_do_open(hdev);
2112 }
2113
2114 void hci_uuids_clear(struct hci_dev *hdev)
2115 {
2116         struct bt_uuid *uuid, *tmp;
2117
2118         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2119                 list_del(&uuid->list);
2120                 kfree(uuid);
2121         }
2122 }
2123
2124 void hci_link_keys_clear(struct hci_dev *hdev)
2125 {
2126         struct link_key *key;
2127
2128         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2129                 list_del_rcu(&key->list);
2130                 kfree_rcu(key, rcu);
2131         }
2132 }
2133
2134 void hci_smp_ltks_clear(struct hci_dev *hdev)
2135 {
2136         struct smp_ltk *k;
2137
2138         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2139                 list_del_rcu(&k->list);
2140                 kfree_rcu(k, rcu);
2141         }
2142 }
2143
2144 void hci_smp_irks_clear(struct hci_dev *hdev)
2145 {
2146         struct smp_irk *k;
2147
2148         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2149                 list_del_rcu(&k->list);
2150                 kfree_rcu(k, rcu);
2151         }
2152 }
2153
2154 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2155 {
2156         struct link_key *k;
2157
2158         rcu_read_lock();
2159         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2160                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2161                         rcu_read_unlock();
2162                         return k;
2163                 }
2164         }
2165         rcu_read_unlock();
2166
2167         return NULL;
2168 }
2169
2170 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2171                                u8 key_type, u8 old_key_type)
2172 {
2173         /* Legacy key */
2174         if (key_type < 0x03)
2175                 return true;
2176
2177         /* Debug keys are insecure so don't store them persistently */
2178         if (key_type == HCI_LK_DEBUG_COMBINATION)
2179                 return false;
2180
2181         /* Changed combination key and there's no previous one */
2182         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2183                 return false;
2184
2185         /* Security mode 3 case */
2186         if (!conn)
2187                 return true;
2188
2189         /* BR/EDR key derived using SC from an LE link */
2190         if (conn->type == LE_LINK)
2191                 return true;
2192
2193         /* Neither the local nor the remote side requested no-bonding */
2194         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2195                 return true;
2196
2197         /* Local side had dedicated bonding as requirement */
2198         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2199                 return true;
2200
2201         /* Remote side had dedicated bonding as requirement */
2202         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2203                 return true;
2204
2205         /* If none of the above criteria match, then don't store the key
2206          * persistently */
2207         return false;
2208 }
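
/* Worked examples of the rules above: a legacy key (type 0x00-0x02) or a
 * BR/EDR key derived over an LE link is always stored; a debug combination
 * key (0x03) never is. For SSP keys the authentication requirements decide:
 * the key is kept when both sides requested some form of bonding (value
 * above 0x01) or when either side asked for dedicated bonding (0x02/0x03),
 * and is treated as temporary when both sides requested no-bonding.
 */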
2209
2210 static u8 ltk_role(u8 type)
2211 {
2212         if (type == SMP_LTK)
2213                 return HCI_ROLE_MASTER;
2214
2215         return HCI_ROLE_SLAVE;
2216 }
2217
2218 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2219                              u8 addr_type, u8 role)
2220 {
2221         struct smp_ltk *k;
2222
2223         rcu_read_lock();
2224         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2225                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2226                         continue;
2227
2228                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2229                         rcu_read_unlock();
2230                         return k;
2231                 }
2232         }
2233         rcu_read_unlock();
2234
2235         return NULL;
2236 }
2237
2238 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2239 {
2240         struct smp_irk *irk;
2241
2242         rcu_read_lock();
2243         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2244                 if (!bacmp(&irk->rpa, rpa)) {
2245                         rcu_read_unlock();
2246                         return irk;
2247                 }
2248         }
2249
2250         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2251                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2252                         bacpy(&irk->rpa, rpa);
2253                         rcu_read_unlock();
2254                         return irk;
2255                 }
2256         }
2257         rcu_read_unlock();
2258
2259         return NULL;
2260 }
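
/* The two passes above implement a simple cache: an RPA that was resolved
 * before matches with a single bacmp(), and only previously unseen
 * addresses pay for the AES-based smp_irk_matches() check, whose result is
 * then remembered in irk->rpa for the next lookup.
 */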
2261
2262 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263                                      u8 addr_type)
2264 {
2265         struct smp_irk *irk;
2266
2267         /* Identity Address must be public or static random */
2268         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2269                 return NULL;
2270
2271         rcu_read_lock();
2272         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2273                 if (addr_type == irk->addr_type &&
2274                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2275                         rcu_read_unlock();
2276                         return irk;
2277                 }
2278         }
2279         rcu_read_unlock();
2280
2281         return NULL;
2282 }
2283
2284 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2285                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2286                                   u8 pin_len, bool *persistent)
2287 {
2288         struct link_key *key, *old_key;
2289         u8 old_key_type;
2290
2291         old_key = hci_find_link_key(hdev, bdaddr);
2292         if (old_key) {
2293                 old_key_type = old_key->type;
2294                 key = old_key;
2295         } else {
2296                 old_key_type = conn ? conn->key_type : 0xff;
2297                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2298                 if (!key)
2299                         return NULL;
2300                 list_add_rcu(&key->list, &hdev->link_keys);
2301         }
2302
2303         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2304
2305         /* Some buggy controller combinations generate a changed
2306          * combination key for legacy pairing even when there's no
2307          * previous key */
2308         if (type == HCI_LK_CHANGED_COMBINATION &&
2309             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2310                 type = HCI_LK_COMBINATION;
2311                 if (conn)
2312                         conn->key_type = type;
2313         }
2314
2315         bacpy(&key->bdaddr, bdaddr);
2316         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2317         key->pin_len = pin_len;
2318
2319         if (type == HCI_LK_CHANGED_COMBINATION)
2320                 key->type = old_key_type;
2321         else
2322                 key->type = type;
2323
2324         if (persistent)
2325                 *persistent = hci_persistent_key(hdev, conn, type,
2326                                                  old_key_type);
2327
2328         return key;
2329 }
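
/*
 * Sketch of a typical caller, loosely following the Link Key Notification
 * handling in hci_event.c (illustrative, not a verbatim copy):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length, &persistent);
 *	if (key && hci_dev_test_flag(hdev, HCI_MGMT))
 *		mgmt_new_link_key(hdev, key, persistent);
 */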
2330
2331 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2332                             u8 addr_type, u8 type, u8 authenticated,
2333                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2334 {
2335         struct smp_ltk *key, *old_key;
2336         u8 role = ltk_role(type);
2337
2338         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2339         if (old_key)
2340                 key = old_key;
2341         else {
2342                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2343                 if (!key)
2344                         return NULL;
2345                 list_add_rcu(&key->list, &hdev->long_term_keys);
2346         }
2347
2348         bacpy(&key->bdaddr, bdaddr);
2349         key->bdaddr_type = addr_type;
2350         memcpy(key->val, tk, sizeof(key->val));
2351         key->authenticated = authenticated;
2352         key->ediv = ediv;
2353         key->rand = rand;
2354         key->enc_size = enc_size;
2355         key->type = type;
2356
2357         return key;
2358 }
2359
2360 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2362 {
2363         struct smp_irk *irk;
2364
2365         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2366         if (!irk) {
2367                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2368                 if (!irk)
2369                         return NULL;
2370
2371                 bacpy(&irk->bdaddr, bdaddr);
2372                 irk->addr_type = addr_type;
2373
2374                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2375         }
2376
2377         memcpy(irk->val, val, 16);
2378         bacpy(&irk->rpa, rpa);
2379
2380         return irk;
2381 }
2382
2383 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2384 {
2385         struct link_key *key;
2386
2387         key = hci_find_link_key(hdev, bdaddr);
2388         if (!key)
2389                 return -ENOENT;
2390
2391         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2392
2393         list_del_rcu(&key->list);
2394         kfree_rcu(key, rcu);
2395
2396         return 0;
2397 }
2398
2399 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2400 {
2401         struct smp_ltk *k;
2402         int removed = 0;
2403
2404         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2405                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2406                         continue;
2407
2408                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2409
2410                 list_del_rcu(&k->list);
2411                 kfree_rcu(k, rcu);
2412                 removed++;
2413         }
2414
2415         return removed ? 0 : -ENOENT;
2416 }
2417
2418 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2419 {
2420         struct smp_irk *k;
2421
2422         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2423                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2424                         continue;
2425
2426                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2427
2428                 list_del_rcu(&k->list);
2429                 kfree_rcu(k, rcu);
2430         }
2431 }
2432
2433 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2434 {
2435         struct smp_ltk *k;
2436         struct smp_irk *irk;
2437         u8 addr_type;
2438
2439         if (type == BDADDR_BREDR) {
2440                 if (hci_find_link_key(hdev, bdaddr))
2441                         return true;
2442                 return false;
2443         }
2444
2445         /* Convert to HCI addr type which struct smp_ltk uses */
2446         if (type == BDADDR_LE_PUBLIC)
2447                 addr_type = ADDR_LE_DEV_PUBLIC;
2448         else
2449                 addr_type = ADDR_LE_DEV_RANDOM;
2450
2451         irk = hci_get_irk(hdev, bdaddr, addr_type);
2452         if (irk) {
2453                 bdaddr = &irk->bdaddr;
2454                 addr_type = irk->addr_type;
2455         }
2456
2457         rcu_read_lock();
2458         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2459                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2460                         rcu_read_unlock();
2461                         return true;
2462                 }
2463         }
2464         rcu_read_unlock();
2465
2466         return false;
2467 }
2468
2469 /* HCI command timer function */
2470 static void hci_cmd_timeout(struct work_struct *work)
2471 {
2472         struct hci_dev *hdev = container_of(work, struct hci_dev,
2473                                             cmd_timer.work);
2474
2475         if (hdev->sent_cmd) {
2476                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2477                 u16 opcode = __le16_to_cpu(sent->opcode);
2478
2479                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2480         } else {
2481                 BT_ERR("%s command tx timeout", hdev->name);
2482         }
2483
2484         atomic_set(&hdev->cmd_cnt, 1);
2485         queue_work(hdev->workqueue, &hdev->cmd_work);
2486 }
2487
2488 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2489                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2490 {
2491         struct oob_data *data;
2492
2493         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2494                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2495                         continue;
2496                 if (data->bdaddr_type != bdaddr_type)
2497                         continue;
2498                 return data;
2499         }
2500
2501         return NULL;
2502 }
2503
2504 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2505                                u8 bdaddr_type)
2506 {
2507         struct oob_data *data;
2508
2509         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2510         if (!data)
2511                 return -ENOENT;
2512
2513         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2514
2515         list_del(&data->list);
2516         kfree(data);
2517
2518         return 0;
2519 }
2520
2521 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2522 {
2523         struct oob_data *data, *n;
2524
2525         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2526                 list_del(&data->list);
2527                 kfree(data);
2528         }
2529 }
2530
2531 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2532                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2533                             u8 *hash256, u8 *rand256)
2534 {
2535         struct oob_data *data;
2536
2537         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2538         if (!data) {
2539                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2540                 if (!data)
2541                         return -ENOMEM;
2542
2543                 bacpy(&data->bdaddr, bdaddr);
2544                 data->bdaddr_type = bdaddr_type;
2545                 list_add(&data->list, &hdev->remote_oob_data);
2546         }
2547
2548         if (hash192 && rand192) {
2549                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2550                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2551                 if (hash256 && rand256)
2552                         data->present = 0x03;
2553         } else {
2554                 memset(data->hash192, 0, sizeof(data->hash192));
2555                 memset(data->rand192, 0, sizeof(data->rand192));
2556                 if (hash256 && rand256)
2557                         data->present = 0x02;
2558                 else
2559                         data->present = 0x00;
2560         }
2561
2562         if (hash256 && rand256) {
2563                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2564                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2565         } else {
2566                 memset(data->hash256, 0, sizeof(data->hash256));
2567                 memset(data->rand256, 0, sizeof(data->rand256));
2568                 if (hash192 && rand192)
2569                         data->present = 0x01;
2570         }
2571
2572         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2573
2574         return 0;
2575 }
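
/* The resulting data->present value mirrors the OOB Data Present
 * parameter: 0x00 no values, 0x01 P-192 only, 0x02 P-256 only and
 * 0x03 both P-192 and P-256 values available.
 */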
2576
2577 /* This function requires the caller holds hdev->lock */
2578 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2579 {
2580         struct adv_info *adv_instance;
2581
2582         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2583                 if (adv_instance->instance == instance)
2584                         return adv_instance;
2585         }
2586
2587         return NULL;
2588 }
2589
2590 /* This function requires the caller holds hdev->lock */
2591 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2592 {
2593         struct adv_info *cur_instance;
2594
2595         cur_instance = hci_find_adv_instance(hdev, instance);
2596         if (!cur_instance)
2597                 return NULL;
2598
2599         if (cur_instance == list_last_entry(&hdev->adv_instances,
2600                                             struct adv_info, list))
2601                 return list_first_entry(&hdev->adv_instances,
2602                                                  struct adv_info, list);
2603         else
2604                 return list_next_entry(cur_instance, list);
2605 }
2606
2607 /* This function requires the caller holds hdev->lock */
2608 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2609 {
2610         struct adv_info *adv_instance;
2611
2612         adv_instance = hci_find_adv_instance(hdev, instance);
2613         if (!adv_instance)
2614                 return -ENOENT;
2615
2616         BT_DBG("%s removing instance %d", hdev->name, instance);
2617
2618         if (hdev->cur_adv_instance == instance) {
2619                 if (hdev->adv_instance_timeout) {
2620                         cancel_delayed_work(&hdev->adv_instance_expire);
2621                         hdev->adv_instance_timeout = 0;
2622                 }
2623                 hdev->cur_adv_instance = 0x00;
2624         }
2625
2626         list_del(&adv_instance->list);
2627         kfree(adv_instance);
2628
2629         hdev->adv_instance_cnt--;
2630
2631         return 0;
2632 }
2633
2634 /* This function requires the caller holds hdev->lock */
2635 void hci_adv_instances_clear(struct hci_dev *hdev)
2636 {
2637         struct adv_info *adv_instance, *n;
2638
2639         if (hdev->adv_instance_timeout) {
2640                 cancel_delayed_work(&hdev->adv_instance_expire);
2641                 hdev->adv_instance_timeout = 0;
2642         }
2643
2644         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2645                 list_del(&adv_instance->list);
2646                 kfree(adv_instance);
2647         }
2648
2649         hdev->adv_instance_cnt = 0;
2650         hdev->cur_adv_instance = 0x00;
2651 }
2652
2653 /* This function requires the caller holds hdev->lock */
2654 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2655                          u16 adv_data_len, u8 *adv_data,
2656                          u16 scan_rsp_len, u8 *scan_rsp_data,
2657                          u16 timeout, u16 duration)
2658 {
2659         struct adv_info *adv_instance;
2660
2661         adv_instance = hci_find_adv_instance(hdev, instance);
2662         if (adv_instance) {
2663                 memset(adv_instance->adv_data, 0,
2664                        sizeof(adv_instance->adv_data));
2665                 memset(adv_instance->scan_rsp_data, 0,
2666                        sizeof(adv_instance->scan_rsp_data));
2667         } else {
2668                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2669                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2670                         return -EOVERFLOW;
2671
2672                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2673                 if (!adv_instance)
2674                         return -ENOMEM;
2675
2676                 adv_instance->pending = true;
2677                 adv_instance->instance = instance;
2678                 list_add(&adv_instance->list, &hdev->adv_instances);
2679                 hdev->adv_instance_cnt++;
2680         }
2681
2682         adv_instance->flags = flags;
2683         adv_instance->adv_data_len = adv_data_len;
2684         adv_instance->scan_rsp_len = scan_rsp_len;
2685
2686         if (adv_data_len)
2687                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2688
2689         if (scan_rsp_len)
2690                 memcpy(adv_instance->scan_rsp_data,
2691                        scan_rsp_data, scan_rsp_len);
2692
2693         adv_instance->timeout = timeout;
2694         adv_instance->remaining_time = timeout;
2695
2696         if (duration == 0)
2697                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2698         else
2699                 adv_instance->duration = duration;
2700
2701         BT_DBG("%s for instance %d", hdev->name, instance);
2702
2703         return 0;
2704 }
2705
2706 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2707                                          bdaddr_t *bdaddr, u8 type)
2708 {
2709         struct bdaddr_list *b;
2710
2711         list_for_each_entry(b, bdaddr_list, list) {
2712                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2713                         return b;
2714         }
2715
2716         return NULL;
2717 }
2718
2719 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2720 {
2721         struct bdaddr_list *b, *n;
2722
2723         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2724                 list_del(&b->list);
2725                 kfree(b);
2726         }
2727 }
2728
2729 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2730 {
2731         struct bdaddr_list *entry;
2732
2733         if (!bacmp(bdaddr, BDADDR_ANY))
2734                 return -EBADF;
2735
2736         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2737                 return -EEXIST;
2738
2739         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2740         if (!entry)
2741                 return -ENOMEM;
2742
2743         bacpy(&entry->bdaddr, bdaddr);
2744         entry->bdaddr_type = type;
2745
2746         list_add(&entry->list, list);
2747
2748         return 0;
2749 }
2750
2751 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2752 {
2753         struct bdaddr_list *entry;
2754
2755         if (!bacmp(bdaddr, BDADDR_ANY)) {
2756                 hci_bdaddr_list_clear(list);
2757                 return 0;
2758         }
2759
2760         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2761         if (!entry)
2762                 return -ENOENT;
2763
2764         list_del(&entry->list);
2765         kfree(entry);
2766
2767         return 0;
2768 }
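
/*
 * Minimal usage sketch for the list helpers above (illustrative; the list
 * would typically be hdev->blacklist, hdev->whitelist or
 * hdev->le_white_list, accessed under hdev->lock):
 *
 *	if (hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR) ==
 *	    -EEXIST)
 *		;	// entry was already present
 *
 *	if (hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr, BDADDR_BREDR))
 *		hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 */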
2769
2770 /* This function requires the caller holds hdev->lock */
2771 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2772                                                bdaddr_t *addr, u8 addr_type)
2773 {
2774         struct hci_conn_params *params;
2775
2776         list_for_each_entry(params, &hdev->le_conn_params, list) {
2777                 if (bacmp(&params->addr, addr) == 0 &&
2778                     params->addr_type == addr_type) {
2779                         return params;
2780                 }
2781         }
2782
2783         return NULL;
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2788                                                   bdaddr_t *addr, u8 addr_type)
2789 {
2790         struct hci_conn_params *param;
2791
2792         list_for_each_entry(param, list, action) {
2793                 if (bacmp(&param->addr, addr) == 0 &&
2794                     param->addr_type == addr_type)
2795                         return param;
2796         }
2797
2798         return NULL;
2799 }
2800
2801 /* This function requires the caller holds hdev->lock */
2802 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2803                                             bdaddr_t *addr, u8 addr_type)
2804 {
2805         struct hci_conn_params *params;
2806
2807         params = hci_conn_params_lookup(hdev, addr, addr_type);
2808         if (params)
2809                 return params;
2810
2811         params = kzalloc(sizeof(*params), GFP_KERNEL);
2812         if (!params) {
2813                 BT_ERR("Out of memory");
2814                 return NULL;
2815         }
2816
2817         bacpy(&params->addr, addr);
2818         params->addr_type = addr_type;
2819
2820         list_add(&params->list, &hdev->le_conn_params);
2821         INIT_LIST_HEAD(&params->action);
2822
2823         params->conn_min_interval = hdev->le_conn_min_interval;
2824         params->conn_max_interval = hdev->le_conn_max_interval;
2825         params->conn_latency = hdev->le_conn_latency;
2826         params->supervision_timeout = hdev->le_supv_timeout;
2827         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2828
2829         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2830
2831         return params;
2832 }
2833
2834 static void hci_conn_params_free(struct hci_conn_params *params)
2835 {
2836         if (params->conn) {
2837                 hci_conn_drop(params->conn);
2838                 hci_conn_put(params->conn);
2839         }
2840
2841         list_del(&params->action);
2842         list_del(&params->list);
2843         kfree(params);
2844 }
2845
2846 /* This function requires the caller holds hdev->lock */
2847 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2848 {
2849         struct hci_conn_params *params;
2850
2851         params = hci_conn_params_lookup(hdev, addr, addr_type);
2852         if (!params)
2853                 return;
2854
2855         hci_conn_params_free(params);
2856
2857         hci_update_background_scan(hdev);
2858
2859         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2860 }
2861
2862 /* This function requires the caller holds hdev->lock */
2863 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2864 {
2865         struct hci_conn_params *params, *tmp;
2866
2867         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2868                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2869                         continue;
2870
2871                 /* If trying to establish a one-time connection to a
2872                  * disabled device, leave the params but mark them for
2873                  * explicit connects only.
2873                  */
2874                 if (params->explicit_connect) {
2875                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2876                         continue;
2877                 }
2878
2879                 list_del(&params->list);
2880                 kfree(params);
2881         }
2882
2883         BT_DBG("All LE disabled connection parameters were removed");
2884 }
2885
2886 /* This function requires the caller holds hdev->lock */
2887 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2888 {
2889         struct hci_conn_params *params, *tmp;
2890
2891         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2892                 hci_conn_params_free(params);
2893
2894         BT_DBG("All LE connection parameters were removed");
2895 }
2896
2897 /* Copy the Identity Address of the controller.
2898  *
2899  * If the controller has a public BD_ADDR, then by default use that one.
2900  * If this is an LE-only controller without a public address, default to
2901  * the static random address.
2902  *
2903  * For debugging purposes it is possible to force controllers with a
2904  * public address to use the static random address instead.
2905  *
2906  * In case BR/EDR has been disabled on a dual-mode controller and
2907  * userspace has configured a static address, then that address
2908  * becomes the identity address instead of the public BR/EDR address.
2909  */
2910 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2911                                u8 *bdaddr_type)
2912 {
2913         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2914             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2915             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2916              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2917                 bacpy(bdaddr, &hdev->static_addr);
2918                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2919         } else {
2920                 bacpy(bdaddr, &hdev->bdaddr);
2921                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2922         }
2923 }
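
/* Decision summary for the function above: the static random address is
 * used when HCI_FORCE_STATIC_ADDR is set, when no public address exists
 * at all, or when BR/EDR is disabled and a static address has been
 * configured; in every other case the public bdaddr is the identity.
 */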
2924
2925 /* Alloc HCI device */
2926 struct hci_dev *hci_alloc_dev(void)
2927 {
2928         struct hci_dev *hdev;
2929
2930         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2931         if (!hdev)
2932                 return NULL;
2933
2934         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2935         hdev->esco_type = (ESCO_HV1);
2936         hdev->link_mode = (HCI_LM_ACCEPT);
2937         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2938         hdev->io_capability = 0x03;     /* No Input No Output */
2939         hdev->manufacturer = 0xffff;    /* Default to internal use */
2940         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2941         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2942         hdev->adv_instance_cnt = 0;
2943         hdev->cur_adv_instance = 0x00;
2944         hdev->adv_instance_timeout = 0;
2945
2946         hdev->sniff_max_interval = 800;
2947         hdev->sniff_min_interval = 80;
2948
2949         hdev->le_adv_channel_map = 0x07;
2950         hdev->le_adv_min_interval = 0x0800;
2951         hdev->le_adv_max_interval = 0x0800;
2952         hdev->le_scan_interval = 0x0060;
2953         hdev->le_scan_window = 0x0030;
2954         hdev->le_conn_min_interval = 0x0028;
2955         hdev->le_conn_max_interval = 0x0038;
2956         hdev->le_conn_latency = 0x0000;
2957         hdev->le_supv_timeout = 0x002a;
2958         hdev->le_def_tx_len = 0x001b;
2959         hdev->le_def_tx_time = 0x0148;
2960         hdev->le_max_tx_len = 0x001b;
2961         hdev->le_max_tx_time = 0x0148;
2962         hdev->le_max_rx_len = 0x001b;
2963         hdev->le_max_rx_time = 0x0148;
2964
2965         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2966         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2967         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2968         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2969
2970         mutex_init(&hdev->lock);
2971         mutex_init(&hdev->req_lock);
2972
2973         INIT_LIST_HEAD(&hdev->mgmt_pending);
2974         INIT_LIST_HEAD(&hdev->blacklist);
2975         INIT_LIST_HEAD(&hdev->whitelist);
2976         INIT_LIST_HEAD(&hdev->uuids);
2977         INIT_LIST_HEAD(&hdev->link_keys);
2978         INIT_LIST_HEAD(&hdev->long_term_keys);
2979         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2980         INIT_LIST_HEAD(&hdev->remote_oob_data);
2981         INIT_LIST_HEAD(&hdev->le_white_list);
2982         INIT_LIST_HEAD(&hdev->le_conn_params);
2983         INIT_LIST_HEAD(&hdev->pend_le_conns);
2984         INIT_LIST_HEAD(&hdev->pend_le_reports);
2985         INIT_LIST_HEAD(&hdev->conn_hash.list);
2986         INIT_LIST_HEAD(&hdev->adv_instances);
2987
2988         INIT_WORK(&hdev->rx_work, hci_rx_work);
2989         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2990         INIT_WORK(&hdev->tx_work, hci_tx_work);
2991         INIT_WORK(&hdev->power_on, hci_power_on);
2992         INIT_WORK(&hdev->error_reset, hci_error_reset);
2993
2994         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2995
2996         skb_queue_head_init(&hdev->rx_q);
2997         skb_queue_head_init(&hdev->cmd_q);
2998         skb_queue_head_init(&hdev->raw_q);
2999
3000         init_waitqueue_head(&hdev->req_wait_q);
3001
3002         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3003
3004         hci_request_setup(hdev);
3005
3006         hci_init_sysfs(hdev);
3007         discovery_init(hdev);
3008
3009         return hdev;
3010 }
3011 EXPORT_SYMBOL(hci_alloc_dev);
3012
3013 /* Free HCI device */
3014 void hci_free_dev(struct hci_dev *hdev)
3015 {
3016         /* will free via device release */
3017         put_device(&hdev->dev);
3018 }
3019 EXPORT_SYMBOL(hci_free_dev);
3020
3021 /* Register HCI device */
3022 int hci_register_dev(struct hci_dev *hdev)
3023 {
3024         int id, error;
3025
3026         if (!hdev->open || !hdev->close || !hdev->send)
3027                 return -EINVAL;
3028
3029         /* Do not allow HCI_AMP devices to register at index 0,
3030          * so the index can be used as the AMP controller ID.
3031          */
3032         switch (hdev->dev_type) {
3033         case HCI_BREDR:
3034                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3035                 break;
3036         case HCI_AMP:
3037                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3038                 break;
3039         default:
3040                 return -EINVAL;
3041         }
3042
3043         if (id < 0)
3044                 return id;
3045
3046         sprintf(hdev->name, "hci%d", id);
3047         hdev->id = id;
3048
3049         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3050
3051         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3052                                           WQ_MEM_RECLAIM, 1, hdev->name);
3053         if (!hdev->workqueue) {
3054                 error = -ENOMEM;
3055                 goto err;
3056         }
3057
3058         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3059                                               WQ_MEM_RECLAIM, 1, hdev->name);
3060         if (!hdev->req_workqueue) {
3061                 destroy_workqueue(hdev->workqueue);
3062                 error = -ENOMEM;
3063                 goto err;
3064         }
3065
3066         if (!IS_ERR_OR_NULL(bt_debugfs))
3067                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3068
3069         dev_set_name(&hdev->dev, "%s", hdev->name);
3070
3071         error = device_add(&hdev->dev);
3072         if (error < 0)
3073                 goto err_wqueue;
3074
3075         hci_leds_init(hdev);
3076
3077         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3078                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3079                                     hdev);
3080         if (hdev->rfkill) {
3081                 if (rfkill_register(hdev->rfkill) < 0) {
3082                         rfkill_destroy(hdev->rfkill);
3083                         hdev->rfkill = NULL;
3084                 }
3085         }
3086
3087         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3088                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3089
3090         hci_dev_set_flag(hdev, HCI_SETUP);
3091         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3092
3093         if (hdev->dev_type == HCI_BREDR) {
3094                 /* Assume BR/EDR support until proven otherwise (such as
3095                  * through reading supported features during init).
3096                  */
3097                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3098         }
3099
3100         write_lock(&hci_dev_list_lock);
3101         list_add(&hdev->list, &hci_dev_list);
3102         write_unlock(&hci_dev_list_lock);
3103
3104         /* Devices that are marked for raw-only usage are unconfigured
3105          * and should not be included in normal operation.
3106          */
3107         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3108                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3109
3110         hci_sock_dev_event(hdev, HCI_DEV_REG);
3111         hci_dev_hold(hdev);
3112
3113         queue_work(hdev->req_workqueue, &hdev->power_on);
3114
3115         return id;
3116
3117 err_wqueue:
3118         destroy_workqueue(hdev->workqueue);
3119         destroy_workqueue(hdev->req_workqueue);
3120 err:
3121         ida_simple_remove(&hci_index_ida, hdev->id);
3122
3123         return error;
3124 }
3125 EXPORT_SYMBOL(hci_register_dev);
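
/* Illustrative sketch of a caller (hypothetical driver code, not part of
 * this file): a transport driver allocates an hci_dev, wires up the
 * mandatory open/close/send hooks checked above and then registers it;
 * my_open, my_close and my_send are assumed driver callbacks.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */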
3126
3127 /* Unregister HCI device */
3128 void hci_unregister_dev(struct hci_dev *hdev)
3129 {
3130         int id;
3131
3132         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3133
3134         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3135
3136         id = hdev->id;
3137
3138         write_lock(&hci_dev_list_lock);
3139         list_del(&hdev->list);
3140         write_unlock(&hci_dev_list_lock);
3141
3142         hci_dev_do_close(hdev);
3143
3144         cancel_work_sync(&hdev->power_on);
3145
3146         if (!test_bit(HCI_INIT, &hdev->flags) &&
3147             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3148             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3149                 hci_dev_lock(hdev);
3150                 mgmt_index_removed(hdev);
3151                 hci_dev_unlock(hdev);
3152         }
3153
3154         /* mgmt_index_removed should take care of emptying the
3155          * pending list */
3156         BUG_ON(!list_empty(&hdev->mgmt_pending));
3157
3158         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3159
3160         if (hdev->rfkill) {
3161                 rfkill_unregister(hdev->rfkill);
3162                 rfkill_destroy(hdev->rfkill);
3163         }
3164
3165         device_del(&hdev->dev);
3166
3167         debugfs_remove_recursive(hdev->debugfs);
3168
3169         destroy_workqueue(hdev->workqueue);
3170         destroy_workqueue(hdev->req_workqueue);
3171
3172         hci_dev_lock(hdev);
3173         hci_bdaddr_list_clear(&hdev->blacklist);
3174         hci_bdaddr_list_clear(&hdev->whitelist);
3175         hci_uuids_clear(hdev);
3176         hci_link_keys_clear(hdev);
3177         hci_smp_ltks_clear(hdev);
3178         hci_smp_irks_clear(hdev);
3179         hci_remote_oob_data_clear(hdev);
3180         hci_adv_instances_clear(hdev);
3181         hci_bdaddr_list_clear(&hdev->le_white_list);
3182         hci_conn_params_clear_all(hdev);
3183         hci_discovery_filter_clear(hdev);
3184         hci_dev_unlock(hdev);
3185
3186         hci_dev_put(hdev);
3187
3188         ida_simple_remove(&hci_index_ida, id);
3189 }
3190 EXPORT_SYMBOL(hci_unregister_dev);
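
/* Illustrative sketch (hypothetical driver remove path): unregistration
 * mirrors hci_register_dev() and must be followed by hci_free_dev() to
 * drop the final device reference taken at registration time.
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */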
3191
3192 /* Suspend HCI device */
3193 int hci_suspend_dev(struct hci_dev *hdev)
3194 {
3195         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3196         return 0;
3197 }
3198 EXPORT_SYMBOL(hci_suspend_dev);
3199
3200 /* Resume HCI device */
3201 int hci_resume_dev(struct hci_dev *hdev)
3202 {
3203         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3204         return 0;
3205 }
3206 EXPORT_SYMBOL(hci_resume_dev);
3207
3208 /* Reset HCI device */
3209 int hci_reset_dev(struct hci_dev *hdev)
3210 {
3211         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3212         struct sk_buff *skb;
3213
3214         skb = bt_skb_alloc(3, GFP_ATOMIC);
3215         if (!skb)
3216                 return -ENOMEM;
3217
3218         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3219         memcpy(skb_put(skb, 3), hw_err, 3);
3220
3221         /* Send Hardware Error to upper stack */
3222         return hci_recv_frame(hdev, skb);
3223 }
3224 EXPORT_SYMBOL(hci_reset_dev);
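
/* The three bytes queued above form a complete HCI event packet:
 *
 *	hw_err[0] == 0x10  (HCI_EV_HARDWARE_ERROR, the event code)
 *	hw_err[1] == 0x01  (parameter total length)
 *	hw_err[2] == 0x00  (the hardware error code)
 *
 * Feeding it through hci_recv_frame() makes the event processing path
 * treat the driver-requested reset exactly like a controller-reported
 * hardware error.
 */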
3225
3226 /* Receive frame from HCI drivers */
3227 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3228 {
3229         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3230                       !test_bit(HCI_INIT, &hdev->flags))) {
3231                 kfree_skb(skb);
3232                 return -ENXIO;
3233         }
3234
3235         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3236             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3237             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3238                 kfree_skb(skb);
3239                 return -EINVAL;
3240         }
3241
3242         /* Incoming skb */
3243         bt_cb(skb)->incoming = 1;
3244
3245         /* Time stamp */
3246         __net_timestamp(skb);
3247
3248         skb_queue_tail(&hdev->rx_q, skb);
3249         queue_work(hdev->workqueue, &hdev->rx_work);
3250
3251         return 0;
3252 }
3253 EXPORT_SYMBOL(hci_recv_frame);
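
/* Illustrative sketch (hypothetical driver RX path): an incoming event
 * of len bytes at data is handed to the core by tagging an skb with its
 * packet type; hci_recv_frame() takes ownership of the skb either way.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, len), data, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */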
3254
3255 /* Receive diagnostic message from HCI drivers */
3256 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3257 {
3258         /* Mark as diagnostic packet */
3259         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3260
3261         /* Time stamp */
3262         __net_timestamp(skb);
3263
3264         skb_queue_tail(&hdev->rx_q, skb);
3265         queue_work(hdev->workqueue, &hdev->rx_work);
3266
3267         return 0;
3268 }
3269 EXPORT_SYMBOL(hci_recv_diag);
3270
3271 /* ---- Interface to upper protocols ---- */
3272
3273 int hci_register_cb(struct hci_cb *cb)
3274 {
3275         BT_DBG("%p name %s", cb, cb->name);
3276
3277         mutex_lock(&hci_cb_list_lock);
3278         list_add_tail(&cb->list, &hci_cb_list);
3279         mutex_unlock(&hci_cb_list_lock);
3280
3281         return 0;
3282 }
3283 EXPORT_SYMBOL(hci_register_cb);
3284
3285 int hci_unregister_cb(struct hci_cb *cb)
3286 {
3287         BT_DBG("%p name %s", cb, cb->name);
3288
3289         mutex_lock(&hci_cb_list_lock);
3290         list_del(&cb->list);
3291         mutex_unlock(&hci_cb_list_lock);
3292
3293         return 0;
3294 }
3295 EXPORT_SYMBOL(hci_unregister_cb);
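
/* Illustrative sketch: an upper protocol hooks into connection events by
 * registering a struct hci_cb; only the callbacks it needs are filled in,
 * and my_connect_cfm here is an assumed protocol callback.
 *
 *	static struct hci_cb my_cb = {
 *		.name        = "my_proto",
 *		.connect_cfm = my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */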
3296
3297 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3298 {
3299         int err;
3300
3301         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3302                skb->len);
3303
3304         /* Time stamp */
3305         __net_timestamp(skb);
3306
3307         /* Send copy to monitor */
3308         hci_send_to_monitor(hdev, skb);
3309
3310         if (atomic_read(&hdev->promisc)) {
3311                 /* Send copy to the sockets */
3312                 hci_send_to_sock(hdev, skb);
3313         }
3314
3315         /* Get rid of skb owner, prior to sending to the driver. */
3316         skb_orphan(skb);
3317
3318         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3319                 kfree_skb(skb);
3320                 return;
3321         }
3322
3323         err = hdev->send(hdev, skb);
3324         if (err < 0) {
3325                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3326                 kfree_skb(skb);
3327         }
3328 }
3329
3330 /* Send HCI command */
3331 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3332                  const void *param)
3333 {
3334         struct sk_buff *skb;
3335
3336         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3337
3338         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3339         if (!skb) {
3340                 BT_ERR("%s no memory for command", hdev->name);
3341                 return -ENOMEM;
3342         }
3343
3344         /* Stand-alone HCI commands must be flagged as
3345          * single-command requests.
3346          */
3347         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3348
3349         skb_queue_tail(&hdev->cmd_q, skb);
3350         queue_work(hdev->workqueue, &hdev->cmd_work);
3351
3352         return 0;
3353 }
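
/* Illustrative sketch: queueing Write Scan Enable with a one-byte
 * parameter; the command is only queued here, cmd_work transmits it
 * once the controller has a free command slot.
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */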
3354
3355 /* Get data from the previously sent command */
3356 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3357 {
3358         struct hci_command_hdr *hdr;
3359
3360         if (!hdev->sent_cmd)
3361                 return NULL;
3362
3363         hdr = (void *) hdev->sent_cmd->data;
3364
3365         if (hdr->opcode != cpu_to_le16(opcode))
3366                 return NULL;
3367
3368         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3369
3370         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3371 }
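
/* Illustrative sketch: command complete handlers use this helper to
 * recover the parameters of the command that produced the event, e.g.
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *
 *	if (!sent)
 *		return;
 *	param = *((__u8 *) sent);
 */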
3372
3373 /* Send HCI command and wait for command complete event */
3374 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3375                              const void *param, u32 timeout)
3376 {
3377         struct sk_buff *skb;
3378
3379         if (!test_bit(HCI_UP, &hdev->flags))
3380                 return ERR_PTR(-ENETDOWN);
3381
3382         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3383
3384         hci_req_sync_lock(hdev);
3385         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3386         hci_req_sync_unlock(hdev);
3387
3388         return skb;
3389 }
3390 EXPORT_SYMBOL(hci_cmd_sync);
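
/* Illustrative sketch: synchronous callers must check for ERR_PTR() and
 * free the returned event skb, whose data holds the command complete
 * parameters. Reading the local version information might look like this:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */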
3391
3392 /* Send ACL data */
3393 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3394 {
3395         struct hci_acl_hdr *hdr;
3396         int len = skb->len;
3397
3398         skb_push(skb, HCI_ACL_HDR_SIZE);
3399         skb_reset_transport_header(skb);
3400         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3401         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3402         hdr->dlen   = cpu_to_le16(len);
3403 }
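
/* The handle field packs the 12-bit connection handle together with the
 * 4-bit packet boundary/broadcast flags. Worked example with ACL_START
 * (0x02):
 *
 *	hci_handle_pack(0x002a, 0x02) == 0x002a | (0x02 << 12) == 0x202a
 */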
3404
3405 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3406                           struct sk_buff *skb, __u16 flags)
3407 {
3408         struct hci_conn *conn = chan->conn;
3409         struct hci_dev *hdev = conn->hdev;
3410         struct sk_buff *list;
3411
3412         skb->len = skb_headlen(skb);
3413         skb->data_len = 0;
3414
3415         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3416
3417         switch (hdev->dev_type) {
3418         case HCI_BREDR:
3419                 hci_add_acl_hdr(skb, conn->handle, flags);
3420                 break;
3421         case HCI_AMP:
3422                 hci_add_acl_hdr(skb, chan->handle, flags);
3423                 break;
3424         default:
3425                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3426                 return;
3427         }
3428
3429         list = skb_shinfo(skb)->frag_list;
3430         if (!list) {
3431                 /* Non-fragmented */
3432                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3433
3434                 skb_queue_tail(queue, skb);
3435         } else {
3436                 /* Fragmented */
3437                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3438
3439                 skb_shinfo(skb)->frag_list = NULL;
3440
3441                 /* Queue all fragments atomically. We need to use spin_lock_bh
3442                  * here because of 6LoWPAN links, as there this function is
3443                  * called from softirq and using normal spin lock could cause
3444                  * deadlocks.
3445                  */
3446                 spin_lock_bh(&queue->lock);
3447
3448                 __skb_queue_tail(queue, skb);
3449
3450                 flags &= ~ACL_START;
3451                 flags |= ACL_CONT;
3452                 do {
3453                         skb = list; list = list->next;
3454
3455                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3456                         hci_add_acl_hdr(skb, conn->handle, flags);
3457
3458                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3459
3460                         __skb_queue_tail(queue, skb);
3461                 } while (list);
3462
3463                 spin_unlock_bh(&queue->lock);
3464         }
3465 }
3466
3467 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3468 {
3469         struct hci_dev *hdev = chan->conn->hdev;
3470
3471         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3472
3473         hci_queue_acl(chan, &chan->data_q, skb, flags);
3474
3475         queue_work(hdev->workqueue, &hdev->tx_work);
3476 }
3477
3478 /* Send SCO data */
3479 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3480 {
3481         struct hci_dev *hdev = conn->hdev;
3482         struct hci_sco_hdr hdr;
3483
3484         BT_DBG("%s len %d", hdev->name, skb->len);
3485
3486         hdr.handle = cpu_to_le16(conn->handle);
3487         hdr.dlen   = skb->len;
3488
3489         skb_push(skb, HCI_SCO_HDR_SIZE);
3490         skb_reset_transport_header(skb);
3491         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3492
3493         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3494
3495         skb_queue_tail(&conn->data_q, skb);
3496         queue_work(hdev->workqueue, &hdev->tx_work);
3497 }
3498
3499 /* ---- HCI TX task (outgoing data) ---- */
3500
3501 /* HCI Connection scheduler */
3502 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3503                                      int *quote)
3504 {
3505         struct hci_conn_hash *h = &hdev->conn_hash;
3506         struct hci_conn *conn = NULL, *c;
3507         unsigned int num = 0, min = ~0;
3508
3509         /* We don't have to lock device here. Connections are always
3510          * added and removed with TX task disabled. */
3511
3512         rcu_read_lock();
3513
3514         list_for_each_entry_rcu(c, &h->list, list) {
3515                 if (c->type != type || skb_queue_empty(&c->data_q))
3516                         continue;
3517
3518                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3519                         continue;
3520
3521                 num++;
3522
3523                 if (c->sent < min) {
3524                         min  = c->sent;
3525                         conn = c;
3526                 }
3527
3528                 if (hci_conn_num(hdev, type) == num)
3529                         break;
3530         }
3531
3532         rcu_read_unlock();
3533
3534         if (conn) {
3535                 int cnt, q;
3536
3537                 switch (conn->type) {
3538                 case ACL_LINK:
3539                         cnt = hdev->acl_cnt;
3540                         break;
3541                 case SCO_LINK:
3542                 case ESCO_LINK:
3543                         cnt = hdev->sco_cnt;
3544                         break;
3545                 case LE_LINK:
3546                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3547                         break;
3548                 default:
3549                         cnt = 0;
3550                         BT_ERR("Unknown link type");
3551                 }
3552
3553                 q = cnt / num;
3554                 *quote = q ? q : 1;
3555         } else
3556                 *quote = 0;
3557
3558         BT_DBG("conn %p quote %d", conn, *quote);
3559         return conn;
3560 }
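
/* Worked example of the quota computed above: with cnt == 8 free
 * controller buffers and num == 3 eligible connections, the selected
 * connection gets a quote of 8 / 3 == 2 packets per round; a zero
 * quotient is rounded up to 1 so the scheduler always makes progress.
 */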
3561
3562 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3563 {
3564         struct hci_conn_hash *h = &hdev->conn_hash;
3565         struct hci_conn *c;
3566
3567         BT_ERR("%s link tx timeout", hdev->name);
3568
3569         rcu_read_lock();
3570
3571         /* Kill stalled connections */
3572         list_for_each_entry_rcu(c, &h->list, list) {
3573                 if (c->type == type && c->sent) {
3574                         BT_ERR("%s killing stalled connection %pMR",
3575                                hdev->name, &c->dst);
3576                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3577                 }
3578         }
3579
3580         rcu_read_unlock();
3581 }
3582
3583 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3584                                       int *quote)
3585 {
3586         struct hci_conn_hash *h = &hdev->conn_hash;
3587         struct hci_chan *chan = NULL;
3588         unsigned int num = 0, min = ~0, cur_prio = 0;
3589         struct hci_conn *conn;
3590         int cnt, q, conn_num = 0;
3591
3592         BT_DBG("%s", hdev->name);
3593
3594         rcu_read_lock();
3595
3596         list_for_each_entry_rcu(conn, &h->list, list) {
3597                 struct hci_chan *tmp;
3598
3599                 if (conn->type != type)
3600                         continue;
3601
3602                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3603                         continue;
3604
3605                 conn_num++;
3606
3607                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3608                         struct sk_buff *skb;
3609
3610                         if (skb_queue_empty(&tmp->data_q))
3611                                 continue;
3612
3613                         skb = skb_peek(&tmp->data_q);
3614                         if (skb->priority < cur_prio)
3615                                 continue;
3616
3617                         if (skb->priority > cur_prio) {
3618                                 num = 0;
3619                                 min = ~0;
3620                                 cur_prio = skb->priority;
3621                         }
3622
3623                         num++;
3624
3625                         if (conn->sent < min) {
3626                                 min  = conn->sent;
3627                                 chan = tmp;
3628                         }
3629                 }
3630
3631                 if (hci_conn_num(hdev, type) == conn_num)
3632                         break;
3633         }
3634
3635         rcu_read_unlock();
3636
3637         if (!chan)
3638                 return NULL;
3639
3640         switch (chan->conn->type) {
3641         case ACL_LINK:
3642                 cnt = hdev->acl_cnt;
3643                 break;
3644         case AMP_LINK:
3645                 cnt = hdev->block_cnt;
3646                 break;
3647         case SCO_LINK:
3648         case ESCO_LINK:
3649                 cnt = hdev->sco_cnt;
3650                 break;
3651         case LE_LINK:
3652                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3653                 break;
3654         default:
3655                 cnt = 0;
3656                 BT_ERR("Unknown link type");
3657         }
3658
3659         q = cnt / num;
3660         *quote = q ? q : 1;
3661         BT_DBG("chan %p quote %d", chan, *quote);
3662         return chan;
3663 }
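
/* Worked example of the two-level selection above: with one channel
 * holding a priority-5 skb and another a priority-7 skb, the first
 * priority-7 skb seen resets num and min, dropping the priority-5
 * candidate; among equal-priority channels the connection with the
 * lowest conn->sent (fewest packets in flight) wins.
 */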
3664
3665 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3666 {
3667         struct hci_conn_hash *h = &hdev->conn_hash;
3668         struct hci_conn *conn;
3669         int num = 0;
3670
3671         BT_DBG("%s", hdev->name);
3672
3673         rcu_read_lock();
3674
3675         list_for_each_entry_rcu(conn, &h->list, list) {
3676                 struct hci_chan *chan;
3677
3678                 if (conn->type != type)
3679                         continue;
3680
3681                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3682                         continue;
3683
3684                 num++;
3685
3686                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3687                         struct sk_buff *skb;
3688
3689                         if (chan->sent) {
3690                                 chan->sent = 0;
3691                                 continue;
3692                         }
3693
3694                         if (skb_queue_empty(&chan->data_q))
3695                                 continue;
3696
3697                         skb = skb_peek(&chan->data_q);
3698                         if (skb->priority >= HCI_PRIO_MAX - 1)
3699                                 continue;
3700
3701                         skb->priority = HCI_PRIO_MAX - 1;
3702
3703                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3704                                skb->priority);
3705                 }
3706
3707                 if (hci_conn_num(hdev, type) == num)
3708                         break;
3709         }
3710
3711         rcu_read_unlock();
3712 }
3714
3715 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3716 {
3717         /* Calculate count of blocks used by this packet */
3718         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3719 }
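
/* Worked example, assuming hdev->block_len == 64: an ACL frame of 271
 * bytes carries 271 - HCI_ACL_HDR_SIZE (4) == 267 payload bytes, so it
 * consumes DIV_ROUND_UP(267, 64) == 5 data blocks.
 */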
3720
3721 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3722 {
3723         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3724                 /* ACL tx timeout must be longer than maximum
3725                  * link supervision timeout (40.9 seconds) */
3726                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3727                                        HCI_ACL_TX_TIMEOUT))
3728                         hci_link_tx_to(hdev, ACL_LINK);
3729         }
3730 }
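
/* HCI_ACL_TX_TIMEOUT is msecs_to_jiffies(45000), i.e. 45 seconds, which
 * satisfies the "longer than 40.9 seconds" requirement noted above.
 */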
3731
3732 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3733 {
3734         unsigned int cnt = hdev->acl_cnt;
3735         struct hci_chan *chan;
3736         struct sk_buff *skb;
3737         int quote;
3738
3739         __check_timeout(hdev, cnt);
3740
3741         while (hdev->acl_cnt &&
3742                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3743                 u32 priority = (skb_peek(&chan->data_q))->priority;
3744                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3745                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3746                                skb->len, skb->priority);
3747
3748                         /* Stop if priority has changed */
3749                         if (skb->priority < priority)
3750                                 break;
3751
3752                         skb = skb_dequeue(&chan->data_q);
3753
3754                         hci_conn_enter_active_mode(chan->conn,
3755                                                    bt_cb(skb)->force_active);
3756
3757                         hci_send_frame(hdev, skb);
3758                         hdev->acl_last_tx = jiffies;
3759
3760                         hdev->acl_cnt--;
3761                         chan->sent++;
3762                         chan->conn->sent++;
3763                 }
3764         }
3765
3766         if (cnt != hdev->acl_cnt)
3767                 hci_prio_recalculate(hdev, ACL_LINK);
3768 }
3769
3770 static void hci_sched_acl_blk(struct hci_dev *hdev)
3771 {
3772         unsigned int cnt = hdev->block_cnt;
3773         struct hci_chan *chan;
3774         struct sk_buff *skb;
3775         int quote;
3776         u8 type;
3777
3778         __check_timeout(hdev, cnt);
3779
3780         BT_DBG("%s", hdev->name);
3781
3782         if (hdev->dev_type == HCI_AMP)
3783                 type = AMP_LINK;
3784         else
3785                 type = ACL_LINK;
3786
3787         while (hdev->block_cnt > 0 &&
3788                (chan = hci_chan_sent(hdev, type, &quote))) {
3789                 u32 priority = (skb_peek(&chan->data_q))->priority;
3790                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3791                         int blocks;
3792
3793                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3794                                skb->len, skb->priority);
3795
3796                         /* Stop if priority has changed */
3797                         if (skb->priority < priority)
3798                                 break;
3799
3800                         skb = skb_dequeue(&chan->data_q);
3801
3802                         blocks = __get_blocks(hdev, skb);
3803                         if (blocks > hdev->block_cnt)
3804                                 return;
3805
3806                         hci_conn_enter_active_mode(chan->conn,
3807                                                    bt_cb(skb)->force_active);
3808
3809                         hci_send_frame(hdev, skb);
3810                         hdev->acl_last_tx = jiffies;
3811
3812                         hdev->block_cnt -= blocks;
3813                         quote -= blocks;
3814
3815                         chan->sent += blocks;
3816                         chan->conn->sent += blocks;
3817                 }
3818         }
3819
3820         if (cnt != hdev->block_cnt)
3821                 hci_prio_recalculate(hdev, type);
3822 }
3823
3824 static void hci_sched_acl(struct hci_dev *hdev)
3825 {
3826         BT_DBG("%s", hdev->name);
3827
3828         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3829         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3830                 return;
3831
3832         /* Nothing to schedule if an AMP controller has no AMP links */
3833         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3834                 return;
3835
3836         switch (hdev->flow_ctl_mode) {
3837         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3838                 hci_sched_acl_pkt(hdev);
3839                 break;
3840
3841         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3842                 hci_sched_acl_blk(hdev);
3843                 break;
3844         }
3845 }
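
/* Accounting example for the two modes dispatched above: sending a
 * 200-byte ACL frame (4-byte header + 196 bytes of payload) costs one
 * unit of hdev->acl_cnt in packet-based mode, but, assuming a block_len
 * of 64, DIV_ROUND_UP(196, 64) == 4 units of hdev->block_cnt in
 * block-based mode.
 */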
3846
3847 /* Schedule SCO */
3848 static void hci_sched_sco(struct hci_dev *hdev)
3849 {
3850         struct hci_conn *conn;
3851         struct sk_buff *skb;
3852         int quote;
3853
3854         BT_DBG("%s", hdev->name);
3855
3856         if (!hci_conn_num(hdev, SCO_LINK))
3857                 return;
3858
3859         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3860                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3861                         BT_DBG("skb %p len %d", skb, skb->len);
3862                         hci_send_frame(hdev, skb);
3863
3864                         conn->sent++;
3865                         if (conn->sent == ~0)
3866                                 conn->sent = 0;
3867                 }
3868         }
3869 }
3870
3871 static void hci_sched_esco(struct hci_dev *hdev)
3872 {
3873         struct hci_conn *conn;
3874         struct sk_buff *skb;
3875         int quote;
3876
3877         BT_DBG("%s", hdev->name);
3878
3879         if (!hci_conn_num(hdev, ESCO_LINK))
3880                 return;
3881
3882         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3883                                                      &quote))) {
3884                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3885                         BT_DBG("skb %p len %d", skb, skb->len);
3886                         hci_send_frame(hdev, skb);
3887
3888                         conn->sent++;
3889                         if (conn->sent == ~0)
3890                                 conn->sent = 0;
3891                 }
3892         }
3893 }
3894
3895 static void hci_sched_le(struct hci_dev *hdev)
3896 {
3897         struct hci_chan *chan;
3898         struct sk_buff *skb;
3899         int quote, cnt, tmp;
3900
3901         BT_DBG("%s", hdev->name);
3902
3903         if (!hci_conn_num(hdev, LE_LINK))
3904                 return;
3905
3906         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3907                 /* LE tx timeout must be longer than maximum
3908                  * link supervision timeout (40.9 seconds) */
3909                 if (!hdev->le_cnt && hdev->le_pkts &&
3910                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3911                         hci_link_tx_to(hdev, LE_LINK);
3912         }
3913
3914         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3915         tmp = cnt;
3916         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3917                 u32 priority = (skb_peek(&chan->data_q))->priority;
3918                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3919                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3920                                skb->len, skb->priority);
3921
3922                         /* Stop if priority has changed */
3923                         if (skb->priority < priority)
3924                                 break;
3925
3926                         skb = skb_dequeue(&chan->data_q);
3927
3928                         hci_send_frame(hdev, skb);
3929                         hdev->le_last_tx = jiffies;
3930
3931                         cnt--;
3932                         chan->sent++;
3933                         chan->conn->sent++;
3934                 }
3935         }
3936
3937         if (hdev->le_pkts)
3938                 hdev->le_cnt = cnt;
3939         else
3940                 hdev->acl_cnt = cnt;
3941
3942         if (cnt != tmp)
3943                 hci_prio_recalculate(hdev, LE_LINK);
3944 }
3945
3946 static void hci_tx_work(struct work_struct *work)
3947 {
3948         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3949         struct sk_buff *skb;
3950
3951         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3952                hdev->sco_cnt, hdev->le_cnt);
3953
3954         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3955                 /* Schedule queues and send stuff to HCI driver */
3956                 hci_sched_acl(hdev);
3957                 hci_sched_sco(hdev);
3958                 hci_sched_esco(hdev);
3959                 hci_sched_le(hdev);
3960         }
3961
3962         /* Send next queued raw (unknown type) packet */
3963         while ((skb = skb_dequeue(&hdev->raw_q)))
3964                 hci_send_frame(hdev, skb);
3965 }
3966
3967 /* ----- HCI RX task (incoming data processing) ----- */
3968
3969 /* ACL data packet */
3970 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3971 {
3972         struct hci_acl_hdr *hdr = (void *) skb->data;
3973         struct hci_conn *conn;
3974         __u16 handle, flags;
3975
3976         skb_pull(skb, HCI_ACL_HDR_SIZE);
3977
3978         handle = __le16_to_cpu(hdr->handle);
3979         flags  = hci_flags(handle);
3980         handle = hci_handle(handle);
3981
3982         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3983                handle, flags);
3984
3985         hdev->stat.acl_rx++;
3986
3987         hci_dev_lock(hdev);
3988         conn = hci_conn_hash_lookup_handle(hdev, handle);
3989         hci_dev_unlock(hdev);
3990
3991         if (conn) {
3992                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3993
3994                 /* Send to upper protocol */
3995                 l2cap_recv_acldata(conn, skb, flags);
3996                 return;
3997         } else {
3998                 BT_ERR("%s ACL packet for unknown connection handle %d",
3999                        hdev->name, handle);
4000         }
4001
4002         kfree_skb(skb);
4003 }
4004
4005 /* SCO data packet */
4006 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4007 {
4008         struct hci_sco_hdr *hdr = (void *) skb->data;
4009         struct hci_conn *conn;
4010         __u16 handle;
4011
4012         skb_pull(skb, HCI_SCO_HDR_SIZE);
4013
4014         handle = __le16_to_cpu(hdr->handle);
4015
4016         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4017
4018         hdev->stat.sco_rx++;
4019
4020         hci_dev_lock(hdev);
4021         conn = hci_conn_hash_lookup_handle(hdev, handle);
4022         hci_dev_unlock(hdev);
4023
4024         if (conn) {
4025                 /* Send to upper protocol */
4026                 sco_recv_scodata(conn, skb);
4027                 return;
4028         } else {
4029                 BT_ERR("%s SCO packet for unknown connection handle %d",
4030                        hdev->name, handle);
4031         }
4032
4033         kfree_skb(skb);
4034 }
4035
4036 static bool hci_req_is_complete(struct hci_dev *hdev)
4037 {
4038         struct sk_buff *skb;
4039
4040         skb = skb_peek(&hdev->cmd_q);
4041         if (!skb)
4042                 return true;
4043
4044         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4045 }
4046
4047 static void hci_resend_last(struct hci_dev *hdev)
4048 {
4049         struct hci_command_hdr *sent;
4050         struct sk_buff *skb;
4051         u16 opcode;
4052
4053         if (!hdev->sent_cmd)
4054                 return;
4055
4056         sent = (void *) hdev->sent_cmd->data;
4057         opcode = __le16_to_cpu(sent->opcode);
4058         if (opcode == HCI_OP_RESET)
4059                 return;
4060
4061         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4062         if (!skb)
4063                 return;
4064
4065         skb_queue_head(&hdev->cmd_q, skb);
4066         queue_work(hdev->workqueue, &hdev->cmd_work);
4067 }
4068
4069 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4070                           hci_req_complete_t *req_complete,
4071                           hci_req_complete_skb_t *req_complete_skb)
4072 {
4073         struct sk_buff *skb;
4074         unsigned long flags;
4075
4076         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4077
4078         /* If the completed command doesn't match the last one that was
4079          * sent we need to do special handling of it.
4080          */
4081         if (!hci_sent_cmd_data(hdev, opcode)) {
4082                 /* Some CSR based controllers generate a spontaneous
4083                  * reset complete event during init and any pending
4084                  * command will never be completed. In such a case we
4085                  * need to resend whatever was the last sent
4086                  * command.
4087                  */
4088                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4089                         hci_resend_last(hdev);
4090
4091                 return;
4092         }
4093
4094         /* If the command succeeded and there's still more commands in
4095          * this request the request is not yet complete.
4096          */
4097         if (!status && !hci_req_is_complete(hdev))
4098                 return;
4099
4100         /* If this was the last command in a request the complete
4101          * callback would be found in hdev->sent_cmd instead of the
4102          * command queue (hdev->cmd_q).
4103          */
4104         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4105                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4106                 return;
4107         }
4108
4109         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4110                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4111                 return;
4112         }
4113
4114         /* Remove all pending commands belonging to this request */
4115         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4116         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4117                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4118                         __skb_queue_head(&hdev->cmd_q, skb);
4119                         break;
4120                 }
4121
4122                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4123                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4124                 else
4125                         *req_complete = bt_cb(skb)->hci.req_complete;
4126                 kfree_skb(skb);
4127         }
4128         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4129 }
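
/* Worked example of the request framing used above: hci_send_cmd() tags
 * every stand-alone command with HCI_REQ_START, so commands queued as
 *
 *	[START A] [B] [C] [START D]
 *
 * form one request A..C followed by a new one starting at D. If A fails
 * while B and C are still queued, the reaping loop above collects their
 * completion callbacks and stops at D's HCI_REQ_START marker, leaving D
 * queued as the start of the next request.
 */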
4130
4131 static void hci_rx_work(struct work_struct *work)
4132 {
4133         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4134         struct sk_buff *skb;
4135
4136         BT_DBG("%s", hdev->name);
4137
4138         while ((skb = skb_dequeue(&hdev->rx_q))) {
4139                 /* Send copy to monitor */
4140                 hci_send_to_monitor(hdev, skb);
4141
4142                 if (atomic_read(&hdev->promisc)) {
4143                         /* Send copy to the sockets */
4144                         hci_send_to_sock(hdev, skb);
4145                 }
4146
4147                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4148                         kfree_skb(skb);
4149                         continue;
4150                 }
4151
4152                 if (test_bit(HCI_INIT, &hdev->flags)) {
4153                 /* Don't process data packets in this state. */
4154                         switch (hci_skb_pkt_type(skb)) {
4155                         case HCI_ACLDATA_PKT:
4156                         case HCI_SCODATA_PKT:
4157                                 kfree_skb(skb);
4158                                 continue;
4159                         }
4160                 }
4161
4162                 /* Process frame */
4163                 switch (hci_skb_pkt_type(skb)) {
4164                 case HCI_EVENT_PKT:
4165                         BT_DBG("%s Event packet", hdev->name);
4166                         hci_event_packet(hdev, skb);
4167                         break;
4168
4169                 case HCI_ACLDATA_PKT:
4170                         BT_DBG("%s ACL data packet", hdev->name);
4171                         hci_acldata_packet(hdev, skb);
4172                         break;
4173
4174                 case HCI_SCODATA_PKT:
4175                         BT_DBG("%s SCO data packet", hdev->name);
4176                         hci_scodata_packet(hdev, skb);
4177                         break;
4178
4179                 default:
4180                         kfree_skb(skb);
4181                         break;
4182                 }
4183         }
4184 }
4185
4186 static void hci_cmd_work(struct work_struct *work)
4187 {
4188         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4189         struct sk_buff *skb;
4190
4191         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4192                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4193
4194         /* Send queued commands */
4195         if (atomic_read(&hdev->cmd_cnt)) {
4196                 skb = skb_dequeue(&hdev->cmd_q);
4197                 if (!skb)
4198                         return;
4199
4200                 kfree_skb(hdev->sent_cmd);
4201
4202                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4203                 if (hdev->sent_cmd) {
4204                         atomic_dec(&hdev->cmd_cnt);
4205                         hci_send_frame(hdev, skb);
4206                         if (test_bit(HCI_RESET, &hdev->flags))
4207                                 cancel_delayed_work(&hdev->cmd_timer);
4208                         else
4209                                 schedule_delayed_work(&hdev->cmd_timer,
4210                                                       HCI_CMD_TIMEOUT);
4211                 } else {
4212                         skb_queue_head(&hdev->cmd_q, skb);
4213                         queue_work(hdev->workqueue, &hdev->cmd_work);
4214                 }
4215         }
4216 }