/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
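
/* Typical asynchronous usage (sketch only; the opcode and callback below
 * are illustrative, not taken from this file):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * The queued commands are spliced onto hdev->cmd_q and my_complete_cb
 * (a hypothetical hci_req_complete_t) runs once the last command in the
 * request completes.
 */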

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
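
/* Example (sketch): synchronously issue a command and consume the reply.
 * The opcode is illustrative; HCI_CMD_TIMEOUT is the timeout commonly used
 * by callers of this helper.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */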

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
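
/* Example (sketch): a minimal request builder suitable for hci_req_sync().
 * The function name is hypothetical; compare update_scan() further down in
 * this file, which follows the same pattern.
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_scan(req);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 */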

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
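
/* The skb produced above carries the command in wire format: a little-endian
 * 16-bit opcode and a one-byte parameter length, followed by plen bytes of
 * parameters.
 */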

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

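	/* 11.25 msec page scan window in both cases (0x0012 slots of
	 * 0.625 msec each)
	 */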
	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller to hold hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If the controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan and
		 * connect at the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to the
		 * duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

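/* EIR data is a sequence of length-prefixed fields: each field starts with
 * a one-byte length (covering the type byte plus the data, but not the
 * length byte itself), followed by a one-byte type and the payload. The
 * helpers below append 16-, 32- and 128-bit service UUID lists in that
 * format, incrementing the length byte as UUIDs are added.
 */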
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

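		/* For UUIDs derived from the Bluetooth base UUID, the
		 * 16-bit value sits at octets 12-13 of the 128-bit
		 * little-endian representation.
		 */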
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the white list.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* The white list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* The white list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* The white list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use the white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses and
	 * thus has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies can enable handling of
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no white list)
	 * and 0x01 (white list enabled), use the new filter policies
	 * 0x02 (no white list) and 0x03 (white list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags
	 * here. These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t name_len;
	int max_len;

	max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
	name_len = strlen(hdev->dev_name);
	if (name_len > 0 && max_len > 0) {
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	return append_local_name(hdev, ptr, 0);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		ptr[0] = 3;
		ptr[1] = EIR_APPEARANCE;
		put_unaligned_le16(hdev->appearance, ptr + 2);
		scan_rsp_len += 4;
		ptr += 4;
	}

	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;
	ptr += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);
	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a
	 * timeout in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

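	/* The instance timeout is kept in seconds; convert it to jiffies
	 * for the delayed work.
	 */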
	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

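/* Pick the own-address type for advertising or scanning. The priority,
 * as implemented below, is: resolvable private address (when use_rpa),
 * then a freshly generated non-resolvable private address (when
 * require_privacy), then the configured static random address, and
 * finally the public address.
 */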
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

1605         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1606                 cod[1] |= 0x20;
1607
1608         if (memcmp(cod, hdev->dev_class, 3) == 0)
1609                 return;
1610
1611         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1612 }
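
/* Illustrative note, not part of the original file: the Class of
 * Device built above is a 24-bit little-endian field, so cod[0] holds
 * the minor device class, cod[1] the major device class plus service
 * bits 13-15, and cod[2] the remaining service class bits. The 0x20
 * OR-ed into cod[1] sets CoD bit 13, the "Limited Discoverable Mode"
 * service bit. A hypothetical decoder for the same layout:
 */
static void example_decode_cod(const u8 cod[3])
{
        u8 minor = cod[0] >> 2;         /* bits 2-7: minor device class */
        u8 major = cod[1] & 0x1f;       /* bits 8-12: major device class */
        bool limited = cod[1] & 0x20;   /* bit 13: limited discoverable */
        u8 services = cod[2];           /* bits 16-23: service classes */

        BT_DBG("major 0x%2.2x minor 0x%2.2x limited %d services 0x%2.2x",
               major, minor, limited, services);
}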

static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;   /* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;   /* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;   /* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}
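
/* Illustrative note, not part of the original file: the inquiry access
 * codes above are 24-bit LAPs stored little-endian, i.e. the GIAC
 * 0x9e8b33 becomes { 0x33, 0x8b, 0x9e } and the LIAC 0x9e8b00 becomes
 * { 0x00, 0x8b, 0x9e }. A hypothetical helper packing a LAP the same
 * way:
 */
static void example_pack_lap(u32 lap, u8 buf[3])
{
        buf[0] = lap & 0xff;            /* 0x33 for the GIAC */
        buf[1] = (lap >> 8) & 0xff;     /* 0x8b */
        buf[2] = (lap >> 16) & 0xff;    /* 0x9e */
}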

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                __hci_req_update_adv_data(req, 0x00);

                /* Discoverable mode affects the local advertising
                 * address in limited privacy mode.
                 */
                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                        __hci_req_enable_advertising(req);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F), which isn't
                         * compatible with most values passed to this
                         * function. To be safe, hard-code one of the
                         * values that is suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}
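
/* Illustrative sketch, not part of the original file: a typical caller
 * tears down a link with one of the standard HCI reason codes and only
 * cares about errors that prevented the request from being queued at
 * all (the wrapper above already treats an empty request, -ENODATA, as
 * success). The function name is hypothetical.
 */
static int example_drop_connection(struct hci_conn *conn)
{
        /* 0x13: Remote User Terminated Connection */
        return hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}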

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}
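
/* Illustrative note, not part of the original file: cp.length above is
 * expressed in units of 1.28 seconds (valid range 0x01-0x30 for the
 * HCI Inquiry command), so e.g. a length of 0x08 yields roughly a
 * 10.24 second inquiry. A hypothetical conversion helper:
 */
static u8 example_secs_to_inquiry_len(unsigned int secs)
{
        /* Round up to the next 1.28 s unit and clamp to the spec max. */
        return min_t(unsigned int, DIV_ROUND_UP(secs * 100, 128), 0x30);
}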

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE-only scan, change the discovery state.
         * If we were running both LE and BR/EDR inquiry simultaneously,
         * and the BR/EDR inquiry is already finished, stop discovery;
         * otherwise the BR/EDR inquiry will stop discovery when it
         * finishes. If we are resolving a remote device name, do not
         * change the discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If the controller is not scanning, we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable was queued to
         * run "duration" after scan_start. That job was cancelled as part
         * of this restart, so queue it again with the remaining timeout to
         * make sure the scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}
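
/* Illustrative note, not part of the original file: a worked example of
 * the wraparound handling above, factored into a hypothetical helper.
 * If scan_start was ULONG_MAX - 100 and jiffies has since wrapped so
 * that now == 50, then now < scan_start and the second branch computes
 * ULONG_MAX - (ULONG_MAX - 100) + 50 = 150 jiffies elapsed, instead of
 * letting a naive signed subtraction go negative.
 */
static unsigned long example_elapsed_jiffies(unsigned long start,
                                             unsigned long now)
{
        if (now >= start)
                return now - start;
        return ULONG_MAX - start + now;
}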

static void disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If the controller is scanning, background scanning is running.
         * Temporarily stop it in order to set the discovery scanning
         * parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable private
         * address (when the privacy feature has been enabled) or a
         * non-resolvable private address.
         */
        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
                                        &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}
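
/* Illustrative note, not part of the original file: the scan interval
 * and window above are in units of 0.625 ms, as defined for the LE Set
 * Scan Parameters command, so a value of 0x0060 (96 units) means 60 ms.
 * A hypothetical conversion helper:
 */
static u16 example_ms_to_scan_units(unsigned int ms)
{
        /* 1 unit = 0.625 ms, i.e. 8 units per 5 ms. */
        return (u16)(ms * 8 / 5);
}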

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * Interleaved discovery, in comparison, runs BR/EDR inquiry
                 * and LE scanning sequentially with separate timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double the LE
                         * scan interval. We must leave some time for the
                         * controller to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
            hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, just make sure the
         * limited discoverable flag is cleared. Even in the case of a
         * timeout triggered from general discoverable, it is safe to
         * unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        __hci_req_update_adv_data(req, 0x00);
                        __hci_req_update_scan_rsp_data(req, 0x00);

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                                __hci_req_enable_advertising(req);
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}