/* net/bluetooth/hci_request.c */
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

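/* Values of hdev->req_status while a synchronous request is in flight */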
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

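/* Splice the queued commands onto the device command queue and kick off
 * the command work. The completion callback, if any, is attached to the
 * last command of the request so that it runs once the whole request has
 * been processed.
 */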
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

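/* Completion callback used by the synchronous helpers below: record the
 * result (and optionally the response skb) in hdev and wake up the waiter.
 */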
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

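/* Send a single HCI command and wait for either the given event or the
 * regular command complete/status response. Returns the response skb on
 * success or an ERR_PTR() on failure.
 */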
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
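
/* Illustrative use of the synchronous helper (a sketch, not code from
 * this file):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */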

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

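/* Allocate and fill an skb carrying a single HCI command packet */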
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

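/* Typical request construction (an illustrative sketch; opcode, plen,
 * param and complete_cb are placeholders):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, opcode, plen, param);
 *	...
 *	hci_req_run(&req, complete_cb);
 */
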
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller to hold hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

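/* Build the extended inquiry response data: local name, TX power,
 * device ID and the 16/32/128-bit service UUID lists.
 */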
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

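/* Synchronize the controller white list with pend_le_conns and
 * pend_le_reports. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 (accept all advertising) when it cannot.
 */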
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Now that all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

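/* Queue the commands to configure and enable LE passive scanning, after
 * first updating the own address type and the controller white list.
 */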
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * LE privacy is enabled, controllers that support the Extended
         * Scanner Filter Policies can also handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

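/* Return the advertising flags for the given instance. Instance 0
 * derives its flags from the global HCI_ADVERTISING_CONNECTABLE and
 * discoverable settings.
 */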
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

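/* Queue the commands to configure and enable undirected advertising for
 * the current advertising instance.
 */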
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

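/* Append the complete or shortened local name to the advertising data,
 * depending on how much of the HCI_MAX_AD_LENGTH budget is left.
 */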
static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t complete_len;
        size_t short_len;
        int max_len;

        max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
        complete_len = strlen(hdev->dev_name);
        short_len = strlen(hdev->short_name);

        /* no space left for name */
        if (max_len < 1)
                return ad_len;

        /* no name set */
        if (!complete_len)
                return ad_len;

        /* complete name fits and is no longer than the max short name length */
        if (complete_len <= max_len &&
            complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len);
        }

        /* short name set and fits */
        if (short_len && short_len <= max_len) {
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len);
        }

        /* no short name set so shorten complete name */
        if (!short_len) {
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->dev_name, max_len);
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

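/* Build the advertising data for the given instance: the "Flags" field
 * (when applicable), the instance data and optionally the TX power.
 */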
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

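/* Make the given instance the current one and queue the commands to
 * advertise it, scheduling an expiry timeout based on the instance
 * duration and remaining lifetime. Returns 0 on success or a negative
 * error (-EPERM, -EBUSY or -ENOENT).
 */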
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

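/* Decide which own address type to advertise or scan with and, when a
 * random address is needed, queue the command to set it. The chosen
 * type is returned through own_addr_type.
 */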
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

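/* Queue a Write Scan Enable command if the desired page/inquiry scan
 * state differs from the current one.
 */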
1510 void __hci_req_update_scan(struct hci_request *req)
1511 {
1512         struct hci_dev *hdev = req->hdev;
1513         u8 scan;
1514
1515         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1516                 return;
1517
1518         if (!hdev_is_powered(hdev))
1519                 return;
1520
1521         if (mgmt_powering_down(hdev))
1522                 return;
1523
1524         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1525             disconnected_whitelist_entries(hdev))
1526                 scan = SCAN_PAGE;
1527         else
1528                 scan = SCAN_DISABLED;
1529
1530         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1531                 scan |= SCAN_INQUIRY;
1532
1533         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1534             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1535                 return;
1536
1537         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1538 }
1539
1540 static int update_scan(struct hci_request *req, unsigned long opt)
1541 {
1542         hci_dev_lock(req->hdev);
1543         __hci_req_update_scan(req);
1544         hci_dev_unlock(req->hdev);
1545         return 0;
1546 }
1547
1548 static void scan_update_work(struct work_struct *work)
1549 {
1550         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1551
1552         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1553 }
1554
1555 static int connectable_update(struct hci_request *req, unsigned long opt)
1556 {
1557         struct hci_dev *hdev = req->hdev;
1558
1559         hci_dev_lock(hdev);
1560
1561         __hci_req_update_scan(req);
1562
1563         /* If BR/EDR is not enabled and we disable advertising as a
1564          * by-product of disabling connectable, we need to update the
1565          * advertising flags.
1566          */
1567         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1568                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1569
1570         /* Update the advertising parameters if necessary */
1571         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1572             !list_empty(&hdev->adv_instances))
1573                 __hci_req_enable_advertising(req);
1574
1575         __hci_update_background_scan(req);
1576
1577         hci_dev_unlock(hdev);
1578
1579         return 0;
1580 }
1581
1582 static void connectable_update_work(struct work_struct *work)
1583 {
1584         struct hci_dev *hdev = container_of(work, struct hci_dev,
1585                                             connectable_update);
1586         u8 status;
1587
1588         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1589         mgmt_set_connectable_complete(hdev, status);
1590 }
1591
1592 static u8 get_service_classes(struct hci_dev *hdev)
1593 {
1594         struct bt_uuid *uuid;
1595         u8 val = 0;
1596
1597         list_for_each_entry(uuid, &hdev->uuids, list)
1598                 val |= uuid->svc_hint;
1599
1600         return val;
1601 }
1602
1603 void __hci_req_update_class(struct hci_request *req)
1604 {
1605         struct hci_dev *hdev = req->hdev;
1606         u8 cod[3];
1607
1608         BT_DBG("%s", hdev->name);
1609
1610         if (!hdev_is_powered(hdev))
1611                 return;
1612
1613         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1614                 return;
1615
1616         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1617                 return;
1618
1619         cod[0] = hdev->minor_class;
1620         cod[1] = hdev->major_class;
1621         cod[2] = get_service_classes(hdev);
1622
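        /* Setting 0x20 in the middle byte sets bit 13 of the class of
         * device, the "Limited Discoverable Mode" major service class bit.
         */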
1623         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1624                 cod[1] |= 0x20;
1625
1626         if (memcmp(cod, hdev->dev_class, 3) == 0)
1627                 return;
1628
1629         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1630 }
1631
1632 static void write_iac(struct hci_request *req)
1633 {
1634         struct hci_dev *hdev = req->hdev;
1635         struct hci_cp_write_current_iac_lap cp;
1636
1637         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1638                 return;
1639
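        /* The inquiry access codes are 24-bit LAPs written out in
         * little-endian byte order: 0x9E8B00 is the Limited IAC (LIAC)
         * and 0x9E8B33 the General IAC (GIAC).
         */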
1640         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1641                 /* Limited discoverable mode */
1642                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1643                 cp.iac_lap[0] = 0x00;   /* LIAC */
1644                 cp.iac_lap[1] = 0x8b;
1645                 cp.iac_lap[2] = 0x9e;
1646                 cp.iac_lap[3] = 0x33;   /* GIAC */
1647                 cp.iac_lap[4] = 0x8b;
1648                 cp.iac_lap[5] = 0x9e;
1649         } else {
1650                 /* General discoverable mode */
1651                 cp.num_iac = 1;
1652                 cp.iac_lap[0] = 0x33;   /* GIAC */
1653                 cp.iac_lap[1] = 0x8b;
1654                 cp.iac_lap[2] = 0x9e;
1655         }
1656
1657         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1658                     (cp.num_iac * 3) + 1, &cp);
1659 }
1660
1661 static int discoverable_update(struct hci_request *req, unsigned long opt)
1662 {
1663         struct hci_dev *hdev = req->hdev;
1664
1665         hci_dev_lock(hdev);
1666
1667         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1668                 write_iac(req);
1669                 __hci_req_update_scan(req);
1670                 __hci_req_update_class(req);
1671         }
1672
1673         /* Advertising instances don't use the global discoverable setting, so
1674          * only update AD if advertising was enabled using Set Advertising.
1675          */
1676         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1677                 __hci_req_update_adv_data(req, 0x00);
1678
1679                 /* Discoverable mode affects the local advertising
1680                  * address in limited privacy mode.
1681                  */
1682                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1683                         __hci_req_enable_advertising(req);
1684         }
1685
1686         hci_dev_unlock(hdev);
1687
1688         return 0;
1689 }
1690
1691 static void discoverable_update_work(struct work_struct *work)
1692 {
1693         struct hci_dev *hdev = container_of(work, struct hci_dev,
1694                                             discoverable_update);
1695         u8 status;
1696
1697         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1698         mgmt_set_discoverable_complete(hdev, status);
1699 }
1700
1701 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1702                       u8 reason)
1703 {
1704         switch (conn->state) {
1705         case BT_CONNECTED:
1706         case BT_CONFIG:
1707                 if (conn->type == AMP_LINK) {
1708                         struct hci_cp_disconn_phy_link cp;
1709
1710                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1711                         cp.reason = reason;
1712                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1713                                     &cp);
1714                 } else {
1715                         struct hci_cp_disconnect dc;
1716
1717                         dc.handle = cpu_to_le16(conn->handle);
1718                         dc.reason = reason;
1719                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1720                 }
1721
1722                 conn->state = BT_DISCONN;
1723
1724                 break;
1725         case BT_CONNECT:
1726                 if (conn->type == LE_LINK) {
1727                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1728                                 break;
1729                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1730                                     0, NULL);
1731                 } else if (conn->type == ACL_LINK) {
1732                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1733                                 break;
1734                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1735                                     6, &conn->dst);
1736                 }
1737                 break;
1738         case BT_CONNECT2:
1739                 if (conn->type == ACL_LINK) {
1740                         struct hci_cp_reject_conn_req rej;
1741
1742                         bacpy(&rej.bdaddr, &conn->dst);
1743                         rej.reason = reason;
1744
1745                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1746                                     sizeof(rej), &rej);
1747                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1748                         struct hci_cp_reject_sync_conn_req rej;
1749
1750                         bacpy(&rej.bdaddr, &conn->dst);
1751
1752                         /* SCO rejection has its own limited set of
1753                          * allowed error values (0x0D-0x0F) which isn't
1754                          * compatible with most values passed to this
1755                          * function. To be safe, hard-code one of the
1756                          * values that's suitable for SCO.
1757                          */
1758                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1759
1760                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1761                                     sizeof(rej), &rej);
1762                 }
1763                 break;
1764         default:
1765                 conn->state = BT_CLOSED;
1766                 break;
1767         }
1768 }
1769
1770 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1771 {
1772         if (status)
1773                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1774 }
1775
1776 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1777 {
1778         struct hci_request req;
1779         int err;
1780
1781         hci_req_init(&req, conn->hdev);
1782
1783         __hci_abort_conn(&req, conn, reason);
1784
1785         err = hci_req_run(&req, abort_conn_complete);
1786         if (err && err != -ENODATA) {
1787                 BT_ERR("Failed to run HCI request: err %d", err);
1788                 return err;
1789         }
1790
1791         return 0;
1792 }
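/* Illustrative sketch, not part of the original file: tearing down a link
 * from outside any existing request context. The wrapper name is
 * hypothetical; HCI_ERROR_REMOTE_USER_TERM (0x13) is the standard
 * "remote user terminated connection" reason code.
 */
static inline int example_user_disconnect(struct hci_conn *conn)
{
        return hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}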
1793
1794 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1795 {
1796         hci_dev_lock(req->hdev);
1797         __hci_update_background_scan(req);
1798         hci_dev_unlock(req->hdev);
1799         return 0;
1800 }
1801
1802 static void bg_scan_update(struct work_struct *work)
1803 {
1804         struct hci_dev *hdev = container_of(work, struct hci_dev,
1805                                             bg_scan_update);
1806         struct hci_conn *conn;
1807         u8 status;
1808         int err;
1809
1810         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1811         if (!err)
1812                 return;
1813
1814         hci_dev_lock(hdev);
1815
1816         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1817         if (conn)
1818                 hci_le_conn_failed(conn, status);
1819
1820         hci_dev_unlock(hdev);
1821 }
1822
1823 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1824 {
1825         hci_req_add_le_scan_disable(req);
1826         return 0;
1827 }
1828
1829 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1830 {
1831         u8 length = opt;
1832         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1833         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
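        /* The same little-endian LIAC/GIAC LAP encodings as in write_iac() */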
1834         struct hci_cp_inquiry cp;
1835
1836         BT_DBG("%s", req->hdev->name);
1837
1838         hci_dev_lock(req->hdev);
1839         hci_inquiry_cache_flush(req->hdev);
1840         hci_dev_unlock(req->hdev);
1841
1842         memset(&cp, 0, sizeof(cp));
1843
1844         if (req->hdev->discovery.limited)
1845                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1846         else
1847                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1848
1849         cp.length = length;
1850
1851         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1852
1853         return 0;
1854 }
1855
1856 static void le_scan_disable_work(struct work_struct *work)
1857 {
1858         struct hci_dev *hdev = container_of(work, struct hci_dev,
1859                                             le_scan_disable.work);
1860         u8 status;
1861
1862         BT_DBG("%s", hdev->name);
1863
1864         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1865                 return;
1866
1867         cancel_delayed_work(&hdev->le_scan_restart);
1868
1869         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1870         if (status) {
1871                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1872                 return;
1873         }
1874
1875         hdev->discovery.scan_start = 0;
1876
1877         /* If we were running an LE-only scan, change the discovery state.
1878          * If we were running both LE scan and BR/EDR inquiry simultaneously
1879          * and the BR/EDR inquiry has already finished, stop discovery;
1880          * otherwise the BR/EDR inquiry will stop discovery when it finishes.
1881          * If we are still resolving a remote device name, do not change
1882          * the discovery state.
1883          */
1884
1885         if (hdev->discovery.type == DISCOV_TYPE_LE)
1886                 goto discov_stopped;
1887
1888         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1889                 return;
1890
1891         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1892                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1893                     hdev->discovery.state != DISCOVERY_RESOLVING)
1894                         goto discov_stopped;
1895
1896                 return;
1897         }
1898
1899         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1900                      HCI_CMD_TIMEOUT, &status);
1901         if (status) {
1902                 BT_ERR("Inquiry failed: status 0x%02x", status);
1903                 goto discov_stopped;
1904         }
1905
1906         return;
1907
1908 discov_stopped:
1909         hci_dev_lock(hdev);
1910         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1911         hci_dev_unlock(hdev);
1912 }
1913
1914 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1915 {
1916         struct hci_dev *hdev = req->hdev;
1917         struct hci_cp_le_set_scan_enable cp;
1918
1919         /* If the controller is not scanning, we are done. */
1920         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1921                 return 0;
1922
1923         hci_req_add_le_scan_disable(req);
1924
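        /* Re-enabling the scan below makes the controller start with a
         * fresh duplicate filter, so devices that were already reported
         * can be reported again during service discovery.
         */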
1925         memset(&cp, 0, sizeof(cp));
1926         cp.enable = LE_SCAN_ENABLE;
1927         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1928         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1929
1930         return 0;
1931 }
1932
1933 static void le_scan_restart_work(struct work_struct *work)
1934 {
1935         struct hci_dev *hdev = container_of(work, struct hci_dev,
1936                                             le_scan_restart.work);
1937         unsigned long timeout, duration, scan_start, now;
1938         u8 status;
1939
1940         BT_DBG("%s", hdev->name);
1941
1942         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1943         if (status) {
1944                 BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1945                 return;
1946         }
1947
1948         hci_dev_lock(hdev);
1949
1950         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1951             !hdev->discovery.scan_start)
1952                 goto unlock;
1953
1954         /* When the scan was started, the le_scan_disable work was queued
1955          * to run 'duration' after scan_start. During the scan restart this
1956          * work has been canceled, so queue it again with the remaining
1957          * timeout to make sure the scan does not run indefinitely.
1958          */
1959         duration = hdev->discovery.scan_duration;
1960         scan_start = hdev->discovery.scan_start;
1961         now = jiffies;
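        /* Compute the elapsed scan time, allowing for a jiffies
         * wrap-around between scan_start and now.
         */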
1962         if (now - scan_start <= duration) {
1963                 int elapsed;
1964
1965                 if (now >= scan_start)
1966                         elapsed = now - scan_start;
1967                 else
1968                         elapsed = ULONG_MAX - scan_start + now;
1969
1970                 timeout = duration - elapsed;
1971         } else {
1972                 timeout = 0;
1973         }
1974
1975         queue_delayed_work(hdev->req_workqueue,
1976                            &hdev->le_scan_disable, timeout);
1977
1978 unlock:
1979         hci_dev_unlock(hdev);
1980 }
1981
1982 static void disable_advertising(struct hci_request *req)
1983 {
1984         u8 enable = 0x00;
1985
1986         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1987 }
1988
1989 static int active_scan(struct hci_request *req, unsigned long opt)
1990 {
1991         u16 interval = opt;
1992         struct hci_dev *hdev = req->hdev;
1993         struct hci_cp_le_set_scan_param param_cp;
1994         struct hci_cp_le_set_scan_enable enable_cp;
1995         u8 own_addr_type;
1996         int err;
1997
1998         BT_DBG("%s", hdev->name);
1999
2000         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2001                 hci_dev_lock(hdev);
2002
2003                 /* Don't let discovery abort an outgoing connection attempt
2004                  * that's using directed advertising.
2005                  */
2006                 if (hci_lookup_le_connect(hdev)) {
2007                         hci_dev_unlock(hdev);
2008                         return -EBUSY;
2009                 }
2010
2011                 cancel_adv_timeout(hdev);
2012                 hci_dev_unlock(hdev);
2013
2014                 disable_advertising(req);
2015         }
2016
2017         /* If the controller is scanning, the background scan is running.
2018          * Temporarily stop it so that the discovery scan parameters can
2019          * be set.
2020          */
2021         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2022                 hci_req_add_le_scan_disable(req);
2023
2024         /* All active scans will be done with either a resolvable private
2025          * address (when privacy feature has been enabled) or non-resolvable
2026          * private address.
2027          */
2028         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2029                                         &own_addr_type);
2030         if (err < 0)
2031                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2032
2033         memset(&param_cp, 0, sizeof(param_cp));
2034         param_cp.type = LE_SCAN_ACTIVE;
2035         param_cp.interval = cpu_to_le16(interval);
2036         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2037         param_cp.own_address_type = own_addr_type;
2038
2039         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2040                     &param_cp);
2041
2042         memset(&enable_cp, 0, sizeof(enable_cp));
2043         enable_cp.enable = LE_SCAN_ENABLE;
2044         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2045
2046         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2047                     &enable_cp);
2048
2049         return 0;
2050 }
2051
2052 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2053 {
2054         int err;
2055
2056         BT_DBG("%s", req->hdev->name);
2057
2058         err = active_scan(req, opt);
2059         if (err)
2060                 return err;
2061
2062         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2063 }
2064
2065 static void start_discovery(struct hci_dev *hdev, u8 *status)
2066 {
2067         unsigned long timeout;
2068
2069         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2070
2071         switch (hdev->discovery.type) {
2072         case DISCOV_TYPE_BREDR:
2073                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2074                         hci_req_sync(hdev, bredr_inquiry,
2075                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2076                                      status);
2077                 return;
2078         case DISCOV_TYPE_INTERLEAVED:
2079                 /* When running simultaneous discovery, the LE scanning time
2080                  * should occupy the whole discovery time since BR/EDR inquiry
2081                  * and LE scanning are scheduled by the controller.
2082                  *
2083                  * For interleaved discovery, in comparison, BR/EDR inquiry
2084                  * and LE scanning are done sequentially with separate
2085                  * timeouts.
2086                  */
2087                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2088                              &hdev->quirks)) {
2089                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2090                         /* During simultaneous discovery, we double the LE scan
2091                          * interval to leave the controller enough time to also
2092                          * run the BR/EDR inquiry.
2093                          */
2094                         hci_req_sync(hdev, interleaved_discov,
2095                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2096                                      status);
2097                         break;
2098                 }
2099
2100                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2101                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2102                              HCI_CMD_TIMEOUT, status);
2103                 break;
2104         case DISCOV_TYPE_LE:
2105                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2106                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2107                              HCI_CMD_TIMEOUT, status);
2108                 break;
2109         default:
2110                 *status = HCI_ERROR_UNSPECIFIED;
2111                 return;
2112         }
2113
2114         if (*status)
2115                 return;
2116
2117         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2118
2119         /* When service discovery is used and the controller has a
2120          * strict duplicate filter, it is important to remember the
2121          * start and duration of the scan. This is required for
2122          * restarting scanning during the discovery phase.
2123          */
2124         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2125             hdev->discovery.result_filtering) {
2126                 hdev->discovery.scan_start = jiffies;
2127                 hdev->discovery.scan_duration = timeout;
2128         }
2129
2130         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2131                            timeout);
2132 }
2133
2134 bool hci_req_stop_discovery(struct hci_request *req)
2135 {
2136         struct hci_dev *hdev = req->hdev;
2137         struct discovery_state *d = &hdev->discovery;
2138         struct hci_cp_remote_name_req_cancel cp;
2139         struct inquiry_entry *e;
2140         bool ret = false;
2141
2142         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2143
2144         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2145                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2146                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2147
2148                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2149                         cancel_delayed_work(&hdev->le_scan_disable);
2150                         hci_req_add_le_scan_disable(req);
2151                 }
2152
2153                 ret = true;
2154         } else {
2155                 /* Passive scanning */
2156                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2157                         hci_req_add_le_scan_disable(req);
2158                         ret = true;
2159                 }
2160         }
2161
2162         /* No further actions needed for LE-only discovery */
2163         if (d->type == DISCOV_TYPE_LE)
2164                 return ret;
2165
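        /* A remote name resolution may still be in flight; cancel the
         * pending remote name request so discovery can stop cleanly.
         */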
2166         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2167                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2168                                                      NAME_PENDING);
2169                 if (!e)
2170                         return ret;
2171
2172                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2173                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2174                             &cp);
2175                 ret = true;
2176         }
2177
2178         return ret;
2179 }
2180
2181 static int stop_discovery(struct hci_request *req, unsigned long opt)
2182 {
2183         hci_dev_lock(req->hdev);
2184         hci_req_stop_discovery(req);
2185         hci_dev_unlock(req->hdev);
2186
2187         return 0;
2188 }
2189
2190 static void discov_update(struct work_struct *work)
2191 {
2192         struct hci_dev *hdev = container_of(work, struct hci_dev,
2193                                             discov_update);
2194         u8 status = 0;
2195
2196         switch (hdev->discovery.state) {
2197         case DISCOVERY_STARTING:
2198                 start_discovery(hdev, &status);
2199                 mgmt_start_discovery_complete(hdev, status);
2200                 if (status)
2201                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2202                 else
2203                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2204                 break;
2205         case DISCOVERY_STOPPING:
2206                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2207                 mgmt_stop_discovery_complete(hdev, status);
2208                 if (!status)
2209                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2210                 break;
2211         case DISCOVERY_STOPPED:
2212         default:
2213                 return;
2214         }
2215 }
2216
2217 static void discov_off(struct work_struct *work)
2218 {
2219         struct hci_dev *hdev = container_of(work, struct hci_dev,
2220                                             discov_off.work);
2221
2222         BT_DBG("%s", hdev->name);
2223
2224         hci_dev_lock(hdev);
2225
2226         /* When the discoverable timeout triggers, just make sure that
2227          * the limited discoverable flag is cleared. Even when the timeout
2228          * was triggered from general discoverable mode, it is safe to
2229          * unconditionally clear the flag.
2230          */
2231         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2232         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2233         hdev->discov_timeout = 0;
2234
2235         hci_dev_unlock(hdev);
2236
2237         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2238         mgmt_new_settings(hdev);
2239 }
2240
2241 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2242 {
2243         struct hci_dev *hdev = req->hdev;
2244         u8 link_sec;
2245
2246         hci_dev_lock(hdev);
2247
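        /* Bring the controller's host feature configuration in line with
         * the current settings: SSP, Secure Connections and LE support may
         * all need to be (re-)enabled after powering on.
         */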
2248         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2249             !lmp_host_ssp_capable(hdev)) {
2250                 u8 mode = 0x01;
2251
2252                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2253
2254                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2255                         u8 support = 0x01;
2256
2257                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2258                                     sizeof(support), &support);
2259                 }
2260         }
2261
2262         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2263             lmp_bredr_capable(hdev)) {
2264                 struct hci_cp_write_le_host_supported cp;
2265
2266                 cp.le = 0x01;
2267                 cp.simul = 0x00;
2268
2269                 /* Check first if we already have the right
2270                  * host state (host features set)
2271                  */
2272                 if (cp.le != lmp_host_le_capable(hdev) ||
2273                     cp.simul != lmp_host_le_br_capable(hdev))
2274                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2275                                     sizeof(cp), &cp);
2276         }
2277
2278         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2279                 /* Make sure the controller has a good default for
2280                  * advertising data. This also applies to the case
2281                  * where BR/EDR was toggled during the AUTO_OFF phase.
2282                  */
2283                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2284                     list_empty(&hdev->adv_instances)) {
2285                         __hci_req_update_adv_data(req, 0x00);
2286                         __hci_req_update_scan_rsp_data(req, 0x00);
2287
2288                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2289                                 __hci_req_enable_advertising(req);
2290                 } else if (!list_empty(&hdev->adv_instances)) {
2291                         struct adv_info *adv_instance;
2292
2293                         adv_instance = list_first_entry(&hdev->adv_instances,
2294                                                         struct adv_info, list);
2295                         __hci_req_schedule_adv_instance(req,
2296                                                         adv_instance->instance,
2297                                                         true);
2298                 }
2299         }
2300
2301         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2302         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2303                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2304                             sizeof(link_sec), &link_sec);
2305
2306         if (lmp_bredr_capable(hdev)) {
2307                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2308                         __hci_req_write_fast_connectable(req, true);
2309                 else
2310                         __hci_req_write_fast_connectable(req, false);
2311                 __hci_req_update_scan(req);
2312                 __hci_req_update_class(req);
2313                 __hci_req_update_name(req);
2314                 __hci_req_update_eir(req);
2315         }
2316
2317         hci_dev_unlock(hdev);
2318         return 0;
2319 }
2320
2321 int __hci_req_hci_power_on(struct hci_dev *hdev)
2322 {
2323         /* Register the available SMP channels (BR/EDR and LE) only when
2324          * successfully powering on the controller. This late
2325          * registration is required so that LE SMP can clearly decide
2326          * whether the public address or the static address is used.
2327          */
2328         smp_register(hdev);
2329
2330         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2331                               NULL);
2332 }
2333
2334 void hci_request_setup(struct hci_dev *hdev)
2335 {
2336         INIT_WORK(&hdev->discov_update, discov_update);
2337         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2338         INIT_WORK(&hdev->scan_update, scan_update_work);
2339         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2340         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2341         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2342         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2343         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2344         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2345 }
2346
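/* Tear-down counterpart of hci_request_setup(): the work items
 * initialized there are canceled here before the device goes away.
 */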
2347 void hci_request_cancel_all(struct hci_dev *hdev)
2348 {
2349         hci_req_sync_cancel(hdev, ENODEV);
2350
2351         cancel_work_sync(&hdev->discov_update);
2352         cancel_work_sync(&hdev->bg_scan_update);
2353         cancel_work_sync(&hdev->scan_update);
2354         cancel_work_sync(&hdev->connectable_update);
2355         cancel_work_sync(&hdev->discoverable_update);
2356         cancel_delayed_work_sync(&hdev->discov_off);
2357         cancel_delayed_work_sync(&hdev->le_scan_disable);
2358         cancel_delayed_work_sync(&hdev->le_scan_restart);
2359
2360         if (hdev->adv_instance_timeout) {
2361                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2362                 hdev->adv_instance_timeout = 0;
2363         }
2364 }