net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

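/* Initialize an HCI request: empty command queue, bound to @hdev, with no
 * error recorded yet.
 */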
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

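/* Splice the commands queued on @req onto the controller's command queue
 * and kick the command worker. The completion callback (either the plain
 * or the skb-based variant) is attached to the last command so that it is
 * invoked once the whole sequence has been processed.
 */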
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

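/* Completion handler used by the synchronous request helpers: record the
 * result (and the response skb, if any) in @hdev and wake up the waiter.
 */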
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

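/* Send a single HCI command and block until the matching event arrives or
 * @timeout (in jiffies) expires. Returns the response skb on success or an
 * ERR_PTR() on failure.
 */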
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

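/* Illustrative usage sketch (not part of this file): a caller could issue a
 * synchronous command and inspect the response, e.g.
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                             HCI_CMD_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        ...
 *        kfree_skb(skb);
 */
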
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

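/* Allocate an skb and fill in the HCI command header plus parameters for
 * the given opcode. Returns NULL on allocation failure.
 */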
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

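/* Build the Extended Inquiry Response payload in @data: local name, TX
 * power, Device ID and the 16/32/128-bit service UUID lists, in that order.
 */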
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

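/* Synchronize the controller's white list with the pending LE connection
 * and report lists. Returns the scan filter policy to use: 0x01 if the
 * white list could be fully programmed, or 0x00 (accept all advertising)
 * if it cannot be used, e.g. because an entry uses an RPA or the list is
 * too small.
 */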
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses and LE
         * privacy is therefore enabled, controllers that support the
         * Extended Scanner Filter Policies can also handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

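/* Return the advertising flags for the given instance. Instance 0 derives
 * its flags from the global HCI_ADVERTISING_CONNECTABLE and discoverable
 * settings; other instances return the flags stored with the instance, or
 * 0 if the instance does not exist.
 */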
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

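/* Queue the commands needed to (re)enable undirected advertising for the
 * current instance: stop any running advertising, update the random address
 * if needed, program the advertising parameters and finally set
 * LE_SET_ADV_ENABLE.
 */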
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0;
        size_t name_len;

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Set the appropriate entries based on advertising instance flags
         * here once flags other than 0 are supported.
         */
        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        return adv_instance->scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

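/* Build the advertising data for the given instance in @ptr: the "Flags"
 * AD field (when applicable), the instance's advertising data and, if
 * requested and valid, the TX power. Returns the total length written.
 */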
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

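/* Make @instance the current advertising instance and enable it. A timeout
 * derived from the instance's duration and remaining lifetime is armed on
 * hdev->adv_instance_expire. Returns 0 on success or a negative error if
 * the HCI_ADVERTISING setting is active or no instances exist, if another
 * instance timeout is already pending, or if the instance cannot be found.
 */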
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
                                u8 instance, bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

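/* Queue an LE Set Random Address command unless advertising or an LE
 * connection attempt is in progress, in which case the update is deferred
 * by marking the RPA as expired.
 */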
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

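/* Pick the own address type for an upcoming advertising or scanning
 * operation and, if needed, queue the command to program it: an RPA when
 * @use_rpa is set, a fresh non-resolvable private address when privacy is
 * required, the configured static address when one is in use, and the
 * public address otherwise.
 */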
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

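/* Update the BR/EDR page and inquiry scan setting based on the connectable
 * and discoverable state, skipping the command if nothing would change.
 */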
1469 void __hci_req_update_scan(struct hci_request *req)
1470 {
1471         struct hci_dev *hdev = req->hdev;
1472         u8 scan;
1473
1474         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1475                 return;
1476
1477         if (!hdev_is_powered(hdev))
1478                 return;
1479
1480         if (mgmt_powering_down(hdev))
1481                 return;
1482
1483         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1484             disconnected_whitelist_entries(hdev))
1485                 scan = SCAN_PAGE;
1486         else
1487                 scan = SCAN_DISABLED;
1488
1489         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1490                 scan |= SCAN_INQUIRY;
1491
1492         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1493             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1494                 return;
1495
1496         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1497 }
1498
1499 static int update_scan(struct hci_request *req, unsigned long opt)
1500 {
1501         hci_dev_lock(req->hdev);
1502         __hci_req_update_scan(req);
1503         hci_dev_unlock(req->hdev);
1504         return 0;
1505 }
1506
1507 static void scan_update_work(struct work_struct *work)
1508 {
1509         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1510
1511         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1512 }
1513
1514 static int connectable_update(struct hci_request *req, unsigned long opt)
1515 {
1516         struct hci_dev *hdev = req->hdev;
1517
1518         hci_dev_lock(hdev);
1519
1520         __hci_req_update_scan(req);
1521
1522         /* If BR/EDR is not enabled and we disable advertising as a
1523          * by-product of disabling connectable, we need to update the
1524          * advertising flags.
1525          */
1526         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1527                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1528
1529         /* Update the advertising parameters if necessary */
1530         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1531             !list_empty(&hdev->adv_instances))
1532                 __hci_req_enable_advertising(req);
1533
1534         __hci_update_background_scan(req);
1535
1536         hci_dev_unlock(hdev);
1537
1538         return 0;
1539 }
1540
1541 static void connectable_update_work(struct work_struct *work)
1542 {
1543         struct hci_dev *hdev = container_of(work, struct hci_dev,
1544                                             connectable_update);
1545         u8 status;
1546
1547         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1548         mgmt_set_connectable_complete(hdev, status);
1549 }
1550
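/* OR together the service class hints of all registered UUIDs to form the
 * service class portion of the Class of Device.
 */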
1551 static u8 get_service_classes(struct hci_dev *hdev)
1552 {
1553         struct bt_uuid *uuid;
1554         u8 val = 0;
1555
1556         list_for_each_entry(uuid, &hdev->uuids, list)
1557                 val |= uuid->svc_hint;
1558
1559         return val;
1560 }
1561
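/* Rebuild the Class of Device from the stored minor/major class and the
 * service classes of the registered UUIDs, setting the Limited Discoverable
 * bit when needed, and queue a Write Class of Device command only if the
 * value changed.
 */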
1562 void __hci_req_update_class(struct hci_request *req)
1563 {
1564         struct hci_dev *hdev = req->hdev;
1565         u8 cod[3];
1566
1567         BT_DBG("%s", hdev->name);
1568
1569         if (!hdev_is_powered(hdev))
1570                 return;
1571
1572         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1573                 return;
1574
1575         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1576                 return;
1577
1578         cod[0] = hdev->minor_class;
1579         cod[1] = hdev->major_class;
1580         cod[2] = get_service_classes(hdev);
1581
1582         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1583                 cod[1] |= 0x20;
1584
1585         if (memcmp(cod, hdev->dev_class, 3) == 0)
1586                 return;
1587
1588         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1589 }
1590
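/* Queue a Write Current IAC LAP command matching the discoverable mode:
 * LIAC (0x9e8b00) plus GIAC (0x9e8b33) for limited discoverable, GIAC only
 * for general discoverable. The LAPs are written in little-endian order.
 */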
1591 static void write_iac(struct hci_request *req)
1592 {
1593         struct hci_dev *hdev = req->hdev;
1594         struct hci_cp_write_current_iac_lap cp;
1595
1596         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1597                 return;
1598
1599         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1600                 /* Limited discoverable mode */
1601                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1602                 cp.iac_lap[0] = 0x00;   /* LIAC */
1603                 cp.iac_lap[1] = 0x8b;
1604                 cp.iac_lap[2] = 0x9e;
1605                 cp.iac_lap[3] = 0x33;   /* GIAC */
1606                 cp.iac_lap[4] = 0x8b;
1607                 cp.iac_lap[5] = 0x9e;
1608         } else {
1609                 /* General discoverable mode */
1610                 cp.num_iac = 1;
1611                 cp.iac_lap[0] = 0x33;   /* GIAC */
1612                 cp.iac_lap[1] = 0x8b;
1613                 cp.iac_lap[2] = 0x9e;
1614         }
1615
1616         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1617                     (cp.num_iac * 3) + 1, &cp);
1618 }
1619
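/* Apply a change of the discoverable setting: refresh IAC, scan mode and
 * class for BR/EDR, and update the advertising data (and, in limited
 * privacy mode, the advertising parameters) when Set Advertising is
 * enabled.
 */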
1620 static int discoverable_update(struct hci_request *req, unsigned long opt)
1621 {
1622         struct hci_dev *hdev = req->hdev;
1623
1624         hci_dev_lock(hdev);
1625
1626         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1627                 write_iac(req);
1628                 __hci_req_update_scan(req);
1629                 __hci_req_update_class(req);
1630         }
1631
1632         /* Advertising instances don't use the global discoverable setting, so
1633          * only update AD if advertising was enabled using Set Advertising.
1634          */
1635         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1636                 __hci_req_update_adv_data(req, 0x00);
1637
1638                 /* Discoverable mode affects the local advertising
1639                  * address in limited privacy mode.
1640                  */
1641                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1642                         __hci_req_enable_advertising(req);
1643         }
1644
1645         hci_dev_unlock(hdev);
1646
1647         return 0;
1648 }
1649
1650 static void discoverable_update_work(struct work_struct *work)
1651 {
1652         struct hci_dev *hdev = container_of(work, struct hci_dev,
1653                                             discoverable_update);
1654         u8 status;
1655
1656         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1657         mgmt_set_discoverable_complete(hdev, status);
1658 }
1659
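/* Queue the HCI command appropriate for tearing down a connection based on
 * its current state: Disconnect (or Disconnect Physical Link for AMP) when
 * connected, a connection cancel command while still connecting, and a
 * reject command for incoming connections in BT_CONNECT2.
 */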
1660 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1661                       u8 reason)
1662 {
1663         switch (conn->state) {
1664         case BT_CONNECTED:
1665         case BT_CONFIG:
1666                 if (conn->type == AMP_LINK) {
1667                         struct hci_cp_disconn_phy_link cp;
1668
1669                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1670                         cp.reason = reason;
1671                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1672                                     &cp);
1673                 } else {
1674                         struct hci_cp_disconnect dc;
1675
1676                         dc.handle = cpu_to_le16(conn->handle);
1677                         dc.reason = reason;
1678                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1679                 }
1680
1681                 conn->state = BT_DISCONN;
1682
1683                 break;
1684         case BT_CONNECT:
1685                 if (conn->type == LE_LINK) {
1686                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1687                                 break;
1688                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1689                                     0, NULL);
1690                 } else if (conn->type == ACL_LINK) {
1691                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1692                                 break;
1693                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1694                                     6, &conn->dst);
1695                 }
1696                 break;
1697         case BT_CONNECT2:
1698                 if (conn->type == ACL_LINK) {
1699                         struct hci_cp_reject_conn_req rej;
1700
1701                         bacpy(&rej.bdaddr, &conn->dst);
1702                         rej.reason = reason;
1703
1704                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1705                                     sizeof(rej), &rej);
1706                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1707                         struct hci_cp_reject_sync_conn_req rej;
1708
1709                         bacpy(&rej.bdaddr, &conn->dst);
1710
1711                         /* SCO rejection has its own limited set of
1712                          * allowed error values (0x0D-0x0F) which isn't
1713                          * compatible with most values passed to this
1714                          * function. To be safe, hard-code one of the
1715                          * values that's suitable for SCO.
1716                          */
1717                         rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1718
1719                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1720                                     sizeof(rej), &rej);
1721                 }
1722                 break;
1723         default:
1724                 conn->state = BT_CLOSED;
1725                 break;
1726         }
1727 }
1728
1729 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1730 {
1731         if (status)
1732                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1733 }
1734
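/* Build and run a request that aborts the given connection; -ENODATA from
 * an empty request (nothing to queue for this state) is not treated as an
 * error.
 */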
1735 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1736 {
1737         struct hci_request req;
1738         int err;
1739
1740         hci_req_init(&req, conn->hdev);
1741
1742         __hci_abort_conn(&req, conn, reason);
1743
1744         err = hci_req_run(&req, abort_conn_complete);
1745         if (err && err != -ENODATA) {
1746                 BT_ERR("Failed to run HCI request: err %d", err);
1747                 return err;
1748         }
1749
1750         return 0;
1751 }
1752
1753 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1754 {
1755         hci_dev_lock(req->hdev);
1756         __hci_update_background_scan(req);
1757         hci_dev_unlock(req->hdev);
1758         return 0;
1759 }
1760
1761 static void bg_scan_update(struct work_struct *work)
1762 {
1763         struct hci_dev *hdev = container_of(work, struct hci_dev,
1764                                             bg_scan_update);
1765         struct hci_conn *conn;
1766         u8 status;
1767         int err;
1768
1769         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1770         if (!err)
1771                 return;
1772
1773         hci_dev_lock(hdev);
1774
1775         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1776         if (conn)
1777                 hci_le_conn_failed(conn, status);
1778
1779         hci_dev_unlock(hdev);
1780 }
1781
1782 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1783 {
1784         hci_req_add_le_scan_disable(req);
1785         return 0;
1786 }
1787
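/* Flush the inquiry cache and queue an Inquiry command using the LIAC or
 * GIAC depending on whether limited discovery was requested; the inquiry
 * length is passed in via the opt argument.
 */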
1788 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1789 {
1790         u8 length = opt;
1791         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1792         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1793         struct hci_cp_inquiry cp;
1794
1795         BT_DBG("%s", req->hdev->name);
1796
1797         hci_dev_lock(req->hdev);
1798         hci_inquiry_cache_flush(req->hdev);
1799         hci_dev_unlock(req->hdev);
1800
1801         memset(&cp, 0, sizeof(cp));
1802
1803         if (req->hdev->discovery.limited)
1804                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1805         else
1806                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1807
1808         cp.length = length;
1809
1810         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1811
1812         return 0;
1813 }
1814
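/* Delayed work that disables LE scanning when the discovery timeout
 * expires and then either stops discovery or, for interleaved discovery
 * without controller support, hands over to BR/EDR inquiry.
 */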
1815 static void le_scan_disable_work(struct work_struct *work)
1816 {
1817         struct hci_dev *hdev = container_of(work, struct hci_dev,
1818                                             le_scan_disable.work);
1819         u8 status;
1820
1821         BT_DBG("%s", hdev->name);
1822
1823         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1824                 return;
1825
1826         cancel_delayed_work(&hdev->le_scan_restart);
1827
1828         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1829         if (status) {
1830                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1831                 return;
1832         }
1833
1834         hdev->discovery.scan_start = 0;
1835
1836         /* If we were running LE only scan, change discovery state. If
1837          * we were running both LE and BR/EDR inquiry simultaneously,
1838          * and BR/EDR inquiry is already finished, stop discovery,
1839          * otherwise BR/EDR inquiry will stop discovery when finished.
1840          * If we are about to resolve a remote device name, do not
1841          * change the discovery state.
1842          */
1843
1844         if (hdev->discovery.type == DISCOV_TYPE_LE)
1845                 goto discov_stopped;
1846
1847         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1848                 return;
1849
1850         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1851                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1852                     hdev->discovery.state != DISCOVERY_RESOLVING)
1853                         goto discov_stopped;
1854
1855                 return;
1856         }
1857
1858         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1859                      HCI_CMD_TIMEOUT, &status);
1860         if (status) {
1861                 BT_ERR("Inquiry failed: status 0x%02x", status);
1862                 goto discov_stopped;
1863         }
1864
1865         return;
1866
1867 discov_stopped:
1868         hci_dev_lock(hdev);
1869         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1870         hci_dev_unlock(hdev);
1871 }
1872
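/* Restart LE scanning by queueing a scan disable followed by a scan
 * enable, provided the controller is currently scanning.
 */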
1873 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1874 {
1875         struct hci_dev *hdev = req->hdev;
1876         struct hci_cp_le_set_scan_enable cp;
1877
1878         /* If the controller is not scanning, we are done. */
1879         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1880                 return 0;
1881
1882         hci_req_add_le_scan_disable(req);
1883
1884         memset(&cp, 0, sizeof(cp));
1885         cp.enable = LE_SCAN_ENABLE;
1886         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1887         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1888
1889         return 0;
1890 }
1891
1892 static void le_scan_restart_work(struct work_struct *work)
1893 {
1894         struct hci_dev *hdev = container_of(work, struct hci_dev,
1895                                             le_scan_restart.work);
1896         unsigned long timeout, duration, scan_start, now;
1897         u8 status;
1898
1899         BT_DBG("%s", hdev->name);
1900
1901         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1902         if (status) {
1903                 BT_ERR("Failed to restart LE scan: status %d", status);
1904                 return;
1905         }
1906
1907         hci_dev_lock(hdev);
1908
1909         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1910             !hdev->discovery.scan_start)
1911                 goto unlock;
1912
1913         /* When the scan was started, hdev->le_scan_disable was queued to
1914          * run 'duration' after scan_start. That work was canceled for the
1915          * scan restart, so queue it again with the remaining timeout to
1916          * make sure the scan does not run indefinitely.
1917          */
1918         duration = hdev->discovery.scan_duration;
1919         scan_start = hdev->discovery.scan_start;
1920         now = jiffies;
1921         if (now - scan_start <= duration) {
1922                 int elapsed;
1923
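                /* jiffies may have wrapped around since scan_start, so
                 * compute the elapsed time accordingly.
                 */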
1924                 if (now >= scan_start)
1925                         elapsed = now - scan_start;
1926                 else
1927                         elapsed = ULONG_MAX - scan_start + now;
1928
1929                 timeout = duration - elapsed;
1930         } else {
1931                 timeout = 0;
1932         }
1933
1934         queue_delayed_work(hdev->req_workqueue,
1935                            &hdev->le_scan_disable, timeout);
1936
1937 unlock:
1938         hci_dev_unlock(hdev);
1939 }
1940
1941 static void disable_advertising(struct hci_request *req)
1942 {
1943         u8 enable = 0x00;
1944
1945         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1946 }
1947
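/* Set up an active LE scan for discovery: stop advertising (unless a
 * directed-advertising connection attempt is in progress) and any running
 * background scan, pick a suitable own address type, then queue the scan
 * parameter and scan enable commands.
 */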
1948 static int active_scan(struct hci_request *req, unsigned long opt)
1949 {
1950         uint16_t interval = opt;
1951         struct hci_dev *hdev = req->hdev;
1952         struct hci_cp_le_set_scan_param param_cp;
1953         struct hci_cp_le_set_scan_enable enable_cp;
1954         u8 own_addr_type;
1955         int err;
1956
1957         BT_DBG("%s", hdev->name);
1958
1959         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1960                 hci_dev_lock(hdev);
1961
1962                 /* Don't let discovery abort an outgoing connection attempt
1963                  * that's using directed advertising.
1964                  */
1965                 if (hci_lookup_le_connect(hdev)) {
1966                         hci_dev_unlock(hdev);
1967                         return -EBUSY;
1968                 }
1969
1970                 cancel_adv_timeout(hdev);
1971                 hci_dev_unlock(hdev);
1972
1973                 disable_advertising(req);
1974         }
1975
1976         /* If controller is scanning, it means the background scanning is
1977          * running. Thus, we should temporarily stop it in order to set the
1978          * discovery scanning parameters.
1979          */
1980         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1981                 hci_req_add_le_scan_disable(req);
1982
1983         /* All active scans will be done with either a resolvable private
1984          * address (when the privacy feature has been enabled) or a
1985          * non-resolvable private address.
1986          */
1987         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
1988                                         &own_addr_type);
1989         if (err < 0)
1990                 own_addr_type = ADDR_LE_DEV_PUBLIC;
1991
1992         memset(&param_cp, 0, sizeof(param_cp));
1993         param_cp.type = LE_SCAN_ACTIVE;
1994         param_cp.interval = cpu_to_le16(interval);
1995         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1996         param_cp.own_address_type = own_addr_type;
1997
1998         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1999                     &param_cp);
2000
2001         memset(&enable_cp, 0, sizeof(enable_cp));
2002         enable_cp.enable = LE_SCAN_ENABLE;
2003         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2004
2005         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2006                     &enable_cp);
2007
2008         return 0;
2009 }
2010
2011 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2012 {
2013         int err;
2014
2015         BT_DBG("%s", req->hdev->name);
2016
2017         err = active_scan(req, opt);
2018         if (err)
2019                 return err;
2020
2021         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2022 }
2023
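/* Issue the HCI commands for the configured discovery type and, for
 * LE-based discovery, schedule the delayed work that disables the LE scan
 * once the computed timeout expires.
 */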
2024 static void start_discovery(struct hci_dev *hdev, u8 *status)
2025 {
2026         unsigned long timeout;
2027
2028         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2029
2030         switch (hdev->discovery.type) {
2031         case DISCOV_TYPE_BREDR:
2032                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2033                         hci_req_sync(hdev, bredr_inquiry,
2034                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2035                                      status);
2036                 return;
2037         case DISCOV_TYPE_INTERLEAVED:
2038                 /* When running simultaneous discovery, the LE scanning time
2039                  * should occupy the whole discovery time since BR/EDR inquiry
2040                  * and LE scanning are scheduled by the controller.
2041                  *
2042                  * For interleaved discovery, in comparison, BR/EDR inquiry
2043                  * and LE scanning are done sequentially with separate
2044                  * timeouts.
2045                  */
2046                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2047                              &hdev->quirks)) {
2048                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2049                         /* During simultaneous discovery, we double LE scan
2050                          * interval. We must leave some time for the controller
2051                          * to do BR/EDR inquiry.
2052                          */
2053                         hci_req_sync(hdev, interleaved_discov,
2054                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2055                                      status);
2056                         break;
2057                 }
2058
2059                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2060                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2061                              HCI_CMD_TIMEOUT, status);
2062                 break;
2063         case DISCOV_TYPE_LE:
2064                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2065                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2066                              HCI_CMD_TIMEOUT, status);
2067                 break;
2068         default:
2069                 *status = HCI_ERROR_UNSPECIFIED;
2070                 return;
2071         }
2072
2073         if (*status)
2074                 return;
2075
2076         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2077
2078         /* When service discovery is used and the controller has a
2079          * strict duplicate filter, it is important to remember the
2080          * start and duration of the scan. This is required for
2081          * restarting scanning during the discovery phase.
2082          */
2083         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2084                      hdev->discovery.result_filtering) {
2085                 hdev->discovery.scan_start = jiffies;
2086                 hdev->discovery.scan_duration = timeout;
2087         }
2088
2089         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2090                            timeout);
2091 }
2092
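/* Queue the commands needed to stop an ongoing discovery: cancel the
 * BR/EDR inquiry and/or disable LE scanning as appropriate, and cancel a
 * pending remote name request if one is being resolved.
 */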
2093 bool hci_req_stop_discovery(struct hci_request *req)
2094 {
2095         struct hci_dev *hdev = req->hdev;
2096         struct discovery_state *d = &hdev->discovery;
2097         struct hci_cp_remote_name_req_cancel cp;
2098         struct inquiry_entry *e;
2099         bool ret = false;
2100
2101         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2102
2103         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2104                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2105                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2106
2107                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2108                         cancel_delayed_work(&hdev->le_scan_disable);
2109                         hci_req_add_le_scan_disable(req);
2110                 }
2111
2112                 ret = true;
2113         } else {
2114                 /* Passive scanning */
2115                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2116                         hci_req_add_le_scan_disable(req);
2117                         ret = true;
2118                 }
2119         }
2120
2121         /* No further actions needed for LE-only discovery */
2122         if (d->type == DISCOV_TYPE_LE)
2123                 return ret;
2124
2125         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2126                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2127                                                      NAME_PENDING);
2128                 if (!e)
2129                         return ret;
2130
2131                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2132                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2133                             &cp);
2134                 ret = true;
2135         }
2136
2137         return ret;
2138 }
2139
2140 static int stop_discovery(struct hci_request *req, unsigned long opt)
2141 {
2142         hci_dev_lock(req->hdev);
2143         hci_req_stop_discovery(req);
2144         hci_dev_unlock(req->hdev);
2145
2146         return 0;
2147 }
2148
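/* Work item driving mgmt-initiated discovery: start or stop discovery
 * according to the current discovery state and report the result back to
 * the management interface.
 */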
2149 static void discov_update(struct work_struct *work)
2150 {
2151         struct hci_dev *hdev = container_of(work, struct hci_dev,
2152                                             discov_update);
2153         u8 status = 0;
2154
2155         switch (hdev->discovery.state) {
2156         case DISCOVERY_STARTING:
2157                 start_discovery(hdev, &status);
2158                 mgmt_start_discovery_complete(hdev, status);
2159                 if (status)
2160                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2161                 else
2162                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2163                 break;
2164         case DISCOVERY_STOPPING:
2165                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2166                 mgmt_stop_discovery_complete(hdev, status);
2167                 if (!status)
2168                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2169                 break;
2170         case DISCOVERY_STOPPED:
2171         default:
2172                 return;
2173         }
2174 }
2175
2176 static void discov_off(struct work_struct *work)
2177 {
2178         struct hci_dev *hdev = container_of(work, struct hci_dev,
2179                                             discov_off.work);
2180
2181         BT_DBG("%s", hdev->name);
2182
2183         hci_dev_lock(hdev);
2184
2185         /* When the discoverable timeout triggers, just make sure the
2186          * limited discoverable flag is cleared. Even in the case of a
2187          * timeout triggered from general discoverable mode, it is safe
2188          * to unconditionally clear the flag.
2189          */
2190         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2191         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2192         hdev->discov_timeout = 0;
2193
2194         hci_dev_unlock(hdev);
2195
2196         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2197         mgmt_new_settings(hdev);
2198 }
2199
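/* Bring the controller in line with the stored host configuration after
 * powering on: SSP/SC support, LE host support, advertising data and
 * instances, link security, and the BR/EDR scan, class, name and EIR
 * settings.
 */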
2200 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2201 {
2202         struct hci_dev *hdev = req->hdev;
2203         u8 link_sec;
2204
2205         hci_dev_lock(hdev);
2206
2207         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2208             !lmp_host_ssp_capable(hdev)) {
2209                 u8 mode = 0x01;
2210
2211                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2212
2213                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2214                         u8 support = 0x01;
2215
2216                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2217                                     sizeof(support), &support);
2218                 }
2219         }
2220
2221         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2222             lmp_bredr_capable(hdev)) {
2223                 struct hci_cp_write_le_host_supported cp;
2224
2225                 cp.le = 0x01;
2226                 cp.simul = 0x00;
2227
2228                 /* Check first if we already have the right
2229                  * host state (host features set)
2230                  */
2231                 if (cp.le != lmp_host_le_capable(hdev) ||
2232                     cp.simul != lmp_host_le_br_capable(hdev))
2233                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2234                                     sizeof(cp), &cp);
2235         }
2236
2237         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2238                 /* Make sure the controller has a good default for
2239                  * advertising data. This also applies to the case
2240                  * where BR/EDR was toggled during the AUTO_OFF phase.
2241                  */
2242                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2243                     list_empty(&hdev->adv_instances)) {
2244                         __hci_req_update_adv_data(req, 0x00);
2245                         __hci_req_update_scan_rsp_data(req, 0x00);
2246
2247                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2248                                 __hci_req_enable_advertising(req);
2249                 } else if (!list_empty(&hdev->adv_instances)) {
2250                         struct adv_info *adv_instance;
2251
2252                         adv_instance = list_first_entry(&hdev->adv_instances,
2253                                                         struct adv_info, list);
2254                         __hci_req_schedule_adv_instance(req,
2255                                                         adv_instance->instance,
2256                                                         true);
2257                 }
2258         }
2259
2260         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2261         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2262                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2263                             sizeof(link_sec), &link_sec);
2264
2265         if (lmp_bredr_capable(hdev)) {
2266                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2267                         __hci_req_write_fast_connectable(req, true);
2268                 else
2269                         __hci_req_write_fast_connectable(req, false);
2270                 __hci_req_update_scan(req);
2271                 __hci_req_update_class(req);
2272                 __hci_req_update_name(req);
2273                 __hci_req_update_eir(req);
2274         }
2275
2276         hci_dev_unlock(hdev);
2277         return 0;
2278 }
2279
2280 int __hci_req_hci_power_on(struct hci_dev *hdev)
2281 {
2282         /* Register the available SMP channels (BR/EDR and LE) only when
2283          * successfully powering on the controller. This late
2284          * registration is required so that LE SMP can clearly decide if
2285          * the public address or static address is used.
2286          */
2287         smp_register(hdev);
2288
2289         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2290                               NULL);
2291 }
2292
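/* Initialize the work items used by the request helpers in this file. */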
2293 void hci_request_setup(struct hci_dev *hdev)
2294 {
2295         INIT_WORK(&hdev->discov_update, discov_update);
2296         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2297         INIT_WORK(&hdev->scan_update, scan_update_work);
2298         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2299         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2300         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2301         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2302         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2303         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2304 }
2305
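/* Cancel a pending synchronous request and flush all work items owned by
 * this file, including a still-armed advertising instance timeout.
 */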
2306 void hci_request_cancel_all(struct hci_dev *hdev)
2307 {
2308         hci_req_sync_cancel(hdev, ENODEV);
2309
2310         cancel_work_sync(&hdev->discov_update);
2311         cancel_work_sync(&hdev->bg_scan_update);
2312         cancel_work_sync(&hdev->scan_update);
2313         cancel_work_sync(&hdev->connectable_update);
2314         cancel_work_sync(&hdev->discoverable_update);
2315         cancel_delayed_work_sync(&hdev->discov_off);
2316         cancel_delayed_work_sync(&hdev->le_scan_disable);
2317         cancel_delayed_work_sync(&hdev->le_scan_restart);
2318
2319         if (hdev->adv_instance_timeout) {
2320                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2321                 hdev->adv_instance_timeout = 0;
2322         }
2323 }