net/bluetooth/hci_request.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2014 Intel Corporation
5
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <asm/unaligned.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE      0
34 #define HCI_REQ_PEND      1
35 #define HCI_REQ_CANCELED  2
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39         skb_queue_head_init(&req->cmd_q);
40         req->hdev = hdev;
41         req->err = 0;
42 }
43
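/* Attach the completion callback to the last queued command, splice the
 * request's commands onto the device command queue and kick the command
 * worker so they get transmitted.
 */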
44 static int req_run(struct hci_request *req, hci_req_complete_t complete,
45                    hci_req_complete_skb_t complete_skb)
46 {
47         struct hci_dev *hdev = req->hdev;
48         struct sk_buff *skb;
49         unsigned long flags;
50
51         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
52
53         /* If an error occurred during request building, remove all HCI
54          * commands queued on the HCI request queue.
55          */
56         if (req->err) {
57                 skb_queue_purge(&req->cmd_q);
58                 return req->err;
59         }
60
61         /* Do not allow empty requests */
62         if (skb_queue_empty(&req->cmd_q))
63                 return -ENODATA;
64
65         skb = skb_peek_tail(&req->cmd_q);
66         if (complete) {
67                 bt_cb(skb)->hci.req_complete = complete;
68         } else if (complete_skb) {
69                 bt_cb(skb)->hci.req_complete_skb = complete_skb;
70                 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
71         }
72
73         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
74         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
75         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
76
77         queue_work(hdev->workqueue, &hdev->cmd_work);
78
79         return 0;
80 }
81
82 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
83 {
84         return req_run(req, complete, NULL);
85 }
86
87 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
88 {
89         return req_run(req, NULL, complete);
90 }
91
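/* Completion callback used by the synchronous request helpers: store the
 * result (and the event skb, if any) and wake up the waiting thread.
 */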
92 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
93                                   struct sk_buff *skb)
94 {
95         BT_DBG("%s result 0x%2.2x", hdev->name, result);
96
97         if (hdev->req_status == HCI_REQ_PEND) {
98                 hdev->req_result = result;
99                 hdev->req_status = HCI_REQ_DONE;
100                 if (skb)
101                         hdev->req_skb = skb_get(skb);
102                 wake_up_interruptible(&hdev->req_wait_q);
103         }
104 }
105
106 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
107 {
108         BT_DBG("%s err 0x%2.2x", hdev->name, err);
109
110         if (hdev->req_status == HCI_REQ_PEND) {
111                 hdev->req_result = err;
112                 hdev->req_status = HCI_REQ_CANCELED;
113                 wake_up_interruptible(&hdev->req_wait_q);
114         }
115 }
116
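/* Send a single HCI command and wait, up to the given timeout, for the
 * resulting event. Returns the event skb on success or an ERR_PTR value
 * on failure.
 */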
117 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
118                                   const void *param, u8 event, u32 timeout)
119 {
120         DECLARE_WAITQUEUE(wait, current);
121         struct hci_request req;
122         struct sk_buff *skb;
123         int err = 0;
124
125         BT_DBG("%s", hdev->name);
126
127         hci_req_init(&req, hdev);
128
129         hci_req_add_ev(&req, opcode, plen, param, event);
130
131         hdev->req_status = HCI_REQ_PEND;
132
133         add_wait_queue(&hdev->req_wait_q, &wait);
134         set_current_state(TASK_INTERRUPTIBLE);
135
136         err = hci_req_run_skb(&req, hci_req_sync_complete);
137         if (err < 0) {
138                 remove_wait_queue(&hdev->req_wait_q, &wait);
139                 set_current_state(TASK_RUNNING);
140                 return ERR_PTR(err);
141         }
142
143         schedule_timeout(timeout);
144
145         remove_wait_queue(&hdev->req_wait_q, &wait);
146
147         if (signal_pending(current))
148                 return ERR_PTR(-EINTR);
149
150         switch (hdev->req_status) {
151         case HCI_REQ_DONE:
152                 err = -bt_to_errno(hdev->req_result);
153                 break;
154
155         case HCI_REQ_CANCELED:
156                 err = -hdev->req_result;
157                 break;
158
159         default:
160                 err = -ETIMEDOUT;
161                 break;
162         }
163
164         hdev->req_status = hdev->req_result = 0;
165         skb = hdev->req_skb;
166         hdev->req_skb = NULL;
167
168         BT_DBG("%s end: err %d", hdev->name, err);
169
170         if (err < 0) {
171                 kfree_skb(skb);
172                 return ERR_PTR(err);
173         }
174
175         if (!skb)
176                 return ERR_PTR(-ENODATA);
177
178         return skb;
179 }
180 EXPORT_SYMBOL(__hci_cmd_sync_ev);
181
182 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
183                                const void *param, u32 timeout)
184 {
185         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
186 }
187 EXPORT_SYMBOL(__hci_cmd_sync);
188
189 /* Execute request and wait for completion. */
190 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
191                                                      unsigned long opt),
192                    unsigned long opt, u32 timeout, u8 *hci_status)
193 {
194         struct hci_request req;
195         DECLARE_WAITQUEUE(wait, current);
196         int err = 0;
197
198         BT_DBG("%s start", hdev->name);
199
200         hci_req_init(&req, hdev);
201
202         hdev->req_status = HCI_REQ_PEND;
203
204         err = func(&req, opt);
205         if (err) {
206                 if (hci_status)
207                         *hci_status = HCI_ERROR_UNSPECIFIED;
208                 return err;
209         }
210
211         add_wait_queue(&hdev->req_wait_q, &wait);
212         set_current_state(TASK_INTERRUPTIBLE);
213
214         err = hci_req_run_skb(&req, hci_req_sync_complete);
215         if (err < 0) {
216                 hdev->req_status = 0;
217
218                 remove_wait_queue(&hdev->req_wait_q, &wait);
219                 set_current_state(TASK_RUNNING);
220
221                 /* ENODATA means the HCI request command queue is empty.
222                  * This can happen when a request with conditionals doesn't
223                  * trigger any commands to be sent. This is normal behavior
224                  * and should not trigger an error return.
225                  */
226                 if (err == -ENODATA) {
227                         if (hci_status)
228                                 *hci_status = 0;
229                         return 0;
230                 }
231
232                 if (hci_status)
233                         *hci_status = HCI_ERROR_UNSPECIFIED;
234
235                 return err;
236         }
237
238         schedule_timeout(timeout);
239
240         remove_wait_queue(&hdev->req_wait_q, &wait);
241
242         if (signal_pending(current))
243                 return -EINTR;
244
245         switch (hdev->req_status) {
246         case HCI_REQ_DONE:
247                 err = -bt_to_errno(hdev->req_result);
248                 if (hci_status)
249                         *hci_status = hdev->req_result;
250                 break;
251
252         case HCI_REQ_CANCELED:
253                 err = -hdev->req_result;
254                 if (hci_status)
255                         *hci_status = HCI_ERROR_UNSPECIFIED;
256                 break;
257
258         default:
259                 err = -ETIMEDOUT;
260                 if (hci_status)
261                         *hci_status = HCI_ERROR_UNSPECIFIED;
262                 break;
263         }
264
265         hdev->req_status = hdev->req_result = 0;
266
267         BT_DBG("%s end: err %d", hdev->name, err);
268
269         return err;
270 }
271
272 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
273                                                   unsigned long opt),
274                  unsigned long opt, u32 timeout, u8 *hci_status)
275 {
276         int ret;
277
278         if (!test_bit(HCI_UP, &hdev->flags))
279                 return -ENETDOWN;
280
281         /* Serialize all requests */
282         hci_req_sync_lock(hdev);
283         ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284         hci_req_sync_unlock(hdev);
285
286         return ret;
287 }
288
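/* Allocate and build an HCI command packet: command header plus parameters,
 * with the packet type and opcode recorded in the skb control buffer.
 */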
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290                                 const void *param)
291 {
292         int len = HCI_COMMAND_HDR_SIZE + plen;
293         struct hci_command_hdr *hdr;
294         struct sk_buff *skb;
295
296         skb = bt_skb_alloc(len, GFP_ATOMIC);
297         if (!skb)
298                 return NULL;
299
300         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
301         hdr->opcode = cpu_to_le16(opcode);
302         hdr->plen   = plen;
303
304         if (plen)
305                 memcpy(skb_put(skb, plen), param, plen);
306
307         BT_DBG("skb len %d", skb->len);
308
309         hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310         hci_skb_opcode(skb) = opcode;
311
312         return skb;
313 }
314
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317                     const void *param, u8 event)
318 {
319         struct hci_dev *hdev = req->hdev;
320         struct sk_buff *skb;
321
322         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324         /* If an error occurred during request building, there is no point in
325          * queueing the HCI command. We can simply return.
326          */
327         if (req->err)
328                 return;
329
330         skb = hci_prepare_cmd(hdev, opcode, plen, param);
331         if (!skb) {
332                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
333                        hdev->name, opcode);
334                 req->err = -ENOMEM;
335                 return;
336         }
337
338         if (skb_queue_empty(&req->cmd_q))
339                 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
340
341         bt_cb(skb)->hci.req_event = event;
342
343         skb_queue_tail(&req->cmd_q, skb);
344 }
345
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347                  const void *param)
348 {
349         hci_req_add_ev(req, opcode, plen, param, 0);
350 }
351
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353 {
354         struct hci_dev *hdev = req->hdev;
355         struct hci_cp_write_page_scan_activity acp;
356         u8 type;
357
358         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359                 return;
360
361         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362                 return;
363
364         if (enable) {
365                 type = PAGE_SCAN_TYPE_INTERLACED;
366
367                 /* 160 msec page scan interval */
368                 acp.interval = cpu_to_le16(0x0100);
369         } else {
370                 type = PAGE_SCAN_TYPE_STANDARD; /* default */
371
372                 /* default 1.28 sec page scan */
373                 acp.interval = cpu_to_le16(0x0800);
374         }
375
376         acp.window = cpu_to_le16(0x0012);
377
378         if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379             __cpu_to_le16(hdev->page_scan_window) != acp.window)
380                 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381                             sizeof(acp), &acp);
382
383         if (hdev->page_scan_type != type)
384                 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385 }
386
387 /* This function controls the background scanning based on hdev->pend_le_conns
388  * list. If there are pending LE connections we start background scanning,
389  * otherwise we stop it.
390  *
391  * The caller must hold hdev->lock.
392  */
393 static void __hci_update_background_scan(struct hci_request *req)
394 {
395         struct hci_dev *hdev = req->hdev;
396
397         if (!test_bit(HCI_UP, &hdev->flags) ||
398             test_bit(HCI_INIT, &hdev->flags) ||
399             hci_dev_test_flag(hdev, HCI_SETUP) ||
400             hci_dev_test_flag(hdev, HCI_CONFIG) ||
401             hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402             hci_dev_test_flag(hdev, HCI_UNREGISTER))
403                 return;
404
405         /* No point in doing scanning if LE support hasn't been enabled */
406         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
407                 return;
408
409         /* If discovery is active don't interfere with it */
410         if (hdev->discovery.state != DISCOVERY_STOPPED)
411                 return;
412
413         /* Reset RSSI and UUID filters when starting background scanning
414          * since these filters are meant for service discovery only.
415          *
416          * The Start Discovery and Start Service Discovery operations
417  * ensure that proper values are set for the RSSI threshold and
418  * UUID filter list, so it is safe to simply reset them here.
419          */
420         hci_discovery_filter_clear(hdev);
421
422         if (list_empty(&hdev->pend_le_conns) &&
423             list_empty(&hdev->pend_le_reports)) {
424                 /* If there are no pending LE connections or devices
425                  * to be scanned for, we should stop the background
426                  * scanning.
427                  */
428
429                 /* If controller is not scanning we are done. */
430                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431                         return;
432
433                 hci_req_add_le_scan_disable(req);
434
435                 BT_DBG("%s stopping background scanning", hdev->name);
436         } else {
437                 /* If there is at least one pending LE connection, we should
438                  * keep the background scan running.
439                  */
440
441                 /* If controller is connecting, we should not start scanning
442                  * since some controllers are not able to scan and connect at
443                  * the same time.
444                  */
445                 if (hci_lookup_le_connect(hdev))
446                         return;
447
448                 /* If controller is currently scanning, we stop it to ensure we
449                  * don't miss any advertising (due to duplicates filter).
450                  */
451                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452                         hci_req_add_le_scan_disable(req);
453
454                 hci_req_add_le_passive_scan(req);
455
456                 BT_DBG("%s starting background scanning", hdev->name);
457         }
458 }
459
460 void __hci_req_update_name(struct hci_request *req)
461 {
462         struct hci_dev *hdev = req->hdev;
463         struct hci_cp_write_local_name cp;
464
465         memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466
467         hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468 }
469
470 #define PNP_INFO_SVCLASS_ID             0x1200
471
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473 {
474         u8 *ptr = data, *uuids_start = NULL;
475         struct bt_uuid *uuid;
476
477         if (len < 4)
478                 return ptr;
479
480         list_for_each_entry(uuid, &hdev->uuids, list) {
481                 u16 uuid16;
482
483                 if (uuid->size != 16)
484                         continue;
485
486                 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
487                 if (uuid16 < 0x1100)
488                         continue;
489
490                 if (uuid16 == PNP_INFO_SVCLASS_ID)
491                         continue;
492
493                 if (!uuids_start) {
494                         uuids_start = ptr;
495                         uuids_start[0] = 1;
496                         uuids_start[1] = EIR_UUID16_ALL;
497                         ptr += 2;
498                 }
499
500                 /* Stop if not enough space to put next UUID */
501                 if ((ptr - data) + sizeof(u16) > len) {
502                         uuids_start[1] = EIR_UUID16_SOME;
503                         break;
504                 }
505
506                 *ptr++ = (uuid16 & 0x00ff);
507                 *ptr++ = (uuid16 & 0xff00) >> 8;
508                 uuids_start[0] += sizeof(uuid16);
509         }
510
511         return ptr;
512 }
513
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 {
516         u8 *ptr = data, *uuids_start = NULL;
517         struct bt_uuid *uuid;
518
519         if (len < 6)
520                 return ptr;
521
522         list_for_each_entry(uuid, &hdev->uuids, list) {
523                 if (uuid->size != 32)
524                         continue;
525
526                 if (!uuids_start) {
527                         uuids_start = ptr;
528                         uuids_start[0] = 1;
529                         uuids_start[1] = EIR_UUID32_ALL;
530                         ptr += 2;
531                 }
532
533                 /* Stop if not enough space to put next UUID */
534                 if ((ptr - data) + sizeof(u32) > len) {
535                         uuids_start[1] = EIR_UUID32_SOME;
536                         break;
537                 }
538
539                 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
540                 ptr += sizeof(u32);
541                 uuids_start[0] += sizeof(u32);
542         }
543
544         return ptr;
545 }
546
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
548 {
549         u8 *ptr = data, *uuids_start = NULL;
550         struct bt_uuid *uuid;
551
552         if (len < 18)
553                 return ptr;
554
555         list_for_each_entry(uuid, &hdev->uuids, list) {
556                 if (uuid->size != 128)
557                         continue;
558
559                 if (!uuids_start) {
560                         uuids_start = ptr;
561                         uuids_start[0] = 1;
562                         uuids_start[1] = EIR_UUID128_ALL;
563                         ptr += 2;
564                 }
565
566                 /* Stop if not enough space to put next UUID */
567                 if ((ptr - data) + 16 > len) {
568                         uuids_start[1] = EIR_UUID128_SOME;
569                         break;
570                 }
571
572                 memcpy(ptr, uuid->uuid, 16);
573                 ptr += 16;
574                 uuids_start[0] += 16;
575         }
576
577         return ptr;
578 }
579
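/* Build the extended inquiry response data: local name, inquiry TX power,
 * Device ID and the 16/32/128-bit service UUID lists.
 */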
580 static void create_eir(struct hci_dev *hdev, u8 *data)
581 {
582         u8 *ptr = data;
583         size_t name_len;
584
585         name_len = strlen(hdev->dev_name);
586
587         if (name_len > 0) {
588                 /* EIR Data type */
589                 if (name_len > 48) {
590                         name_len = 48;
591                         ptr[1] = EIR_NAME_SHORT;
592                 } else
593                         ptr[1] = EIR_NAME_COMPLETE;
594
595                 /* EIR Data length */
596                 ptr[0] = name_len + 1;
597
598                 memcpy(ptr + 2, hdev->dev_name, name_len);
599
600                 ptr += (name_len + 2);
601         }
602
603         if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
604                 ptr[0] = 2;
605                 ptr[1] = EIR_TX_POWER;
606                 ptr[2] = (u8) hdev->inq_tx_power;
607
608                 ptr += 3;
609         }
610
611         if (hdev->devid_source > 0) {
612                 ptr[0] = 9;
613                 ptr[1] = EIR_DEVICE_ID;
614
615                 put_unaligned_le16(hdev->devid_source, ptr + 2);
616                 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617                 put_unaligned_le16(hdev->devid_product, ptr + 6);
618                 put_unaligned_le16(hdev->devid_version, ptr + 8);
619
620                 ptr += 10;
621         }
622
623         ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624         ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625         ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 }
627
628 void __hci_req_update_eir(struct hci_request *req)
629 {
630         struct hci_dev *hdev = req->hdev;
631         struct hci_cp_write_eir cp;
632
633         if (!hdev_is_powered(hdev))
634                 return;
635
636         if (!lmp_ext_inq_capable(hdev))
637                 return;
638
639         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640                 return;
641
642         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643                 return;
644
645         memset(&cp, 0, sizeof(cp));
646
647         create_eir(hdev, cp.data);
648
649         if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650                 return;
651
652         memcpy(hdev->eir, cp.data, sizeof(cp.data));
653
654         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655 }
656
657 void hci_req_add_le_scan_disable(struct hci_request *req)
658 {
659         struct hci_cp_le_set_scan_enable cp;
660
661         memset(&cp, 0, sizeof(cp));
662         cp.enable = LE_SCAN_DISABLE;
663         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
664 }
665
666 static void add_to_white_list(struct hci_request *req,
667                               struct hci_conn_params *params)
668 {
669         struct hci_cp_le_add_to_white_list cp;
670
671         cp.bdaddr_type = params->addr_type;
672         bacpy(&cp.bdaddr, &params->addr);
673
674         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
675 }
676
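/* Synchronize the controller white list with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the white
 * list can be used, 0x00 when scanning has to accept all advertisements
 * (for example when RPAs are in use or the white list is too small).
 */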
677 static u8 update_white_list(struct hci_request *req)
678 {
679         struct hci_dev *hdev = req->hdev;
680         struct hci_conn_params *params;
681         struct bdaddr_list *b;
682         uint8_t white_list_entries = 0;
683
684         /* Go through the current white list programmed into the
685          * controller one by one and check if that address is still
686          * in the list of pending connections or list of devices to
687          * report. If not present in either list, then queue the
688          * command to remove it from the controller.
689          */
690         list_for_each_entry(b, &hdev->le_white_list, list) {
691                 /* If the device is neither in pend_le_conns nor
692                  * pend_le_reports then remove it from the whitelist.
693                  */
694                 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
695                                                &b->bdaddr, b->bdaddr_type) &&
696                     !hci_pend_le_action_lookup(&hdev->pend_le_reports,
697                                                &b->bdaddr, b->bdaddr_type)) {
698                         struct hci_cp_le_del_from_white_list cp;
699
700                         cp.bdaddr_type = b->bdaddr_type;
701                         bacpy(&cp.bdaddr, &b->bdaddr);
702
703                         hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
704                                     sizeof(cp), &cp);
705                         continue;
706                 }
707
708                 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
709                         /* White list cannot be used with RPAs */
710                         return 0x00;
711                 }
712
713                 white_list_entries++;
714         }
715
716         /* Since all no longer valid white list entries have been
717          * removed, walk through the list of pending connections
718          * and ensure that any new device gets programmed into
719          * the controller.
720          *
721          * If the list of devices is larger than the number of
722          * available white list entries in the controller, then
723          * just abort and return the filter policy value that does
724          * not use the white list.
725          */
726         list_for_each_entry(params, &hdev->pend_le_conns, action) {
727                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
728                                            &params->addr, params->addr_type))
729                         continue;
730
731                 if (white_list_entries >= hdev->le_white_list_size) {
732                         /* Select filter policy to accept all advertising */
733                         return 0x00;
734                 }
735
736                 if (hci_find_irk_by_addr(hdev, &params->addr,
737                                          params->addr_type)) {
738                         /* White list cannot be used with RPAs */
739                         return 0x00;
740                 }
741
742                 white_list_entries++;
743                 add_to_white_list(req, params);
744         }
745
746         /* After adding all new pending connections, walk through
747          * the list of pending reports and also add these to the
748          * white list if there is still space.
749          */
750         list_for_each_entry(params, &hdev->pend_le_reports, action) {
751                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
752                                            &params->addr, params->addr_type))
753                         continue;
754
755                 if (white_list_entries >= hdev->le_white_list_size) {
756                         /* Select filter policy to accept all advertising */
757                         return 0x00;
758                 }
759
760                 if (hci_find_irk_by_addr(hdev, &params->addr,
761                                          params->addr_type)) {
762                         /* White list cannot be used with RPAs */
763                         return 0x00;
764                 }
765
766                 white_list_entries++;
767                 add_to_white_list(req, params);
768         }
769
770         /* Select filter policy to use white list */
771         return 0x01;
772 }
773
774 void hci_req_add_le_passive_scan(struct hci_request *req)
775 {
776         struct hci_cp_le_set_scan_param param_cp;
777         struct hci_cp_le_set_scan_enable enable_cp;
778         struct hci_dev *hdev = req->hdev;
779         u8 own_addr_type;
780         u8 filter_policy;
781
782         /* Set require_privacy to false since no SCAN_REQ PDUs are sent
783          * during passive scanning. Not using a non-resolvable address
784          * here is important so that peer devices using direct
785          * advertising with our address will be correctly reported
786          * by the controller.
787          */
788         if (hci_update_random_address(req, false, &own_addr_type))
789                 return;
790
791         /* Adding or removing entries from the white list must
792          * happen before enabling scanning. The controller does
793          * not allow white list modification while scanning.
794          */
795         filter_policy = update_white_list(req);
796
797         /* When the controller is using random resolvable addresses, i.e.
798          * LE privacy is enabled, controllers that support the Extended
799          * Scanner Filter Policies feature can also handle directed
800          * advertising.
801          *
802          * So instead of using filter policies 0x00 (no whitelist)
803          * and 0x01 (whitelist enabled), use the new filter policies
804          * 0x02 (no whitelist) and 0x03 (whitelist enabled).
805          */
806         if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
807             (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
808                 filter_policy |= 0x02;
809
810         memset(&param_cp, 0, sizeof(param_cp));
811         param_cp.type = LE_SCAN_PASSIVE;
812         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
813         param_cp.window = cpu_to_le16(hdev->le_scan_window);
814         param_cp.own_address_type = own_addr_type;
815         param_cp.filter_policy = filter_policy;
816         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
817                     &param_cp);
818
819         memset(&enable_cp, 0, sizeof(enable_cp));
820         enable_cp.enable = LE_SCAN_ENABLE;
821         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
822         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
823                     &enable_cp);
824 }
825
826 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
827 {
828         u8 instance = hdev->cur_adv_instance;
829         struct adv_info *adv_instance;
830
831         /* Ignore instance 0 */
832         if (instance == 0x00)
833                 return 0;
834
835         adv_instance = hci_find_adv_instance(hdev, instance);
836         if (!adv_instance)
837                 return 0;
838
839         /* TODO: Take into account the "appearance" and "local-name" flags here.
840          * These are currently being ignored as they are not supported.
841          */
842         return adv_instance->scan_rsp_len;
843 }
844
845 void __hci_req_disable_advertising(struct hci_request *req)
846 {
847         u8 enable = 0x00;
848
849         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
850 }
851
852 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
853 {
854         u32 flags;
855         struct adv_info *adv_instance;
856
857         if (instance == 0x00) {
858                 /* Instance 0 always manages the "Tx Power" and "Flags"
859                  * fields
860                  */
861                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
862
863                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
864                  * corresponds to the "connectable" instance flag.
865                  */
866                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
867                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
868
869                 return flags;
870         }
871
872         adv_instance = hci_find_adv_instance(hdev, instance);
873
874         /* Return 0 when we got an invalid instance identifier. */
875         if (!adv_instance)
876                 return 0;
877
878         return adv_instance->flags;
879 }
880
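/* Program the advertising parameters for the current instance and enable
 * advertising. Does nothing while an LE connection exists, and disables
 * any already running advertising first so that the random address can be
 * updated safely.
 */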
881 void __hci_req_enable_advertising(struct hci_request *req)
882 {
883         struct hci_dev *hdev = req->hdev;
884         struct hci_cp_le_set_adv_param cp;
885         u8 own_addr_type, enable = 0x01;
886         bool connectable;
887         u32 flags;
888
889         if (hci_conn_num(hdev, LE_LINK) > 0)
890                 return;
891
892         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
893                 __hci_req_disable_advertising(req);
894
895         /* Clear the HCI_LE_ADV bit temporarily so that
896          * hci_update_random_address() knows that it's safe to go ahead
897          * and write a new random address. The flag will be set again
898          * as soon as the SET_ADV_ENABLE HCI command completes.
899          */
900         hci_dev_clear_flag(hdev, HCI_LE_ADV);
901
902         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
903
904         /* If the "connectable" instance flag was not set, then choose between
905          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
906          */
907         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
908                       mgmt_get_connectable(hdev);
909
910         /* Set require_privacy to true only when non-connectable
911          * advertising is used. In that case it is fine to use a
912          * non-resolvable private address.
913          */
914         if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
915                 return;
916
917         memset(&cp, 0, sizeof(cp));
918         cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
919         cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
920
921         if (connectable)
922                 cp.type = LE_ADV_IND;
923         else if (get_cur_adv_instance_scan_rsp_len(hdev))
924                 cp.type = LE_ADV_SCAN_IND;
925         else
926                 cp.type = LE_ADV_NONCONN_IND;
927
928         cp.own_address_type = own_addr_type;
929         cp.channel_map = hdev->le_adv_channel_map;
930
931         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
932
933         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
934 }
935
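/* Build the default (instance 0) scan response data. Currently this is
 * just the local name, shortened if it does not fit.
 */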
936 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
937 {
938         u8 ad_len = 0;
939         size_t name_len;
940
941         name_len = strlen(hdev->dev_name);
942         if (name_len > 0) {
943                 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
944
945                 if (name_len > max_len) {
946                         name_len = max_len;
947                         ptr[1] = EIR_NAME_SHORT;
948                 } else
949                         ptr[1] = EIR_NAME_COMPLETE;
950
951                 ptr[0] = name_len + 1;
952
953                 memcpy(ptr + 2, hdev->dev_name, name_len);
954
955                 ad_len += (name_len + 2);
956                 ptr += (name_len + 2);
957         }
958
959         return ad_len;
960 }
961
962 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
963                                         u8 *ptr)
964 {
965         struct adv_info *adv_instance;
966
967         adv_instance = hci_find_adv_instance(hdev, instance);
968         if (!adv_instance)
969                 return 0;
970
971         /* TODO: Set the appropriate entries based on advertising instance flags
972          * here once flags other than 0 are supported.
973          */
974         memcpy(ptr, adv_instance->scan_rsp_data,
975                adv_instance->scan_rsp_len);
976
977         return adv_instance->scan_rsp_len;
978 }
979
980 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
981 {
982         struct hci_dev *hdev = req->hdev;
983         struct hci_cp_le_set_scan_rsp_data cp;
984         u8 len;
985
986         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
987                 return;
988
989         memset(&cp, 0, sizeof(cp));
990
991         if (instance)
992                 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
993         else
994                 len = create_default_scan_rsp_data(hdev, cp.data);
995
996         if (hdev->scan_rsp_data_len == len &&
997             !memcmp(cp.data, hdev->scan_rsp_data, len))
998                 return;
999
1000         memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1001         hdev->scan_rsp_data_len = len;
1002
1003         cp.length = len;
1004
1005         hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1006 }
1007
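/* Build the advertising data for the given instance: the Flags field (when
 * needed), the instance's own advertising data and, optionally, the TX
 * power.
 */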
1008 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1009 {
1010         struct adv_info *adv_instance = NULL;
1011         u8 ad_len = 0, flags = 0;
1012         u32 instance_flags;
1013
1014         /* Return 0 when the current instance identifier is invalid. */
1015         if (instance) {
1016                 adv_instance = hci_find_adv_instance(hdev, instance);
1017                 if (!adv_instance)
1018                         return 0;
1019         }
1020
1021         instance_flags = get_adv_instance_flags(hdev, instance);
1022
1023         /* The Add Advertising command allows userspace to set both the general
1024          * and limited discoverable flags.
1025          */
1026         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1027                 flags |= LE_AD_GENERAL;
1028
1029         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1030                 flags |= LE_AD_LIMITED;
1031
1032         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1033                 /* If a discovery flag wasn't provided, simply use the global
1034                  * settings.
1035                  */
1036                 if (!flags)
1037                         flags |= mgmt_get_adv_discov_flags(hdev);
1038
1039                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1040                         flags |= LE_AD_NO_BREDR;
1041
1042                 /* If flags would still be empty, then there is no need to
1043                  * include the "Flags" AD field.
1044                  */
1045                 if (flags) {
1046                         ptr[0] = 0x02;
1047                         ptr[1] = EIR_FLAGS;
1048                         ptr[2] = flags;
1049
1050                         ad_len += 3;
1051                         ptr += 3;
1052                 }
1053         }
1054
1055         if (adv_instance) {
1056                 memcpy(ptr, adv_instance->adv_data,
1057                        adv_instance->adv_data_len);
1058                 ad_len += adv_instance->adv_data_len;
1059                 ptr += adv_instance->adv_data_len;
1060         }
1061
1062         /* Provide Tx Power only if we can provide a valid value for it */
1063         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1064             (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1065                 ptr[0] = 0x02;
1066                 ptr[1] = EIR_TX_POWER;
1067                 ptr[2] = (u8)hdev->adv_tx_power;
1068
1069                 ad_len += 3;
1070                 ptr += 3;
1071         }
1072
1073         return ad_len;
1074 }
1075
1076 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1077 {
1078         struct hci_dev *hdev = req->hdev;
1079         struct hci_cp_le_set_adv_data cp;
1080         u8 len;
1081
1082         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1083                 return;
1084
1085         memset(&cp, 0, sizeof(cp));
1086
1087         len = create_instance_adv_data(hdev, instance, cp.data);
1088
1089         /* There's nothing to do if the data hasn't changed */
1090         if (hdev->adv_data_len == len &&
1091             memcmp(cp.data, hdev->adv_data, len) == 0)
1092                 return;
1093
1094         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1095         hdev->adv_data_len = len;
1096
1097         cp.length = len;
1098
1099         hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1100 }
1101
1102 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1103 {
1104         struct hci_request req;
1105
1106         hci_req_init(&req, hdev);
1107         __hci_req_update_adv_data(&req, instance);
1108
1109         return hci_req_run(&req, NULL);
1110 }
1111
1112 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1113 {
1114         BT_DBG("%s status %u", hdev->name, status);
1115 }
1116
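/* Re-enable advertising by rescheduling the current instance, or by falling
 * back to the instance 0 (Set Advertising) data when no instance is
 * selected.
 */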
1117 void hci_req_reenable_advertising(struct hci_dev *hdev)
1118 {
1119         struct hci_request req;
1120
1121         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1122             list_empty(&hdev->adv_instances))
1123                 return;
1124
1125         hci_req_init(&req, hdev);
1126
1127         if (hdev->cur_adv_instance) {
1128                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1129                                                 true);
1130         } else {
1131                 __hci_req_update_adv_data(&req, 0x00);
1132                 __hci_req_update_scan_rsp_data(&req, 0x00);
1133                 __hci_req_enable_advertising(&req);
1134         }
1135
1136         hci_req_run(&req, adv_enable_complete);
1137 }
1138
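/* Delayed work that runs when the current advertising instance's duration
 * expires: remove or deactivate the instance and disable advertising if no
 * instances are left.
 */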
1139 static void adv_timeout_expire(struct work_struct *work)
1140 {
1141         struct hci_dev *hdev = container_of(work, struct hci_dev,
1142                                             adv_instance_expire.work);
1143
1144         struct hci_request req;
1145         u8 instance;
1146
1147         BT_DBG("%s", hdev->name);
1148
1149         hci_dev_lock(hdev);
1150
1151         hdev->adv_instance_timeout = 0;
1152
1153         instance = hdev->cur_adv_instance;
1154         if (instance == 0x00)
1155                 goto unlock;
1156
1157         hci_req_init(&req, hdev);
1158
1159         hci_req_clear_adv_instance(hdev, &req, instance, false);
1160
1161         if (list_empty(&hdev->adv_instances))
1162                 __hci_req_disable_advertising(&req);
1163
1164         hci_req_run(&req, NULL);
1165
1166 unlock:
1167         hci_dev_unlock(hdev);
1168 }
1169
1170 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1171                                     bool force)
1172 {
1173         struct hci_dev *hdev = req->hdev;
1174         struct adv_info *adv_instance = NULL;
1175         u16 timeout;
1176
1177         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1178             list_empty(&hdev->adv_instances))
1179                 return -EPERM;
1180
1181         if (hdev->adv_instance_timeout)
1182                 return -EBUSY;
1183
1184         adv_instance = hci_find_adv_instance(hdev, instance);
1185         if (!adv_instance)
1186                 return -ENOENT;
1187
1188         /* A zero timeout means unlimited advertising. As long as there is
1189          * only one instance, duration should be ignored. We still set a timeout
1190          * in case further instances are being added later on.
1191          *
1192          * If the remaining lifetime of the instance is more than the duration
1193          * then the timeout corresponds to the duration, otherwise it will be
1194          * reduced to the remaining instance lifetime.
1195          */
1196         if (adv_instance->timeout == 0 ||
1197             adv_instance->duration <= adv_instance->remaining_time)
1198                 timeout = adv_instance->duration;
1199         else
1200                 timeout = adv_instance->remaining_time;
1201
1202         /* The remaining time is reduced unless the instance is being
1203          * advertised without a time limit.
1204          */
1205         if (adv_instance->timeout)
1206                 adv_instance->remaining_time =
1207                                 adv_instance->remaining_time - timeout;
1208
1209         hdev->adv_instance_timeout = timeout;
1210         queue_delayed_work(hdev->req_workqueue,
1211                            &hdev->adv_instance_expire,
1212                            msecs_to_jiffies(timeout * 1000));
1213
1214         /* If we're just re-scheduling the same instance again then do not
1215          * execute any HCI commands. This happens when a single instance is
1216          * being advertised.
1217          */
1218         if (!force && hdev->cur_adv_instance == instance &&
1219             hci_dev_test_flag(hdev, HCI_LE_ADV))
1220                 return 0;
1221
1222         hdev->cur_adv_instance = instance;
1223         __hci_req_update_adv_data(req, instance);
1224         __hci_req_update_scan_rsp_data(req, instance);
1225         __hci_req_enable_advertising(req);
1226
1227         return 0;
1228 }
1229
1230 static void cancel_adv_timeout(struct hci_dev *hdev)
1231 {
1232         if (hdev->adv_instance_timeout) {
1233                 hdev->adv_instance_timeout = 0;
1234                 cancel_delayed_work(&hdev->adv_instance_expire);
1235         }
1236 }
1237
1238 /* For a single instance:
1239  * - force == true: The instance will be removed even when its remaining
1240  *   lifetime is not zero.
1241  * - force == false: The instance will be deactivated but kept stored unless
1242  *   the remaining lifetime is zero.
1243  *
1244  * For instance == 0x00:
1245  * - force == true: All instances will be removed regardless of their timeout
1246  *   setting.
1247  * - force == false: Only instances that have a timeout will be removed.
1248  */
1249 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1250                                 u8 instance, bool force)
1251 {
1252         struct adv_info *adv_instance, *n, *next_instance = NULL;
1253         int err;
1254         u8 rem_inst;
1255
1256         /* Cancel any timeout concerning the removed instance(s). */
1257         if (!instance || hdev->cur_adv_instance == instance)
1258                 cancel_adv_timeout(hdev);
1259
1260         /* Get the next instance to advertise BEFORE we remove
1261          * the current one. This can be the same instance again
1262          * if there is only one instance.
1263          */
1264         if (instance && hdev->cur_adv_instance == instance)
1265                 next_instance = hci_get_next_instance(hdev, instance);
1266
1267         if (instance == 0x00) {
1268                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1269                                          list) {
1270                         if (!(force || adv_instance->timeout))
1271                                 continue;
1272
1273                         rem_inst = adv_instance->instance;
1274                         err = hci_remove_adv_instance(hdev, rem_inst);
1275                         if (!err)
1276                                 mgmt_advertising_removed(NULL, hdev, rem_inst);
1277                 }
1278         } else {
1279                 adv_instance = hci_find_adv_instance(hdev, instance);
1280
1281                 if (force || (adv_instance && adv_instance->timeout &&
1282                               !adv_instance->remaining_time)) {
1283                         /* Don't advertise a removed instance. */
1284                         if (next_instance &&
1285                             next_instance->instance == instance)
1286                                 next_instance = NULL;
1287
1288                         err = hci_remove_adv_instance(hdev, instance);
1289                         if (!err)
1290                                 mgmt_advertising_removed(NULL, hdev, instance);
1291                 }
1292         }
1293
1294         if (!req || !hdev_is_powered(hdev) ||
1295             hci_dev_test_flag(hdev, HCI_ADVERTISING))
1296                 return;
1297
1298         if (next_instance)
1299                 __hci_req_schedule_adv_instance(req, next_instance->instance,
1300                                                 false);
1301 }
1302
1303 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1304 {
1305         struct hci_dev *hdev = req->hdev;
1306
1307         /* If we're advertising or initiating an LE connection we can't
1308          * go ahead and change the random address at this time. This is
1309          * because the eventual initiator address used for the
1310          * subsequently created connection will be undefined (some
1311          * controllers use the new address and others the one we had
1312          * when the operation started).
1313          *
1314          * In this kind of scenario skip the update and let the random
1315          * address be updated at the next cycle.
1316          */
1317         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1318             hci_lookup_le_connect(hdev)) {
1319                 BT_DBG("Deferring random address update");
1320                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1321                 return;
1322         }
1323
1324         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1325 }
1326
1327 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1328                               u8 *own_addr_type)
1329 {
1330         struct hci_dev *hdev = req->hdev;
1331         int err;
1332
1333         /* If privacy is enabled use a resolvable private address. If
1334          * the current RPA has expired or an address other than the
1335          * current RPA is in use, then generate a new one.
1336          */
1337         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
1338                 int to;
1339
1340                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1341
1342                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1343                     !bacmp(&hdev->random_addr, &hdev->rpa))
1344                         return 0;
1345
1346                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1347                 if (err < 0) {
1348                         BT_ERR("%s failed to generate new RPA", hdev->name);
1349                         return err;
1350                 }
1351
1352                 set_random_addr(req, &hdev->rpa);
1353
1354                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1355                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1356
1357                 return 0;
1358         }
1359
1360         /* In case of required privacy without resolvable private address,
1361          * use a non-resolvable private address. This is useful for active
1362          * scanning and non-connectable advertising.
1363          */
1364         if (require_privacy) {
1365                 bdaddr_t nrpa;
1366
1367                 while (true) {
1368                         /* The non-resolvable private address is generated
1369                          * from six random bytes with the two most significant
1370                          * bits cleared.
1371                          */
1372                         get_random_bytes(&nrpa, 6);
1373                         nrpa.b[5] &= 0x3f;
1374
1375                         /* The non-resolvable private address shall not be
1376                          * equal to the public address.
1377                          */
1378                         if (bacmp(&hdev->bdaddr, &nrpa))
1379                                 break;
1380                 }
1381
1382                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1383                 set_random_addr(req, &nrpa);
1384                 return 0;
1385         }
1386
1387         /* If forcing a static address is in use or there is no public
1388          * address, use the static address as the random address (but skip
1389          * the HCI command if the current random address is already the
1390          * static one).
1391          *
1392          * In case BR/EDR has been disabled on a dual-mode controller
1393          * and a static address has been configured, then use that
1394          * address instead of the public BR/EDR address.
1395          */
1396         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1397             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1398             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1399              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1400                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1401                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1402                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1403                                     &hdev->static_addr);
1404                 return 0;
1405         }
1406
1407         /* Neither privacy nor static address is being used so use a
1408          * public address.
1409          */
1410         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1411
1412         return 0;
1413 }
1414
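/* Return true if any BR/EDR white list entry is not currently connected,
 * in which case page scanning needs to stay enabled.
 */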
1415 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1416 {
1417         struct bdaddr_list *b;
1418
1419         list_for_each_entry(b, &hdev->whitelist, list) {
1420                 struct hci_conn *conn;
1421
1422                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1423                 if (!conn)
1424                         return true;
1425
1426                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1427                         return true;
1428         }
1429
1430         return false;
1431 }
1432
1433 void __hci_req_update_scan(struct hci_request *req)
1434 {
1435         struct hci_dev *hdev = req->hdev;
1436         u8 scan;
1437
1438         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1439                 return;
1440
1441         if (!hdev_is_powered(hdev))
1442                 return;
1443
1444         if (mgmt_powering_down(hdev))
1445                 return;
1446
1447         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1448             disconnected_whitelist_entries(hdev))
1449                 scan = SCAN_PAGE;
1450         else
1451                 scan = SCAN_DISABLED;
1452
1453         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1454                 scan |= SCAN_INQUIRY;
1455
1456         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1457             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1458                 return;
1459
1460         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1461 }
1462
1463 static int update_scan(struct hci_request *req, unsigned long opt)
1464 {
1465         hci_dev_lock(req->hdev);
1466         __hci_req_update_scan(req);
1467         hci_dev_unlock(req->hdev);
1468         return 0;
1469 }
1470
1471 static void scan_update_work(struct work_struct *work)
1472 {
1473         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1474
1475         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1476 }
1477
1478 static int connectable_update(struct hci_request *req, unsigned long opt)
1479 {
1480         struct hci_dev *hdev = req->hdev;
1481
1482         hci_dev_lock(hdev);
1483
1484         __hci_req_update_scan(req);
1485
1486         /* If BR/EDR is not enabled and we disable advertising as a
1487          * by-product of disabling connectable mode, we need to update the
1488          * advertising flags.
1489          */
1490         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1491                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1492
1493         /* Update the advertising parameters if necessary */
1494         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1495             !list_empty(&hdev->adv_instances))
1496                 __hci_req_enable_advertising(req);
1497
1498         __hci_update_background_scan(req);
1499
1500         hci_dev_unlock(hdev);
1501
1502         return 0;
1503 }
1504
1505 static void connectable_update_work(struct work_struct *work)
1506 {
1507         struct hci_dev *hdev = container_of(work, struct hci_dev,
1508                                             connectable_update);
1509         u8 status;
1510
1511         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1512         mgmt_set_connectable_complete(hdev, status);
1513 }
1514
1515 static u8 get_service_classes(struct hci_dev *hdev)
1516 {
1517         struct bt_uuid *uuid;
1518         u8 val = 0;
1519
1520         list_for_each_entry(uuid, &hdev->uuids, list)
1521                 val |= uuid->svc_hint;
1522
1523         return val;
1524 }
1525
1526 void __hci_req_update_class(struct hci_request *req)
1527 {
1528         struct hci_dev *hdev = req->hdev;
1529         u8 cod[3];
1530
1531         BT_DBG("%s", hdev->name);
1532
1533         if (!hdev_is_powered(hdev))
1534                 return;
1535
1536         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1537                 return;
1538
1539         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1540                 return;
1541
1542         cod[0] = hdev->minor_class;
1543         cod[1] = hdev->major_class;
1544         cod[2] = get_service_classes(hdev);
1545
1546         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1547                 cod[1] |= 0x20;
1548
1549         if (memcmp(cod, hdev->dev_class, 3) == 0)
1550                 return;
1551
1552         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1553 }
1554
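/* Queue a Write Current IAC LAP command matching the discoverable mode:
 * LIAC plus GIAC for limited discoverable, GIAC only for general
 * discoverable. Does nothing if the device is not discoverable.
 */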
1555 static void write_iac(struct hci_request *req)
1556 {
1557         struct hci_dev *hdev = req->hdev;
1558         struct hci_cp_write_current_iac_lap cp;
1559
1560         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1561                 return;
1562
1563         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1564                 /* Limited discoverable mode */
1565                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1566                 cp.iac_lap[0] = 0x00;   /* LIAC */
1567                 cp.iac_lap[1] = 0x8b;
1568                 cp.iac_lap[2] = 0x9e;
1569                 cp.iac_lap[3] = 0x33;   /* GIAC */
1570                 cp.iac_lap[4] = 0x8b;
1571                 cp.iac_lap[5] = 0x9e;
1572         } else {
1573                 /* General discoverable mode */
1574                 cp.num_iac = 1;
1575                 cp.iac_lap[0] = 0x33;   /* GIAC */
1576                 cp.iac_lap[1] = 0x8b;
1577                 cp.iac_lap[2] = 0x9e;
1578         }
1579
1580         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1581                     (cp.num_iac * 3) + 1, &cp);
1582 }
1583
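/* Request builder run when the discoverable setting changes: updates the
 * IAC, scan mode and class of device for BR/EDR, and the advertising data
 * when advertising was enabled through Set Advertising.
 */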
1584 static int discoverable_update(struct hci_request *req, unsigned long opt)
1585 {
1586         struct hci_dev *hdev = req->hdev;
1587
1588         hci_dev_lock(hdev);
1589
1590         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1591                 write_iac(req);
1592                 __hci_req_update_scan(req);
1593                 __hci_req_update_class(req);
1594         }
1595
1596         /* Advertising instances don't use the global discoverable setting, so
1597          * only update AD if advertising was enabled using Set Advertising.
1598          */
1599         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1600                 __hci_req_update_adv_data(req, 0x00);
1601
1602         hci_dev_unlock(hdev);
1603
1604         return 0;
1605 }
1606
1607 static void discoverable_update_work(struct work_struct *work)
1608 {
1609         struct hci_dev *hdev = container_of(work, struct hci_dev,
1610                                             discoverable_update);
1611         u8 status;
1612
1613         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1614         mgmt_set_discoverable_complete(hdev, status);
1615 }
1616
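/* Queue the HCI command appropriate for tearing down conn based on its
 * current state: Disconnect (or Disconnect Physical Link for AMP links)
 * for established connections, Create Connection Cancel for outgoing
 * attempts, and Reject Connection Request for incoming ones.
 */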
1617 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1618                       u8 reason)
1619 {
1620         switch (conn->state) {
1621         case BT_CONNECTED:
1622         case BT_CONFIG:
1623                 if (conn->type == AMP_LINK) {
1624                         struct hci_cp_disconn_phy_link cp;
1625
1626                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1627                         cp.reason = reason;
1628                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1629                                     &cp);
1630                 } else {
1631                         struct hci_cp_disconnect dc;
1632
1633                         dc.handle = cpu_to_le16(conn->handle);
1634                         dc.reason = reason;
1635                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1636                 }
1637
1638                 conn->state = BT_DISCONN;
1639
1640                 break;
1641         case BT_CONNECT:
1642                 if (conn->type == LE_LINK) {
1643                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1644                                 break;
1645                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1646                                     0, NULL);
1647                 } else if (conn->type == ACL_LINK) {
1648                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1649                                 break;
1650                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1651                                     6, &conn->dst);
1652                 }
1653                 break;
1654         case BT_CONNECT2:
1655                 if (conn->type == ACL_LINK) {
1656                         struct hci_cp_reject_conn_req rej;
1657
1658                         bacpy(&rej.bdaddr, &conn->dst);
1659                         rej.reason = reason;
1660
1661                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1662                                     sizeof(rej), &rej);
1663                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1664                         struct hci_cp_reject_sync_conn_req rej;
1665
1666                         bacpy(&rej.bdaddr, &conn->dst);
1667
1668                         /* SCO rejection has its own limited set of
1669                          * allowed error values (0x0D-0x0F) which isn't
1670                          * compatible with most values passed to this
1671                          * function. To be safe, hard-code one of the
1672                          * values that is suitable for SCO.
1673                          */
1674                         rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1675
1676                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1677                                     sizeof(rej), &rej);
1678                 }
1679                 break;
1680         default:
1681                 conn->state = BT_CLOSED;
1682                 break;
1683         }
1684 }
1685
1686 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1687 {
1688         if (status)
1689                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1690 }
1691
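/* Build and run a request that aborts the given connection. A typical
 * caller would pass a standard HCI error as the reason, e.g. (illustrative)
 * hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM). -ENODATA from the
 * request run is treated as success, since it only means that no command
 * needed to be sent.
 */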
1692 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1693 {
1694         struct hci_request req;
1695         int err;
1696
1697         hci_req_init(&req, conn->hdev);
1698
1699         __hci_abort_conn(&req, conn, reason);
1700
1701         err = hci_req_run(&req, abort_conn_complete);
1702         if (err && err != -ENODATA) {
1703                 BT_ERR("Failed to run HCI request: err %d", err);
1704                 return err;
1705         }
1706
1707         return 0;
1708 }
1709
1710 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1711 {
1712         hci_dev_lock(req->hdev);
1713         __hci_update_background_scan(req);
1714         hci_dev_unlock(req->hdev);
1715         return 0;
1716 }
1717
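/* Work callback that re-evaluates background (passive) scanning. If the
 * synchronous request fails, the pending LE connection, if any, is marked
 * as failed with the returned status.
 */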
1718 static void bg_scan_update(struct work_struct *work)
1719 {
1720         struct hci_dev *hdev = container_of(work, struct hci_dev,
1721                                             bg_scan_update);
1722         struct hci_conn *conn;
1723         u8 status;
1724         int err;
1725
1726         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1727         if (!err)
1728                 return;
1729
1730         hci_dev_lock(hdev);
1731
1732         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1733         if (conn)
1734                 hci_le_conn_failed(conn, status);
1735
1736         hci_dev_unlock(hdev);
1737 }
1738
1739 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1740 {
1741         hci_req_add_le_scan_disable(req);
1742         return 0;
1743 }
1744
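/* Flush the inquiry cache and start a BR/EDR inquiry using the LIAC when
 * limited discovery was requested, the GIAC otherwise. The opt argument
 * carries the inquiry length.
 */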
1745 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1746 {
1747         u8 length = opt;
1748         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1749         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1750         struct hci_cp_inquiry cp;
1751
1752         BT_DBG("%s", req->hdev->name);
1753
1754         hci_dev_lock(req->hdev);
1755         hci_inquiry_cache_flush(req->hdev);
1756         hci_dev_unlock(req->hdev);
1757
1758         memset(&cp, 0, sizeof(cp));
1759
1760         if (req->hdev->discovery.limited)
1761                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1762         else
1763                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1764
1765         cp.length = length;
1766
1767         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1768
1769         return 0;
1770 }
1771
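/* Delayed work that fires when the LE scan phase of discovery times out.
 * It disables LE scanning and then either stops discovery or, for
 * interleaved discovery without controller support, starts the BR/EDR
 * inquiry phase.
 */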
1772 static void le_scan_disable_work(struct work_struct *work)
1773 {
1774         struct hci_dev *hdev = container_of(work, struct hci_dev,
1775                                             le_scan_disable.work);
1776         u8 status;
1777
1778         BT_DBG("%s", hdev->name);
1779
1780         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1781                 return;
1782
1783         cancel_delayed_work(&hdev->le_scan_restart);
1784
1785         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1786         if (status) {
1787                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1788                 return;
1789         }
1790
1791         hdev->discovery.scan_start = 0;
1792
1793         /* If we were running an LE-only scan, change the discovery state.
1794          * If we were running LE and BR/EDR inquiry simultaneously and the
1795          * BR/EDR inquiry has already finished, stop discovery; otherwise
1796          * the BR/EDR inquiry will stop discovery when it finishes. If we
1797          * are still resolving a remote device name, do not change the
1798          * discovery state.
1799          */
1800
1801         if (hdev->discovery.type == DISCOV_TYPE_LE)
1802                 goto discov_stopped;
1803
1804         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1805                 return;
1806
1807         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1808                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1809                     hdev->discovery.state != DISCOVERY_RESOLVING)
1810                         goto discov_stopped;
1811
1812                 return;
1813         }
1814
1815         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1816                      HCI_CMD_TIMEOUT, &status);
1817         if (status) {
1818                 BT_ERR("Inquiry failed: status 0x%02x", status);
1819                 goto discov_stopped;
1820         }
1821
1822         return;
1823
1824 discov_stopped:
1825         hci_dev_lock(hdev);
1826         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1827         hci_dev_unlock(hdev);
1828 }
1829
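/* Request builder that restarts an ongoing LE scan by disabling and
 * re-enabling it. Used with controllers that do strict duplicate
 * filtering, so that already reported devices can be reported again.
 */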
1830 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1831 {
1832         struct hci_dev *hdev = req->hdev;
1833         struct hci_cp_le_set_scan_enable cp;
1834
1835         /* If the controller is not scanning, we are done. */
1836         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1837                 return 0;
1838
1839         hci_req_add_le_scan_disable(req);
1840
1841         memset(&cp, 0, sizeof(cp));
1842         cp.enable = LE_SCAN_ENABLE;
1843         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1844         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1845
1846         return 0;
1847 }
1848
1849 static void le_scan_restart_work(struct work_struct *work)
1850 {
1851         struct hci_dev *hdev = container_of(work, struct hci_dev,
1852                                             le_scan_restart.work);
1853         unsigned long timeout, duration, scan_start, now;
1854         u8 status;
1855
1856         BT_DBG("%s", hdev->name);
1857
1858         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1859         if (status) {
1860                 BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1861                 return;
1862         }
1863
1864         hci_dev_lock(hdev);
1865
1866         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1867             !hdev->discovery.scan_start)
1868                 goto unlock;
1869
1870         /* When the scan was started, hdev->le_scan_disable was queued to
1871          * run 'duration' after scan_start. Restarting the scan canceled
1872          * that work, so queue it again with the remaining timeout to make
1873          * sure the scan does not run indefinitely.
1874          */
1875         duration = hdev->discovery.scan_duration;
1876         scan_start = hdev->discovery.scan_start;
1877         now = jiffies;
1878         if (now - scan_start <= duration) {
1879                 int elapsed;
1880
1881                 if (now >= scan_start)
1882                         elapsed = now - scan_start;
1883                 else
1884                         elapsed = ULONG_MAX - scan_start + now;
1885
1886                 timeout = duration - elapsed;
1887         } else {
1888                 timeout = 0;
1889         }
1890
1891         queue_delayed_work(hdev->req_workqueue,
1892                            &hdev->le_scan_disable, timeout);
1893
1894 unlock:
1895         hci_dev_unlock(hdev);
1896 }
1897
1898 static void disable_advertising(struct hci_request *req)
1899 {
1900         u8 enable = 0x00;
1901
1902         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1903 }
1904
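/* Request builder for the active LE scan used during discovery: stop
 * advertising and any background scan that is running, pick a private own
 * address, set the scan parameters (interval taken from opt) and enable
 * scanning.
 */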
1905 static int active_scan(struct hci_request *req, unsigned long opt)
1906 {
1907         uint16_t interval = opt;
1908         struct hci_dev *hdev = req->hdev;
1909         struct hci_cp_le_set_scan_param param_cp;
1910         struct hci_cp_le_set_scan_enable enable_cp;
1911         u8 own_addr_type;
1912         int err;
1913
1914         BT_DBG("%s", hdev->name);
1915
1916         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1917                 hci_dev_lock(hdev);
1918
1919                 /* Don't let discovery abort an outgoing connection attempt
1920                  * that's using directed advertising.
1921                  */
1922                 if (hci_lookup_le_connect(hdev)) {
1923                         hci_dev_unlock(hdev);
1924                         return -EBUSY;
1925                 }
1926
1927                 cancel_adv_timeout(hdev);
1928                 hci_dev_unlock(hdev);
1929
1930                 disable_advertising(req);
1931         }
1932
1933         /* If the controller is already scanning, the background scan is
1934          * running. Temporarily stop it so that the discovery scan
1935          * parameters can be set.
1936          */
1937         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1938                 hci_req_add_le_scan_disable(req);
1939
1940         /* All active scans will be done with either a resolvable private
1941          * address (when the privacy feature has been enabled) or a
1942          * non-resolvable private address.
1943          */
1944         err = hci_update_random_address(req, true, &own_addr_type);
1945         if (err < 0)
1946                 own_addr_type = ADDR_LE_DEV_PUBLIC;
1947
1948         memset(&param_cp, 0, sizeof(param_cp));
1949         param_cp.type = LE_SCAN_ACTIVE;
1950         param_cp.interval = cpu_to_le16(interval);
1951         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1952         param_cp.own_address_type = own_addr_type;
1953
1954         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1955                     &param_cp);
1956
1957         memset(&enable_cp, 0, sizeof(enable_cp));
1958         enable_cp.enable = LE_SCAN_ENABLE;
1959         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1960
1961         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1962                     &enable_cp);
1963
1964         return 0;
1965 }
1966
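/* Queue an active LE scan and a BR/EDR inquiry in the same request, for
 * controllers that can run both at the same time.
 */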
1967 static int interleaved_discov(struct hci_request *req, unsigned long opt)
1968 {
1969         int err;
1970
1971         BT_DBG("%s", req->hdev->name);
1972
1973         err = active_scan(req, opt);
1974         if (err)
1975                 return err;
1976
1977         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
1978 }
1979
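/* Run the synchronous requests that start discovery for the configured
 * type (BR/EDR, LE or interleaved) and, for the types that involve LE
 * scanning, arm the le_scan_disable timer with the appropriate timeout.
 */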
1980 static void start_discovery(struct hci_dev *hdev, u8 *status)
1981 {
1982         unsigned long timeout;
1983
1984         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
1985
1986         switch (hdev->discovery.type) {
1987         case DISCOV_TYPE_BREDR:
1988                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
1989                         hci_req_sync(hdev, bredr_inquiry,
1990                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
1991                                      status);
1992                 return;
1993         case DISCOV_TYPE_INTERLEAVED:
1994                 /* When running simultaneous discovery, the LE scanning time
1995                  * should occupy the whole discovery time since BR/EDR inquiry
1996                  * and LE scanning are scheduled by the controller.
1997                  *
1998                  * For interleaved discovery, in comparison, BR/EDR inquiry
1999                  * and LE scanning are done sequentially with separate
2000                  * timeouts.
2001                  */
2002                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2003                              &hdev->quirks)) {
2004                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2005                         /* During simultaneous discovery, we double the LE
2006                          * scan interval. We must leave some time for the
2007                          * controller to do BR/EDR inquiry.
2008                          */
2009                         hci_req_sync(hdev, interleaved_discov,
2010                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2011                                      status);
2012                         break;
2013                 }
2014
2015                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2016                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2017                              HCI_CMD_TIMEOUT, status);
2018                 break;
2019         case DISCOV_TYPE_LE:
2020                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2021                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2022                              HCI_CMD_TIMEOUT, status);
2023                 break;
2024         default:
2025                 *status = HCI_ERROR_UNSPECIFIED;
2026                 return;
2027         }
2028
2029         if (*status)
2030                 return;
2031
2032         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2033
2034         /* When service discovery is used and the controller has a
2035          * strict duplicate filter, it is important to remember the
2036          * start and duration of the scan. This is required for
2037          * restarting scanning during the discovery phase.
2038          */
2039         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2040                      hdev->discovery.result_filtering) {
2041                 hdev->discovery.scan_start = jiffies;
2042                 hdev->discovery.scan_duration = timeout;
2043         }
2044
2045         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2046                            timeout);
2047 }
2048
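/* Queue the commands needed to stop the current discovery: cancel a
 * running inquiry, disable LE scanning and cancel a pending remote name
 * request. Returns true if at least one command was queued.
 */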
2049 bool hci_req_stop_discovery(struct hci_request *req)
2050 {
2051         struct hci_dev *hdev = req->hdev;
2052         struct discovery_state *d = &hdev->discovery;
2053         struct hci_cp_remote_name_req_cancel cp;
2054         struct inquiry_entry *e;
2055         bool ret = false;
2056
2057         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2058
2059         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2060                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2061                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2062
2063                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2064                         cancel_delayed_work(&hdev->le_scan_disable);
2065                         hci_req_add_le_scan_disable(req);
2066                 }
2067
2068                 ret = true;
2069         } else {
2070                 /* Passive scanning */
2071                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2072                         hci_req_add_le_scan_disable(req);
2073                         ret = true;
2074                 }
2075         }
2076
2077         /* No further actions needed for LE-only discovery */
2078         if (d->type == DISCOV_TYPE_LE)
2079                 return ret;
2080
2081         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2082                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2083                                                      NAME_PENDING);
2084                 if (!e)
2085                         return ret;
2086
2087                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2088                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2089                             &cp);
2090                 ret = true;
2091         }
2092
2093         return ret;
2094 }
2095
2096 static int stop_discovery(struct hci_request *req, unsigned long opt)
2097 {
2098         hci_dev_lock(req->hdev);
2099         hci_req_stop_discovery(req);
2100         hci_dev_unlock(req->hdev);
2101
2102         return 0;
2103 }
2104
2105 static void discov_update(struct work_struct *work)
2106 {
2107         struct hci_dev *hdev = container_of(work, struct hci_dev,
2108                                             discov_update);
2109         u8 status = 0;
2110
2111         switch (hdev->discovery.state) {
2112         case DISCOVERY_STARTING:
2113                 start_discovery(hdev, &status);
2114                 mgmt_start_discovery_complete(hdev, status);
2115                 if (status)
2116                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2117                 else
2118                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2119                 break;
2120         case DISCOVERY_STOPPING:
2121                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2122                 mgmt_stop_discovery_complete(hdev, status);
2123                 if (!status)
2124                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2125                 break;
2126         case DISCOVERY_STOPPED:
2127         default:
2128                 return;
2129         }
2130 }
2131
2132 static void discov_off(struct work_struct *work)
2133 {
2134         struct hci_dev *hdev = container_of(work, struct hci_dev,
2135                                             discov_off.work);
2136
2137         BT_DBG("%s", hdev->name);
2138
2139         hci_dev_lock(hdev);
2140
2141         /* When the discoverable timeout triggers, just make sure the
2142          * limited discoverable flag is cleared. Even in the case of a
2143          * timeout triggered from general discoverable mode, it is safe
2144          * to unconditionally clear the flag.
2145          */
2146         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2147         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2148         hdev->discov_timeout = 0;
2149
2150         hci_dev_unlock(hdev);
2151
2152         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2153         mgmt_new_settings(hdev);
2154 }
2155
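/* Request builder run right after powering on: bring the controller in
 * line with the host flags for SSP, Secure Connections, LE host support,
 * advertising, authentication, scan mode, class, name and EIR.
 */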
2156 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2157 {
2158         struct hci_dev *hdev = req->hdev;
2159         u8 link_sec;
2160
2161         hci_dev_lock(hdev);
2162
2163         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2164             !lmp_host_ssp_capable(hdev)) {
2165                 u8 mode = 0x01;
2166
2167                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2168
2169                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2170                         u8 support = 0x01;
2171
2172                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2173                                     sizeof(support), &support);
2174                 }
2175         }
2176
2177         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2178             lmp_bredr_capable(hdev)) {
2179                 struct hci_cp_write_le_host_supported cp;
2180
2181                 cp.le = 0x01;
2182                 cp.simul = 0x00;
2183
2184                 /* Check first if we already have the right
2185                  * host state (host features set)
2186                  */
2187                 if (cp.le != lmp_host_le_capable(hdev) ||
2188                     cp.simul != lmp_host_le_br_capable(hdev))
2189                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2190                                     sizeof(cp), &cp);
2191         }
2192
2193         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2194                 /* Make sure the controller has a good default for
2195                  * advertising data. This also applies to the case
2196                  * where BR/EDR was toggled during the AUTO_OFF phase.
2197                  */
2198                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2199                     list_empty(&hdev->adv_instances)) {
2200                         __hci_req_update_adv_data(req, 0x00);
2201                         __hci_req_update_scan_rsp_data(req, 0x00);
2202
2203                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2204                                 __hci_req_enable_advertising(req);
2205                 } else if (!list_empty(&hdev->adv_instances)) {
2206                         struct adv_info *adv_instance;
2207
2208                         adv_instance = list_first_entry(&hdev->adv_instances,
2209                                                         struct adv_info, list);
2210                         __hci_req_schedule_adv_instance(req,
2211                                                         adv_instance->instance,
2212                                                         true);
2213                 }
2214         }
2215
2216         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2217         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2218                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2219                             sizeof(link_sec), &link_sec);
2220
2221         if (lmp_bredr_capable(hdev)) {
2222                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2223                         __hci_req_write_fast_connectable(req, true);
2224                 else
2225                         __hci_req_write_fast_connectable(req, false);
2226                 __hci_req_update_scan(req);
2227                 __hci_req_update_class(req);
2228                 __hci_req_update_name(req);
2229                 __hci_req_update_eir(req);
2230         }
2231
2232         hci_dev_unlock(hdev);
2233         return 0;
2234 }
2235
2236 int __hci_req_hci_power_on(struct hci_dev *hdev)
2237 {
2238         /* Register the available SMP channels (BR/EDR and LE) only when
2239          * successfully powering on the controller. This late
2240          * registration is required so that LE SMP can clearly decide
2241          * whether the public address or the static address is used.
2242          */
2243         smp_register(hdev);
2244
2245         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2246                               NULL);
2247 }
2248
2249 void hci_request_setup(struct hci_dev *hdev)
2250 {
2251         INIT_WORK(&hdev->discov_update, discov_update);
2252         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2253         INIT_WORK(&hdev->scan_update, scan_update_work);
2254         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2255         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2256         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2257         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2258         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2259         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2260 }
2261
2262 void hci_request_cancel_all(struct hci_dev *hdev)
2263 {
2264         hci_req_sync_cancel(hdev, ENODEV);
2265
2266         cancel_work_sync(&hdev->discov_update);
2267         cancel_work_sync(&hdev->bg_scan_update);
2268         cancel_work_sync(&hdev->scan_update);
2269         cancel_work_sync(&hdev->connectable_update);
2270         cancel_work_sync(&hdev->discoverable_update);
2271         cancel_delayed_work_sync(&hdev->discov_off);
2272         cancel_delayed_work_sync(&hdev->le_scan_disable);
2273         cancel_delayed_work_sync(&hdev->le_scan_restart);
2274
2275         if (hdev->adv_instance_timeout) {
2276                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2277                 hdev->adv_instance_timeout = 0;
2278         }
2279 }