2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 6
/* Opcodes advertised to user space in the Read Management Supported
 * Commands reply (see read_commands() below).
 * NOTE(review): this view appears truncated - several opcodes and the
 * closing brace of the array are not visible; confirm against the
 * upstream file before editing.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
/* Events advertised to user space in the Read Management Supported
 * Commands reply, alongside mgmt_commands above.
 * NOTE(review): some event entries and the closing brace are missing
 * from this truncated view.
 */
90 static const u16 mgmt_events[] = {
91 MGMT_EV_CONTROLLER_ERROR,
93 MGMT_EV_INDEX_REMOVED,
95 MGMT_EV_CLASS_OF_DEV_CHANGED,
96 MGMT_EV_LOCAL_NAME_CHANGED,
98 MGMT_EV_NEW_LONG_TERM_KEY,
99 MGMT_EV_DEVICE_CONNECTED,
100 MGMT_EV_DEVICE_DISCONNECTED,
101 MGMT_EV_CONNECT_FAILED,
102 MGMT_EV_PIN_CODE_REQUEST,
103 MGMT_EV_USER_CONFIRM_REQUEST,
104 MGMT_EV_USER_PASSKEY_REQUEST,
106 MGMT_EV_DEVICE_FOUND,
108 MGMT_EV_DEVICE_BLOCKED,
109 MGMT_EV_DEVICE_UNBLOCKED,
110 MGMT_EV_DEVICE_UNPAIRED,
111 MGMT_EV_PASSKEY_NOTIFY,
116 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
118 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
119 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
122 struct list_head list;
130 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below.
 * Each slot must stay aligned with the Core Specification's HCI error
 * code numbering, so entries may not be reordered.
 */
131 static u8 mgmt_status_table[] = {
133 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
134 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
135 MGMT_STATUS_FAILED, /* Hardware Failure */
136 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
137 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
138 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
139 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
140 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
141 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
143 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
144 MGMT_STATUS_BUSY, /* Command Disallowed */
145 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
146 MGMT_STATUS_REJECTED, /* Rejected Security */
147 MGMT_STATUS_REJECTED, /* Rejected Personal */
148 MGMT_STATUS_TIMEOUT, /* Host Timeout */
149 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
151 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
152 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
153 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
154 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
155 MGMT_STATUS_BUSY, /* Repeated Attempts */
156 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
157 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
158 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
159 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
160 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
161 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
162 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
163 MGMT_STATUS_FAILED, /* Unspecified Error */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
165 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
166 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
167 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
168 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
169 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
170 MGMT_STATUS_FAILED, /* Unit Link Key Used */
171 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
172 MGMT_STATUS_TIMEOUT, /* Instant Passed */
173 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
174 MGMT_STATUS_FAILED, /* Transaction Collision */
175 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
176 MGMT_STATUS_REJECTED, /* QoS Rejected */
177 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
178 MGMT_STATUS_REJECTED, /* Insufficient Security */
179 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
180 MGMT_STATUS_BUSY, /* Role Switch Pending */
181 MGMT_STATUS_FAILED, /* Slot Violation */
182 MGMT_STATUS_FAILED, /* Role Switch Failed */
183 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
184 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
185 MGMT_STATUS_BUSY, /* Host Busy Pairing */
186 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
187 MGMT_STATUS_BUSY, /* Controller Busy */
188 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
189 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
190 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
191 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
192 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Map an HCI status byte to its mgmt status equivalent via the table
 * above; any code beyond the table maps to MGMT_STATUS_FAILED.
 */
195 static u8 mgmt_status(u8 hci_status)
197 if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 return mgmt_status_table[hci_status];
200 return MGMT_STATUS_FAILED;
/* Send a MGMT_EV_CMD_STATUS event for @cmd with @status to the socket
 * @sk. Builds an skb with an mgmt_hdr followed by a cmd_status event
 * body and queues it on the socket's receive queue.
 * NOTE(review): the allocation-failure check, ev->status assignment and
 * the kfree_skb error path are missing from this truncated view.
 */
203 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
206 struct mgmt_hdr *hdr;
207 struct mgmt_ev_cmd_status *ev;
210 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
212 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
216 hdr = (void *) skb_put(skb, sizeof(*hdr));
218 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
219 hdr->index = cpu_to_le16(index);
220 hdr->len = cpu_to_le16(sizeof(*ev));
222 ev = (void *) skb_put(skb, sizeof(*ev));
224 ev->opcode = cpu_to_le16(cmd);
226 err = sock_queue_rcv_skb(sk, skb);
/* Send a MGMT_EV_CMD_COMPLETE event for @cmd to socket @sk, carrying
 * @rp_len bytes of response payload from @rp after the event header.
 * NOTE(review): allocation-failure handling and the ev->status
 * assignment are not visible in this truncated view.
 */
233 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
234 void *rp, size_t rp_len)
237 struct mgmt_hdr *hdr;
238 struct mgmt_ev_cmd_complete *ev;
241 BT_DBG("sock %p", sk);
243 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
247 hdr = (void *) skb_put(skb, sizeof(*hdr));
249 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
250 hdr->index = cpu_to_le16(index);
251 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
253 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
254 ev->opcode = cpu_to_le16(cmd);
258 memcpy(ev->data, rp, rp_len);
260 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the static mgmt interface
 * version/revision pair. Uses MGMT_INDEX_NONE since this is not tied
 * to any controller.
 */
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
270 struct mgmt_rp_read_version rp;
272 BT_DBG("sock %p", sk);
274 rp.version = MGMT_VERSION;
275 rp.revision = cpu_to_le16(MGMT_REVISION);
277 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists. Opcodes are written little-endian via
 * put_unaligned_le16 since rp->opcodes is a packed wire-format buffer.
 * NOTE(review): the kmalloc NULL check and the trailing kfree(rp) are
 * missing from this truncated view.
 */
281 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
284 struct mgmt_rp_read_commands *rp;
285 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
286 const u16 num_events = ARRAY_SIZE(mgmt_events);
291 BT_DBG("sock %p", sk);
293 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
295 rp = kmalloc(rp_size, GFP_KERNEL);
299 rp->num_commands = cpu_to_le16(num_commands);
300 rp->num_events = cpu_to_le16(num_events);
302 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
303 put_unaligned_le16(mgmt_commands[i], opcode);
305 for (i = 0; i < num_events; i++, opcode++)
306 put_unaligned_le16(mgmt_events[i], opcode);
308 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all BR/EDR
 * controllers, skipping devices still in setup (HCI_SETUP) or bound to
 * a user channel (HCI_USER_CHANNEL). Two passes under
 * hci_dev_list_lock: first count for the allocation size, then fill.
 * GFP_ATOMIC is required because the read lock is held across kmalloc.
 * NOTE(review): several lines (count reset, NULL check, closing braces,
 * kfree) are missing from this truncated view.
 */
315 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
318 struct mgmt_rp_read_index_list *rp;
324 BT_DBG("sock %p", sk);
326 read_lock(&hci_dev_list_lock);
329 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR)
334 rp_len = sizeof(*rp) + (2 * count);
335 rp = kmalloc(rp_len, GFP_ATOMIC);
337 read_unlock(&hci_dev_list_lock);
342 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags))
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
349 if (d->dev_type == HCI_BREDR) {
350 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id);
/* rp_len is recomputed because entries may have been skipped above */
355 rp->num_controllers = cpu_to_le16(count);
356 rp_len = sizeof(*rp) + (2 * count);
358 read_unlock(&hci_dev_list_lock);
360 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the bitmask of settings this controller can support, based on
 * its LMP feature bits: POWERED/PAIRABLE/DEBUG_KEYS always, plus
 * BR/EDR-, SSP-, SC- and LE-dependent settings.
 */
368 static u32 get_supported_settings(struct hci_dev *hdev)
372 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS;
376 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable needs interlaced page scan, BT 1.2 or later */
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY;
384 if (lmp_ssp_capable(hdev)) {
385 settings |= MGMT_SETTING_SSP;
386 settings |= MGMT_SETTING_HS;
/* Secure Connections can also be forced on via debugfs (HCI_FORCE_SC) */
389 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
391 settings |= MGMT_SETTING_SECURE_CONN;
394 if (lmp_le_capable(hdev)) {
395 settings |= MGMT_SETTING_LE;
396 settings |= MGMT_SETTING_ADVERTISING;
397 settings |= MGMT_SETTING_PRIVACY;
/* Build the bitmask of currently active settings by translating the
 * hdev->dev_flags bits (and the powered state) into MGMT_SETTING_*
 * bits, mirroring get_supported_settings() above.
 */
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
437 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
438 settings |= MGMT_SETTING_ADVERTISING;
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN;
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS;
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
447 settings |= MGMT_SETTING_PRIVACY;
452 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the device's 16-bit service UUIDs to
 * @data (at most @len bytes); returns the advanced write pointer.
 * The length byte at uuids_start[0] grows as UUIDs are added, and the
 * field type is downgraded from _ALL to _SOME if the buffer fills up.
 * The Device ID profile UUID (0x1200) is excluded - it is emitted as a
 * dedicated EIR_DEVICE_ID field by create_eir() instead.
 * NOTE(review): duplicate-suppression and field-initialisation lines
 * are missing from this truncated view.
 */
454 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
456 u8 *ptr = data, *uuids_start = NULL;
457 struct bt_uuid *uuid;
462 list_for_each_entry(uuid, &hdev->uuids, list) {
465 if (uuid->size != 16)
/* 16-bit UUIDs live in bytes 12-13 of the 128-bit base form */
468 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
472 if (uuid16 == PNP_INFO_SVCLASS_ID)
478 uuids_start[1] = EIR_UUID16_ALL;
482 /* Stop if not enough space to put next UUID */
483 if ((ptr - data) + sizeof(u16) > len) {
484 uuids_start[1] = EIR_UUID16_SOME;
488 *ptr++ = (uuid16 & 0x00ff);
489 *ptr++ = (uuid16 & 0xff00) >> 8;
490 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the device's 32-bit service UUIDs to
 * @data (at most @len bytes); returns the advanced write pointer.
 * Same _ALL/_SOME downgrade scheme as create_uuid16_list().
 * NOTE(review): field-initialisation lines are missing from this
 * truncated view.
 */
498 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
499 u8 *ptr = data, *uuids_start = NULL;
500 struct bt_uuid *uuid;
504 list_for_each_entry(uuid, &hdev->uuids, list) {
505 if (uuid->size != 32)
511 uuids_start[1] = EIR_UUID32_ALL;
515 /* Stop if not enough space to put next UUID */
516 if ((ptr - data) + sizeof(u32) > len) {
517 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit UUIDs occupy bytes 12-15 of the stored 128-bit form */
521 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
523 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the device's 128-bit service UUIDs to
 * @data (at most @len bytes); returns the advanced write pointer.
 * Same _ALL/_SOME downgrade scheme as the 16/32-bit variants above.
 * NOTE(review): field-initialisation lines are missing from this
 * truncated view.
 */
531 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
532 u8 *ptr = data, *uuids_start = NULL;
533 struct bt_uuid *uuid;
537 list_for_each_entry(uuid, &hdev->uuids, list) {
538 if (uuid->size != 128)
544 uuids_start[1] = EIR_UUID128_ALL;
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + 16 > len) {
550 uuids_start[1] = EIR_UUID128_SOME;
554 memcpy(ptr, uuid->uuid, 16);
556 uuids_start[0] += 16;
/* Find a pending mgmt command with the given opcode on @hdev, or NULL.
 * Caller is expected to hold the hdev lock protecting mgmt_pending.
 */
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
564 struct pending_cmd *cmd;
566 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data containing the local name
 * (EIR_NAME_COMPLETE, or EIR_NAME_SHORT when truncated to fit
 * HCI_MAX_AD_LENGTH); returns the total data length written.
 * NOTE(review): the name truncation assignment and the empty-name path
 * are not visible in this truncated view.
 */
574 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
579 name_len = strlen(hdev->dev_name);
/* 2 bytes of field header (length + type) are reserved */
581 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
583 if (name_len > max_len) {
585 ptr[1] = EIR_NAME_SHORT;
587 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte covers the type byte plus the name payload */
589 ptr[0] = name_len + 1;
591 memcpy(ptr + 2, hdev->dev_name, name_len);
593 ad_len += (name_len + 2);
594 ptr += (name_len + 2);
600 static void update_scan_rsp_data(struct hci_request *req)
602 struct hci_dev *hdev = req->hdev;
603 struct hci_cp_le_set_scan_rsp_data cp;
606 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
609 memset(&cp, 0, sizeof(cp));
611 len = create_scan_rsp_data(hdev, cp.data);
613 if (hdev->scan_rsp_data_len == len &&
614 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
618 hdev->scan_rsp_data_len = len;
622 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL or
 * LE_AD_LIMITED) matching the device's discoverable state. A pending
 * SET_DISCOVERABLE command takes precedence over the current flags
 * since those have not been updated yet.
 */
625 static u8 get_adv_discov_flags(struct hci_dev *hdev)
627 struct pending_cmd *cmd;
629 /* If there's a pending mgmt command the flags will not yet have
630 * their final values, so check for this first.
632 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
634 struct mgmt_mode *cp = cmd->param;
636 return LE_AD_GENERAL;
637 else if (cp->val == 0x02)
638 return LE_AD_LIMITED;
640 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
641 return LE_AD_LIMITED;
642 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
643 return LE_AD_GENERAL;
649 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
651 u8 ad_len = 0, flags = 0;
653 flags |= get_adv_discov_flags(hdev);
655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
656 flags |= LE_AD_NO_BREDR;
659 BT_DBG("adv flags 0x%02x", flags);
669 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
671 ptr[1] = EIR_TX_POWER;
672 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI LE Set Advertising Data command on @req if the data
 * changed; mirrors update_scan_rsp_data() including the cached-copy
 * comparison that suppresses redundant commands.
 */
681 static void update_adv_data(struct hci_request *req)
683 struct hci_dev *hdev = req->hdev;
684 struct hci_cp_le_set_adv_data cp;
687 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
690 memset(&cp, 0, sizeof(cp));
692 len = create_adv_data(hdev, cp.data);
694 if (hdev->adv_data_len == len &&
695 memcmp(cp.data, hdev->adv_data, len) == 0)
698 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
699 hdev->adv_data_len = len;
703 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Build the BR/EDR Extended Inquiry Response payload in @data: local
 * name (complete or shortened), TX power if valid, Device ID record if
 * configured, and finally the 16/32/128-bit UUID lists, each bounded
 * by the remaining HCI_MAX_EIR_LENGTH space.
 * NOTE(review): ptr initialisation and the name-truncation branch are
 * not visible in this truncated view.
 */
706 static void create_eir(struct hci_dev *hdev, u8 *data)
711 name_len = strlen(hdev->dev_name);
717 ptr[1] = EIR_NAME_SHORT;
719 ptr[1] = EIR_NAME_COMPLETE;
721 /* EIR Data length */
722 ptr[0] = name_len + 1;
724 memcpy(ptr + 2, hdev->dev_name, name_len);
726 ptr += (name_len + 2);
729 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
731 ptr[1] = EIR_TX_POWER;
732 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version - all LE 16-bit */
737 if (hdev->devid_source > 0) {
739 ptr[1] = EIR_DEVICE_ID;
741 put_unaligned_le16(hdev->devid_source, ptr + 2);
742 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
743 put_unaligned_le16(hdev->devid_product, ptr + 6);
744 put_unaligned_le16(hdev->devid_version, ptr + 8);
749 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
750 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
751 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on @req when needed. Skipped if the
 * device is off, lacks extended inquiry support, has SSP disabled
 * (EIR requires SSP), or the service cache is still active; also
 * skipped when the generated EIR matches the cached copy.
 */
754 static void update_eir(struct hci_request *req)
756 struct hci_dev *hdev = req->hdev;
757 struct hci_cp_write_eir cp;
759 if (!hdev_is_powered(hdev))
762 if (!lmp_ext_inq_capable(hdev))
765 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
768 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
771 memset(&cp, 0, sizeof(cp));
773 create_eir(hdev, cp.data);
775 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
778 memcpy(hdev->eir, cp.data, sizeof(cp.data));
780 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; the
 * result forms the service-class byte of the Class of Device.
 */
783 static u8 get_service_classes(struct hci_dev *hdev)
785 struct bt_uuid *uuid;
788 list_for_each_entry(uuid, &hdev->uuids, list)
789 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on @req, composing the
 * 3-byte CoD from minor/major class and the aggregated service-class
 * hints. Skipped when powered off, BR/EDR disabled, or the service
 * cache is active; also skipped when the CoD is unchanged.
 * NOTE(review): the limited-discoverable bit set on cod[1] is not
 * visible in this truncated view - confirm against upstream.
 */
794 static void update_class(struct hci_request *req)
796 struct hci_dev *hdev = req->hdev;
799 BT_DBG("%s", hdev->name);
801 if (!hdev_is_powered(hdev))
804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
807 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
810 cod[0] = hdev->minor_class;
811 cod[1] = hdev->major_class;
812 cod[2] = get_service_classes(hdev);
814 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
817 if (memcmp(cod, hdev->dev_class, 3) == 0)
820 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the effective connectable state: a pending SET_CONNECTABLE
 * command's requested value wins over the current HCI_CONNECTABLE
 * flag, which has not been updated yet while the command is in flight.
 */
823 static bool get_connectable(struct hci_dev *hdev)
825 struct pending_cmd *cmd;
827 /* If there's a pending mgmt command the flag will not yet have
828 * it's final value, so check for this first.
830 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
832 struct mgmt_mode *cp = cmd->param;
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue the HCI commands that (re)enable LE advertising on @req:
 * resolve the own-address type (possibly programming a new random
 * address), set advertising parameters, then enable advertising.
 * Advertising type is ADV_IND when connectable, otherwise
 * ADV_NONCONN_IND.
 */
839 static void enable_advertising(struct hci_request *req)
841 struct hci_dev *hdev = req->hdev;
842 struct hci_cp_le_set_adv_param cp;
843 u8 own_addr_type, enable = 0x01;
846 /* Clear the HCI_ADVERTISING bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes.
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
853 connectable = get_connectable(hdev);
855 /* Set require_privacy to true only when non-connectable
856 * advertising is used. In that case it is fine to use a
857 * non-resolvable private address.
859 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
862 memset(&cp, 0, sizeof(cp));
/* 0x0800 * 0.625ms = 1.28s advertising interval */
863 cp.min_interval = cpu_to_le16(0x0800);
864 cp.max_interval = cpu_to_le16(0x0800);
865 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
866 cp.own_address_type = own_addr_type;
867 cp.channel_map = hdev->le_adv_channel_map;
869 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue an HCI LE Set Advertising Enable (off) command on @req.
 * NOTE(review): the local 'enable = 0x00' declaration is not visible
 * in this truncated view.
 */
874 static void disable_advertising(struct hci_request *req)
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and pushes the now-uncached class/EIR state to the
 * controller via an hci_request.
 * NOTE(review): the hci_dev_lock() and the update_eir()/update_class()
 * calls between init and unlock are not visible in this truncated view.
 */
881 static void service_cache_off(struct work_struct *work)
883 struct hci_dev *hdev = container_of(work, struct hci_dev,
885 struct hci_request req;
887 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
890 hci_req_init(&req, hdev);
897 hci_dev_unlock(hdev);
899 hci_req_run(&req, NULL);
/* Delayed-work handler for Resolvable Private Address rotation: mark
 * the RPA expired, and if we are advertising with no LE connections,
 * bounce advertising so enable_advertising() programs a fresh RPA.
 */
902 static void rpa_expired(struct work_struct *work)
904 struct hci_dev *hdev = container_of(work, struct hci_dev,
906 struct hci_request req;
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Nothing to do unless advertising and idle on LE links */
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
913 hci_conn_num(hdev, LE_LINK) > 0)
916 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function.
920 hci_req_init(&req, hdev);
922 disable_advertising(&req);
923 enable_advertising(&req);
925 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, triggered by the first mgmt
 * command touching @hdev. Guarded by test_and_set of HCI_MGMT so it
 * runs exactly once; sets up the service-cache and RPA-expiry work and
 * clears the implicit pairable default.
 */
928 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
930 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
933 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
934 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
936 /* Non-mgmt controlled devices get this bit set
937 * implicitly so that pairing works for them, however
938 * for mgmt we require user-space to explicitly enable
941 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and (short) name.
 * NOTE(review): the hci_dev_lock() matching the visible unlock is not
 * shown in this truncated view.
 */
944 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
945 void *data, u16 data_len)
947 struct mgmt_rp_read_info rp;
949 BT_DBG("sock %p %s", sk, hdev->name);
953 memset(&rp, 0, sizeof(rp));
955 bacpy(&rp.bdaddr, &hdev->bdaddr);
957 rp.version = hdev->hci_ver;
958 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
960 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
961 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
963 memcpy(rp.dev_class, hdev->dev_class, 3);
965 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
966 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
968 hci_dev_unlock(hdev);
970 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
974 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for @opcode on @hdev, copy @len bytes of the
 * request parameters into it, and link it onto hdev->mgmt_pending.
 * NOTE(review): the NULL checks for both kmallocs, the sk reference
 * taking and the return statement are missing from this truncated view.
 */
981 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
982 struct hci_dev *hdev, void *data,
985 struct pending_cmd *cmd;
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
991 cmd->opcode = opcode;
992 cmd->index = hdev->id;
994 cmd->param = kmalloc(len, GFP_KERNEL);
1001 memcpy(cmd->param, data, len);
1006 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command on @hdev matching @opcode; an
 * opcode of 0 matches all. Uses the _safe iterator because callbacks
 * (e.g. settings_rsp, cmd_status_rsp) may remove entries.
 */
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 void (*cb)(struct pending_cmd *cmd,
1016 struct pending_cmd *cmd, *tmp;
1018 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and free it. */
1026 static void mgmt_pending_remove(struct pending_cmd *cmd)
1028 list_del(&cmd->list);
1029 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the (little-endian) bitmap
 * of current settings as the command-complete payload.
 */
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1034 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1036 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once all connections
 * are gone, fast-track the queued power-off work instead of waiting
 * for its delayed timeout.
 */
1040 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1042 BT_DBG("%s status 0x%02x", hdev->name, status);
1044 if (hci_conn_count(hdev) == 0) {
1045 cancel_delayed_work(&hdev->power_off);
1046 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort whatever discovery phase is
 * active: cancel inquiry or disable LE scan while FINDING, cancel the
 * outstanding remote-name request while RESOLVING, and finally stop
 * any passive LE scan.
 * NOTE(review): the default case and the inquiry-cache lookup's NAME
 * state arguments are not fully visible in this truncated view.
 */
1050 static void hci_stop_discovery(struct hci_request *req)
1052 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp;
1054 struct inquiry_entry *e;
1056 switch (hdev->discovery.state) {
1057 case DISCOVERY_FINDING:
1058 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1059 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1061 cancel_delayed_work(&hdev->le_scan_disable);
1062 hci_req_add_le_scan_disable(req);
1067 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1073 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1080 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1082 hci_req_add_le_scan_disable(req);
/* Build and run the HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising and LE scan,
 * then tear down every connection according to its state (disconnect,
 * cancel in-progress connects, or reject incoming requests), all with
 * reason 0x15 "Terminated due to Power Off".
 * Returns the hci_req_run() result (-ENODATA if nothing was queued).
 * NOTE(review): the case labels for the connection-state switch and
 * some closing braces are not visible in this truncated view.
 */
1087 static int clean_up_hci_state(struct hci_dev *hdev)
1089 struct hci_request req;
1090 struct hci_conn *conn;
1092 hci_req_init(&req, hdev);
1094 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1095 test_bit(HCI_PSCAN, &hdev->flags)) {
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1101 disable_advertising(&req);
1103 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1104 hci_req_add_le_scan_disable(&req);
1107 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1108 struct hci_cp_disconnect dc;
1109 struct hci_cp_reject_conn_req rej;
1111 switch (conn->state) {
1114 dc.handle = cpu_to_le16(conn->handle);
1115 dc.reason = 0x15; /* Terminated due to Power Off */
1116 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1119 if (conn->type == LE_LINK)
1120 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1122 else if (conn->type == ACL_LINK)
1123 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1127 bacpy(&rej.bdaddr, &conn->dst);
1128 rej.reason = 0x15; /* Terminated due to Power Off */
1129 if (conn->type == ACL_LINK)
1130 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1132 else if (conn->type == SCO_LINK)
1133 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1139 return hci_req_run(&req, clean_up_hci_complete);
/* MGMT_OP_SET_POWERED handler. Validates the on/off value, rejects
 * concurrent SET_POWERED commands, short-circuits when the state is
 * already as requested, then queues either the power-on work or a
 * staged power-off (clean_up_hci_state() followed by delayed
 * power_off work; -ENODATA means nothing to clean up so power off
 * immediately).
 * NOTE(review): the hci_dev_lock(), several error-path gotos and the
 * final return are not visible in this truncated view.
 */
1142 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1145 struct mgmt_mode *cp = data;
1146 struct pending_cmd *cmd;
1149 BT_DBG("request for %s", hdev->name);
1151 if (cp->val != 0x00 && cp->val != 0x01)
1152 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1153 MGMT_STATUS_INVALID_PARAMS);
1157 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1158 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: treat the device as powered and just cancel it */
1163 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1164 cancel_delayed_work(&hdev->power_off);
1167 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1169 err = mgmt_powered(hdev, 1);
1174 if (!!cp->val == hdev_is_powered(hdev)) {
1175 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1179 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1186 queue_work(hdev->req_workqueue, &hdev->power_on);
1189 /* Disconnect connections, stop scans, etc */
1190 err = clean_up_hci_state(hdev);
1192 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1193 HCI_POWER_OFF_TIMEOUT);
1195 /* ENODATA means there were no HCI commands queued */
1196 if (err == -ENODATA) {
1197 cancel_delayed_work(&hdev->power_off);
1198 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1204 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * Builds an skb with a mgmt_hdr (index = hdev->id, or MGMT_INDEX_NONE
 * when @hdev is NULL) plus @data_len payload bytes, timestamps it and
 * hands it to hci_send_to_control().
 * NOTE(review): the allocation NULL check, the hdev conditional and
 * the kfree_skb/return lines are missing from this truncated view.
 */
1208 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1209 struct sock *skip_sk)
1211 struct sk_buff *skb;
1212 struct mgmt_hdr *hdr;
1214 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1218 hdr = (void *) skb_put(skb, sizeof(*hdr));
1219 hdr->opcode = cpu_to_le16(event);
1221 hdr->index = cpu_to_le16(hdev->id);
1223 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1224 hdr->len = cpu_to_le16(data_len);
1227 memcpy(skb_put(skb, data_len), data, data_len);
1230 __net_timestamp(skb);
1232 hci_send_to_control(skb, skip_sk);
/* Broadcast a MGMT_EV_NEW_SETTINGS event carrying the current settings
 * bitmap to every control socket except @skip.
 */
1238 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1242 ev = cpu_to_le32(get_current_settings(hdev));
1244 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1249 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, remember the first responder's socket in the
 * cmd_lookup (holding a reference) so the caller can skip it when
 * broadcasting, then free the command.
 */
1253 static void settings_rsp(struct pending_cmd *cmd, void *data)
1255 struct cmd_lookup *match = data;
1257 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1259 list_del(&cmd->list);
1261 if (match->sk == NULL) {
1262 match->sk = cmd->sk;
1263 sock_hold(match->sk);
1266 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
1269 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1273 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1274 mgmt_pending_remove(cmd);
/* Check BR/EDR availability for a command: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is administratively
 * disabled, SUCCESS otherwise.
 */
1277 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1279 if (!lmp_bredr_capable(hdev))
1280 return MGMT_STATUS_NOT_SUPPORTED;
1281 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1282 return MGMT_STATUS_REJECTED;
1284 return MGMT_STATUS_SUCCESS;
/* Check LE availability for a command; same scheme as
 * mgmt_bredr_support() but for the LE transport.
 */
1287 static u8 mgmt_le_support(struct hci_dev *hdev)
1289 if (!lmp_le_capable(hdev))
1290 return MGMT_STATUS_NOT_SUPPORTED;
1291 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1292 return MGMT_STATUS_REJECTED;
1294 return MGMT_STATUS_SUCCESS;
/* HCI-request completion handler for set_discoverable(). On failure,
 * report the mapped status and clear the limited-discoverable flag.
 * On success, update HCI_DISCOVERABLE to match the requested mode, arm
 * the discoverable timeout if one was stored, answer the pending
 * command, broadcast new settings if anything changed, and refresh the
 * class of device (for the limited-discoverable CoD bit).
 * NOTE(review): hci_dev_lock(), the cp->val dereference and the
 * update_class() call inside the request are not visible in this
 * truncated view.
 */
1297 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1299 struct pending_cmd *cmd;
1300 struct mgmt_mode *cp;
1301 struct hci_request req;
1304 BT_DBG("status 0x%02x", status);
1308 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1313 u8 mgmt_err = mgmt_status(status);
1314 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1315 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1321 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1324 if (hdev->discov_timeout > 0) {
1325 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1326 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1330 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1334 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1337 new_settings(hdev, cmd->sk);
1339 /* When the discoverable mode gets changed, make sure
1340 * that class of device has the limited discoverable
1341 * bit correctly set.
1343 hci_req_init(&req, hdev);
1345 hci_req_run(&req, NULL);
1348 mgmt_pending_remove(cmd);
1351 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable (limited requires a timeout, off forbids
 * one). Handles the powered-off case by toggling flags only, the
 * same-mode case by just re-arming the timeout, and otherwise builds
 * an HCI request: for BR/EDR, program the IAC list (LIAC+GIAC for
 * limited, GIAC for general) and write scan enable; for LE-only, just
 * refresh the advertising data. Completion is handled by
 * set_discoverable_complete().
 * NOTE(review): hci_dev_lock(), several gotos/braces and the scan
 * variable setup are not visible in this truncated view.
 */
1354 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1357 struct mgmt_cp_set_discoverable *cp = data;
1358 struct pending_cmd *cmd;
1359 struct hci_request req;
1364 BT_DBG("request for %s", hdev->name);
1366 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1367 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1368 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1369 MGMT_STATUS_REJECTED);
1371 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1372 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1373 MGMT_STATUS_INVALID_PARAMS);
1375 timeout = __le16_to_cpu(cp->timeout);
1377 /* Disabling discoverable requires that no timeout is set,
1378 * and enabling limited discoverable requires a timeout.
1380 if ((cp->val == 0x00 && timeout > 0) ||
1381 (cp->val == 0x02 && timeout == 0))
1382 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1383 MGMT_STATUS_INVALID_PARAMS);
1387 if (!hdev_is_powered(hdev) && timeout > 0) {
1388 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1389 MGMT_STATUS_NOT_POWERED);
1393 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1394 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1395 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable mode requires the device to also be connectable */
1400 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1401 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1402 MGMT_STATUS_REJECTED);
1406 if (!hdev_is_powered(hdev)) {
1407 bool changed = false;
1409 /* Setting limited discoverable when powered off is
1410 * not a valid operation since it requires a timeout
1411 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1413 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1414 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1418 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1423 err = new_settings(hdev, sk);
1428 /* If the current mode is the same, then just update the timeout
1429 * value with the new value. And if only the timeout gets updated,
1430 * then no need for any HCI transactions.
1432 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1433 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1434 &hdev->dev_flags)) {
1435 cancel_delayed_work(&hdev->discov_off);
1436 hdev->discov_timeout = timeout;
1438 if (cp->val && hdev->discov_timeout > 0) {
1439 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1440 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1444 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1448 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1454 /* Cancel any potential discoverable timeout that might be
1455 * still active and store new timeout value. The arming of
1456 * the timeout happens in the complete handler.
1458 cancel_delayed_work(&hdev->discov_off);
1459 hdev->discov_timeout = timeout;
1461 /* Limited discoverable mode */
1462 if (cp->val == 0x02)
1463 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1465 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1467 hci_req_init(&req, hdev);
1469 /* The procedure for LE-only controllers is much simpler - just
1470 * update the advertising data.
1472 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1478 struct hci_cp_write_current_iac_lap hci_cp;
1480 if (cp->val == 0x02) {
1481 /* Limited discoverable mode */
1482 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1483 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1484 hci_cp.iac_lap[1] = 0x8b;
1485 hci_cp.iac_lap[2] = 0x9e;
1486 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1487 hci_cp.iac_lap[4] = 0x8b;
1488 hci_cp.iac_lap[5] = 0x9e;
1490 /* General discoverable mode */
1492 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1493 hci_cp.iac_lap[1] = 0x8b;
1494 hci_cp.iac_lap[2] = 0x9e;
1497 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1498 (hci_cp.num_iac * 3) + 1, &hci_cp);
1500 scan |= SCAN_INQUIRY;
1502 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1505 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
/* Also keep the LE advertising flags byte in sync */
1508 update_adv_data(&req);
1510 err = hci_req_run(&req, set_discoverable_complete);
1512 mgmt_pending_remove(cmd);
1515 hci_dev_unlock(hdev);
1519 static void write_fast_connectable(struct hci_request *req, bool enable)
1521 struct hci_dev *hdev = req->hdev;
1522 struct hci_cp_write_page_scan_activity acp;
1525 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1528 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1532 type = PAGE_SCAN_TYPE_INTERLACED;
1534 /* 160 msec page scan interval */
1535 acp.interval = cpu_to_le16(0x0100);
1537 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1539 /* default 1.28 sec page scan */
1540 acp.interval = cpu_to_le16(0x0800);
1543 acp.window = cpu_to_le16(0x0012);
1545 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1546 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1547 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1550 if (hdev->page_scan_type != type)
1551 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1554 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1556 struct pending_cmd *cmd;
1557 struct mgmt_mode *cp;
1560 BT_DBG("status 0x%02x", status);
1564 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1569 u8 mgmt_err = mgmt_status(status);
1570 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1576 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1578 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1580 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1583 new_settings(hdev, cmd->sk);
1586 mgmt_pending_remove(cmd);
1589 hci_dev_unlock(hdev);
1592 static int set_connectable_update_settings(struct hci_dev *hdev,
1593 struct sock *sk, u8 val)
1595 bool changed = false;
1598 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1602 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1604 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1605 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1608 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1613 return new_settings(hdev, sk);
1618 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1621 struct mgmt_mode *cp = data;
1622 struct pending_cmd *cmd;
1623 struct hci_request req;
1627 BT_DBG("request for %s", hdev->name);
1629 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1630 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1631 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1632 MGMT_STATUS_REJECTED);
1634 if (cp->val != 0x00 && cp->val != 0x01)
1635 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1636 MGMT_STATUS_INVALID_PARAMS);
1640 if (!hdev_is_powered(hdev)) {
1641 err = set_connectable_update_settings(hdev, sk, cp->val);
1645 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1646 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1647 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1658 hci_req_init(&req, hdev);
1660 /* If BR/EDR is not enabled and we disable advertising as a
1661 * by-product of disabling connectable, we need to update the
1662 * advertising flags.
1664 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1666 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1667 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1669 update_adv_data(&req);
1670 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1676 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1677 hdev->discov_timeout > 0)
1678 cancel_delayed_work(&hdev->discov_off);
1681 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1684 /* If we're going from non-connectable to connectable or
1685 * vice-versa when fast connectable is enabled ensure that fast
1686 * connectable gets disabled. write_fast_connectable won't do
1687 * anything if the page scan parameters are already what they
1690 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1691 write_fast_connectable(&req, false);
1693 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1694 hci_conn_num(hdev, LE_LINK) == 0) {
1695 disable_advertising(&req);
1696 enable_advertising(&req);
1699 err = hci_req_run(&req, set_connectable_complete);
1701 mgmt_pending_remove(cmd);
1702 if (err == -ENODATA)
1703 err = set_connectable_update_settings(hdev, sk,
1709 hci_dev_unlock(hdev);
1713 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1716 struct mgmt_mode *cp = data;
1720 BT_DBG("request for %s", hdev->name);
1722 if (cp->val != 0x00 && cp->val != 0x01)
1723 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1724 MGMT_STATUS_INVALID_PARAMS);
1729 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1731 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1733 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1738 err = new_settings(hdev, sk);
1741 hci_dev_unlock(hdev);
1745 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1748 struct mgmt_mode *cp = data;
1749 struct pending_cmd *cmd;
1753 BT_DBG("request for %s", hdev->name);
1755 status = mgmt_bredr_support(hdev);
1757 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1767 bool changed = false;
1769 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1770 &hdev->dev_flags)) {
1771 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1775 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1780 err = new_settings(hdev, sk);
1785 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1786 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1793 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1794 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1798 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1804 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1806 mgmt_pending_remove(cmd);
1811 hci_dev_unlock(hdev);
1815 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1817 struct mgmt_mode *cp = data;
1818 struct pending_cmd *cmd;
1822 BT_DBG("request for %s", hdev->name);
1824 status = mgmt_bredr_support(hdev);
1826 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1828 if (!lmp_ssp_capable(hdev))
1829 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1830 MGMT_STATUS_NOT_SUPPORTED);
1832 if (cp->val != 0x00 && cp->val != 0x01)
1833 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1834 MGMT_STATUS_INVALID_PARAMS);
1838 if (!hdev_is_powered(hdev)) {
1842 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1845 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1848 changed = test_and_clear_bit(HCI_HS_ENABLED,
1851 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1854 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1859 err = new_settings(hdev, sk);
1864 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1865 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1866 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1871 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1872 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1876 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1882 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1884 mgmt_pending_remove(cmd);
1889 hci_dev_unlock(hdev);
1893 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1895 struct mgmt_mode *cp = data;
1900 BT_DBG("request for %s", hdev->name);
1902 status = mgmt_bredr_support(hdev);
1904 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1906 if (!lmp_ssp_capable(hdev))
1907 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1908 MGMT_STATUS_NOT_SUPPORTED);
1910 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1911 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1912 MGMT_STATUS_REJECTED);
1914 if (cp->val != 0x00 && cp->val != 0x01)
1915 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1916 MGMT_STATUS_INVALID_PARAMS);
1921 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1923 if (hdev_is_powered(hdev)) {
1924 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1925 MGMT_STATUS_REJECTED);
1929 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1932 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1937 err = new_settings(hdev, sk);
1940 hci_dev_unlock(hdev);
1944 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1946 struct cmd_lookup match = { NULL, hdev };
1949 u8 mgmt_err = mgmt_status(status);
1951 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1956 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1958 new_settings(hdev, match.sk);
1963 /* Make sure the controller has a good default for
1964 * advertising data. Restrict the update to when LE
1965 * has actually been enabled. During power on, the
1966 * update in powered_update_hci will take care of it.
1968 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1969 struct hci_request req;
1973 hci_req_init(&req, hdev);
1974 update_adv_data(&req);
1975 update_scan_rsp_data(&req);
1976 hci_req_run(&req, NULL);
1978 hci_dev_unlock(hdev);
1982 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1984 struct mgmt_mode *cp = data;
1985 struct hci_cp_write_le_host_supported hci_cp;
1986 struct pending_cmd *cmd;
1987 struct hci_request req;
1991 BT_DBG("request for %s", hdev->name);
1993 if (!lmp_le_capable(hdev))
1994 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1995 MGMT_STATUS_NOT_SUPPORTED);
1997 if (cp->val != 0x00 && cp->val != 0x01)
1998 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1999 MGMT_STATUS_INVALID_PARAMS);
2001 /* LE-only devices do not allow toggling LE on/off */
2002 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2003 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2004 MGMT_STATUS_REJECTED);
2009 enabled = lmp_host_le_capable(hdev);
2011 if (!hdev_is_powered(hdev) || val == enabled) {
2012 bool changed = false;
2014 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2015 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2019 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2020 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2024 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2029 err = new_settings(hdev, sk);
2034 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2035 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2036 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2041 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2047 hci_req_init(&req, hdev);
2049 memset(&hci_cp, 0, sizeof(hci_cp));
2053 hci_cp.simul = lmp_le_br_capable(hdev);
2055 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2056 disable_advertising(&req);
2059 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2062 err = hci_req_run(&req, le_enable_complete);
2064 mgmt_pending_remove(cmd);
2067 hci_dev_unlock(hdev);
2071 /* This is a helper function to test for pending mgmt commands that can
2072 * cause CoD or EIR HCI commands. We can only allow one such pending
2073 * mgmt command at a time since otherwise we cannot easily track what
2074 * the current values are, will be, and based on that calculate if a new
2075 * HCI command needs to be sent and if yes with what value.
2077 static bool pending_eir_or_class(struct hci_dev *hdev)
2079 struct pending_cmd *cmd;
2081 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2082 switch (cmd->opcode) {
2083 case MGMT_OP_ADD_UUID:
2084 case MGMT_OP_REMOVE_UUID:
2085 case MGMT_OP_SET_DEV_CLASS:
2086 case MGMT_OP_SET_POWERED:
2094 static const u8 bluetooth_base_uuid[] = {
2095 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2096 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2099 static u8 get_uuid_size(const u8 *uuid)
2103 if (memcmp(uuid, bluetooth_base_uuid, 12))
2106 val = get_unaligned_le32(&uuid[12]);
2113 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2115 struct pending_cmd *cmd;
2119 cmd = mgmt_pending_find(mgmt_op, hdev);
2123 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2124 hdev->dev_class, 3);
2126 mgmt_pending_remove(cmd);
2129 hci_dev_unlock(hdev);
2132 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2134 BT_DBG("status 0x%02x", status);
2136 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2139 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2141 struct mgmt_cp_add_uuid *cp = data;
2142 struct pending_cmd *cmd;
2143 struct hci_request req;
2144 struct bt_uuid *uuid;
2147 BT_DBG("request for %s", hdev->name);
2151 if (pending_eir_or_class(hdev)) {
2152 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2157 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2163 memcpy(uuid->uuid, cp->uuid, 16);
2164 uuid->svc_hint = cp->svc_hint;
2165 uuid->size = get_uuid_size(cp->uuid);
2167 list_add_tail(&uuid->list, &hdev->uuids);
2169 hci_req_init(&req, hdev);
2174 err = hci_req_run(&req, add_uuid_complete);
2176 if (err != -ENODATA)
2179 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2180 hdev->dev_class, 3);
2184 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2193 hci_dev_unlock(hdev);
2197 static bool enable_service_cache(struct hci_dev *hdev)
2199 if (!hdev_is_powered(hdev))
2202 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2203 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2211 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2213 BT_DBG("status 0x%02x", status);
2215 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2218 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2221 struct mgmt_cp_remove_uuid *cp = data;
2222 struct pending_cmd *cmd;
2223 struct bt_uuid *match, *tmp;
2224 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2225 struct hci_request req;
2228 BT_DBG("request for %s", hdev->name);
2232 if (pending_eir_or_class(hdev)) {
2233 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2238 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2239 hci_uuids_clear(hdev);
2241 if (enable_service_cache(hdev)) {
2242 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2243 0, hdev->dev_class, 3);
2252 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2253 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2256 list_del(&match->list);
2262 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2263 MGMT_STATUS_INVALID_PARAMS);
2268 hci_req_init(&req, hdev);
2273 err = hci_req_run(&req, remove_uuid_complete);
2275 if (err != -ENODATA)
2278 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2279 hdev->dev_class, 3);
2283 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2292 hci_dev_unlock(hdev);
2296 static void set_class_complete(struct hci_dev *hdev, u8 status)
2298 BT_DBG("status 0x%02x", status);
2300 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2303 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2306 struct mgmt_cp_set_dev_class *cp = data;
2307 struct pending_cmd *cmd;
2308 struct hci_request req;
2311 BT_DBG("request for %s", hdev->name);
2313 if (!lmp_bredr_capable(hdev))
2314 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2315 MGMT_STATUS_NOT_SUPPORTED);
2319 if (pending_eir_or_class(hdev)) {
2320 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2325 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2326 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2327 MGMT_STATUS_INVALID_PARAMS);
2331 hdev->major_class = cp->major;
2332 hdev->minor_class = cp->minor;
2334 if (!hdev_is_powered(hdev)) {
2335 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2336 hdev->dev_class, 3);
2340 hci_req_init(&req, hdev);
2342 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2343 hci_dev_unlock(hdev);
2344 cancel_delayed_work_sync(&hdev->service_cache);
2351 err = hci_req_run(&req, set_class_complete);
2353 if (err != -ENODATA)
2356 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2357 hdev->dev_class, 3);
2361 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2370 hci_dev_unlock(hdev);
2374 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2377 struct mgmt_cp_load_link_keys *cp = data;
2378 u16 key_count, expected_len;
2382 BT_DBG("request for %s", hdev->name);
2384 if (!lmp_bredr_capable(hdev))
2385 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2386 MGMT_STATUS_NOT_SUPPORTED);
2388 key_count = __le16_to_cpu(cp->key_count);
2390 expected_len = sizeof(*cp) + key_count *
2391 sizeof(struct mgmt_link_key_info);
2392 if (expected_len != len) {
2393 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2395 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2396 MGMT_STATUS_INVALID_PARAMS);
2399 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2400 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2401 MGMT_STATUS_INVALID_PARAMS);
2403 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2406 for (i = 0; i < key_count; i++) {
2407 struct mgmt_link_key_info *key = &cp->keys[i];
2409 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2410 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2411 MGMT_STATUS_INVALID_PARAMS);
2416 hci_link_keys_clear(hdev);
2419 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2421 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2424 new_settings(hdev, NULL);
2426 for (i = 0; i < key_count; i++) {
2427 struct mgmt_link_key_info *key = &cp->keys[i];
2429 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2430 key->type, key->pin_len);
2433 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2435 hci_dev_unlock(hdev);
2440 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2441 u8 addr_type, struct sock *skip_sk)
2443 struct mgmt_ev_device_unpaired ev;
2445 bacpy(&ev.addr.bdaddr, bdaddr);
2446 ev.addr.type = addr_type;
2448 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2452 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2455 struct mgmt_cp_unpair_device *cp = data;
2456 struct mgmt_rp_unpair_device rp;
2457 struct hci_cp_disconnect dc;
2458 struct pending_cmd *cmd;
2459 struct hci_conn *conn;
2462 memset(&rp, 0, sizeof(rp));
2463 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2464 rp.addr.type = cp->addr.type;
2466 if (!bdaddr_type_is_valid(cp->addr.type))
2467 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2468 MGMT_STATUS_INVALID_PARAMS,
2471 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2472 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_INVALID_PARAMS,
2478 if (!hdev_is_powered(hdev)) {
2479 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2480 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2484 if (cp->addr.type == BDADDR_BREDR) {
2485 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2489 if (cp->addr.type == BDADDR_LE_PUBLIC)
2490 addr_type = ADDR_LE_DEV_PUBLIC;
2492 addr_type = ADDR_LE_DEV_RANDOM;
2494 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2496 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2498 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2502 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2503 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2507 if (cp->disconnect) {
2508 if (cp->addr.type == BDADDR_BREDR)
2509 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2512 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2519 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2521 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2525 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2532 dc.handle = cpu_to_le16(conn->handle);
2533 dc.reason = 0x13; /* Remote User Terminated Connection */
2534 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2536 mgmt_pending_remove(cmd);
2539 hci_dev_unlock(hdev);
2543 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2546 struct mgmt_cp_disconnect *cp = data;
2547 struct mgmt_rp_disconnect rp;
2548 struct hci_cp_disconnect dc;
2549 struct pending_cmd *cmd;
2550 struct hci_conn *conn;
2555 memset(&rp, 0, sizeof(rp));
2556 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2557 rp.addr.type = cp->addr.type;
2559 if (!bdaddr_type_is_valid(cp->addr.type))
2560 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2561 MGMT_STATUS_INVALID_PARAMS,
2566 if (!test_bit(HCI_UP, &hdev->flags)) {
2567 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2568 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2572 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2573 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2574 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2578 if (cp->addr.type == BDADDR_BREDR)
2579 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2582 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2584 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2585 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2586 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2590 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2596 dc.handle = cpu_to_le16(conn->handle);
2597 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2599 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2601 mgmt_pending_remove(cmd);
2604 hci_dev_unlock(hdev);
2608 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2610 switch (link_type) {
2612 switch (addr_type) {
2613 case ADDR_LE_DEV_PUBLIC:
2614 return BDADDR_LE_PUBLIC;
2617 /* Fallback to LE Random address type */
2618 return BDADDR_LE_RANDOM;
2622 /* Fallback to BR/EDR type */
2623 return BDADDR_BREDR;
2627 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2630 struct mgmt_rp_get_connections *rp;
2640 if (!hdev_is_powered(hdev)) {
2641 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2642 MGMT_STATUS_NOT_POWERED);
2647 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2648 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2652 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2653 rp = kmalloc(rp_len, GFP_KERNEL);
2660 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2661 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2663 bacpy(&rp->addr[i].bdaddr, &c->dst);
2664 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2665 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2670 rp->conn_count = cpu_to_le16(i);
2672 /* Recalculate length in case of filtered SCO connections, etc */
2673 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2675 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2681 hci_dev_unlock(hdev);
2685 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2686 struct mgmt_cp_pin_code_neg_reply *cp)
2688 struct pending_cmd *cmd;
2691 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2696 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2697 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2699 mgmt_pending_remove(cmd);
2704 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2707 struct hci_conn *conn;
2708 struct mgmt_cp_pin_code_reply *cp = data;
2709 struct hci_cp_pin_code_reply reply;
2710 struct pending_cmd *cmd;
2717 if (!hdev_is_powered(hdev)) {
2718 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2719 MGMT_STATUS_NOT_POWERED);
2723 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2725 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2726 MGMT_STATUS_NOT_CONNECTED);
2730 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2731 struct mgmt_cp_pin_code_neg_reply ncp;
2733 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2735 BT_ERR("PIN code is not 16 bytes long");
2737 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2739 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2740 MGMT_STATUS_INVALID_PARAMS);
2745 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2751 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2752 reply.pin_len = cp->pin_len;
2753 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2755 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2757 mgmt_pending_remove(cmd);
2760 hci_dev_unlock(hdev);
2764 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2767 struct mgmt_cp_set_io_capability *cp = data;
2773 hdev->io_capability = cp->io_capability;
2775 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2776 hdev->io_capability);
2778 hci_dev_unlock(hdev);
2780 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2784 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2786 struct hci_dev *hdev = conn->hdev;
2787 struct pending_cmd *cmd;
2789 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2790 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2793 if (cmd->user_data != conn)
2802 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2804 struct mgmt_rp_pair_device rp;
2805 struct hci_conn *conn = cmd->user_data;
2807 bacpy(&rp.addr.bdaddr, &conn->dst);
2808 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2810 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2813 /* So we don't get further callbacks for this connection */
2814 conn->connect_cfm_cb = NULL;
2815 conn->security_cfm_cb = NULL;
2816 conn->disconn_cfm_cb = NULL;
2818 hci_conn_drop(conn);
2820 mgmt_pending_remove(cmd);
2823 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2825 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2826 struct pending_cmd *cmd;
2828 cmd = find_pairing(conn);
2830 pairing_complete(cmd, status);
2833 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2835 struct pending_cmd *cmd;
2837 BT_DBG("status %u", status);
2839 cmd = find_pairing(conn);
2841 BT_DBG("Unable to find a pending command");
2843 pairing_complete(cmd, mgmt_status(status));
2846 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2848 struct pending_cmd *cmd;
2850 BT_DBG("status %u", status);
2855 cmd = find_pairing(conn);
2857 BT_DBG("Unable to find a pending command");
2859 pairing_complete(cmd, mgmt_status(status));
2862 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2865 struct mgmt_cp_pair_device *cp = data;
2866 struct mgmt_rp_pair_device rp;
2867 struct pending_cmd *cmd;
2868 u8 sec_level, auth_type;
2869 struct hci_conn *conn;
2874 memset(&rp, 0, sizeof(rp));
2875 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2876 rp.addr.type = cp->addr.type;
2878 if (!bdaddr_type_is_valid(cp->addr.type))
2879 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2880 MGMT_STATUS_INVALID_PARAMS,
2885 if (!hdev_is_powered(hdev)) {
2886 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2887 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2891 sec_level = BT_SECURITY_MEDIUM;
2892 auth_type = HCI_AT_DEDICATED_BONDING;
2894 if (cp->addr.type == BDADDR_BREDR) {
2895 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2900 /* Convert from L2CAP channel address type to HCI address type
2902 if (cp->addr.type == BDADDR_LE_PUBLIC)
2903 addr_type = ADDR_LE_DEV_PUBLIC;
2905 addr_type = ADDR_LE_DEV_RANDOM;
2907 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2908 sec_level, auth_type);
2914 if (PTR_ERR(conn) == -EBUSY)
2915 status = MGMT_STATUS_BUSY;
2917 status = MGMT_STATUS_CONNECT_FAILED;
2919 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2925 if (conn->connect_cfm_cb) {
2926 hci_conn_drop(conn);
2927 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2932 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2935 hci_conn_drop(conn);
2939 /* For LE, just connecting isn't a proof that the pairing finished */
2940 if (cp->addr.type == BDADDR_BREDR) {
2941 conn->connect_cfm_cb = pairing_complete_cb;
2942 conn->security_cfm_cb = pairing_complete_cb;
2943 conn->disconn_cfm_cb = pairing_complete_cb;
2945 conn->connect_cfm_cb = le_pairing_complete_cb;
2946 conn->security_cfm_cb = le_pairing_complete_cb;
2947 conn->disconn_cfm_cb = le_pairing_complete_cb;
2950 conn->io_capability = cp->io_cap;
2951 cmd->user_data = conn;
2953 if (conn->state == BT_CONNECTED &&
2954 hci_conn_security(conn, sec_level, auth_type))
2955 pairing_complete(cmd, 0);
2960 hci_dev_unlock(hdev);
2964 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2967 struct mgmt_addr_info *addr = data;
2968 struct pending_cmd *cmd;
2969 struct hci_conn *conn;
2976 if (!hdev_is_powered(hdev)) {
2977 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2978 MGMT_STATUS_NOT_POWERED);
2982 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2984 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2985 MGMT_STATUS_INVALID_PARAMS);
2989 conn = cmd->user_data;
2991 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2992 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2993 MGMT_STATUS_INVALID_PARAMS);
2997 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2999 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3000 addr, sizeof(*addr));
3002 hci_dev_unlock(hdev);
3006 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3007 struct mgmt_addr_info *addr, u16 mgmt_op,
3008 u16 hci_op, __le32 passkey)
3010 struct pending_cmd *cmd;
3011 struct hci_conn *conn;
3016 if (!hdev_is_powered(hdev)) {
3017 err = cmd_complete(sk, hdev->id, mgmt_op,
3018 MGMT_STATUS_NOT_POWERED, addr,
3023 if (addr->type == BDADDR_BREDR)
3024 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3026 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3029 err = cmd_complete(sk, hdev->id, mgmt_op,
3030 MGMT_STATUS_NOT_CONNECTED, addr,
3035 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3036 /* Continue with pairing via SMP */
3037 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 err = cmd_complete(sk, hdev->id, mgmt_op,
3041 MGMT_STATUS_SUCCESS, addr,
3044 err = cmd_complete(sk, hdev->id, mgmt_op,
3045 MGMT_STATUS_FAILED, addr,
3051 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3057 /* Continue with pairing via HCI */
3058 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3059 struct hci_cp_user_passkey_reply cp;
3061 bacpy(&cp.bdaddr, &addr->bdaddr);
3062 cp.passkey = passkey;
3063 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3065 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3069 mgmt_pending_remove(cmd);
3072 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper that rejects a PIN
 * request via the shared user_pairing_resp() worker (passkey unused).
 */
3076 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3077 void *data, u16 len)
3079 struct mgmt_cp_pin_code_neg_reply *cp = data;
3083 return user_pairing_resp(sk, hdev, &cp->addr,
3084 MGMT_OP_PIN_CODE_NEG_REPLY,
3085 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler — validates the fixed command size
 * then accepts the numeric-comparison request via user_pairing_resp().
 */
3088 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3091 struct mgmt_cp_user_confirm_reply *cp = data;
3095 if (len != sizeof(*cp))
3096 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3097 MGMT_STATUS_INVALID_PARAMS);
3099 return user_pairing_resp(sk, hdev, &cp->addr,
3100 MGMT_OP_USER_CONFIRM_REPLY,
3101 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — rejects the numeric-comparison
 * request via the shared user_pairing_resp() worker.
 */
3104 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3105 void *data, u16 len)
3107 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3111 return user_pairing_resp(sk, hdev, &cp->addr,
3112 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3113 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — forwards the user-entered passkey
 * to user_pairing_resp(), which sends it in hci_cp_user_passkey_reply.
 */
3116 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3119 struct mgmt_cp_user_passkey_reply *cp = data;
3123 return user_pairing_resp(sk, hdev, &cp->addr,
3124 MGMT_OP_USER_PASSKEY_REPLY,
3125 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — rejects a passkey request via
 * the shared user_pairing_resp() worker (no passkey payload).
 */
3128 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3129 void *data, u16 len)
3131 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3135 return user_pairing_resp(sk, hdev, &cp->addr,
3136 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3137 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request (no immediate I/O; runs when the request is executed).
 */
3140 static void update_name(struct hci_request *req)
3142 struct hci_dev *hdev = req->hdev;
3143 struct hci_cp_write_local_name cp;
3145 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3147 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: finds the pending mgmt
 * command and answers it with either a status (HCI failure) or a
 * command-complete, then drops the pending entry.
 */
3150 static void set_name_complete(struct hci_dev *hdev, u8 status)
3152 struct mgmt_cp_set_local_name *cp;
3153 struct pending_cmd *cmd;
3155 BT_DBG("status 0x%02x", status);
3159 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3166 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3167 mgmt_status(status));
3169 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3172 mgmt_pending_remove(cmd);
3175 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler.  Short-circuits when nothing changed;
 * when powered off just stores the names and emits Local Name Changed;
 * otherwise queues Write Local Name (and LE scan response update) and
 * completes asynchronously in set_name_complete().
 */
3178 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_set_local_name *cp = data;
3182 struct pending_cmd *cmd;
3183 struct hci_request req;
3190 /* If the old values are the same as the new ones just return a
3191 * direct command complete event.
3193 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3194 !memcmp(hdev->short_name, cp->short_name,
3195 sizeof(hdev->short_name))) {
3196 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name has no HCI side, so it is always stored directly. */
3201 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3203 if (!hdev_is_powered(hdev)) {
3204 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3206 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3211 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3217 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3223 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3225 hci_req_init(&req, hdev);
3227 if (lmp_bredr_capable(hdev)) {
3232 /* The name is stored in the scan response data and so
3233 * no need to udpate the advertising data here.
3235 if (lmp_le_capable(hdev))
3236 update_scan_rsp_data(&req);
3238 err = hci_req_run(&req, set_name_complete);
3240 mgmt_pending_remove(cmd);
3243 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.  Needs a powered, SSP-capable
 * controller and no duplicate request in flight; then issues either the
 * extended (Secure Connections enabled) or the classic Read Local OOB
 * Data HCI command and completes from the HCI event path.
 */
3247 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3248 void *data, u16 data_len)
3250 struct pending_cmd *cmd;
3253 BT_DBG("%s", hdev->name);
3257 if (!hdev_is_powered(hdev)) {
3258 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3259 MGMT_STATUS_NOT_POWERED);
3263 if (!lmp_ssp_capable(hdev)) {
3264 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3265 MGMT_STATUS_NOT_SUPPORTED);
3269 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3270 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3275 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3281 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3282 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3285 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3288 mgmt_pending_remove(cmd);
3291 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Dispatches on the command length:
 * legacy (hash+randomizer) vs. extended (Secure Connections) payloads;
 * any other length is rejected as invalid parameters.
 */
3295 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3296 void *data, u16 len)
3300 BT_DBG("%s ", hdev->name);
3304 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3305 struct mgmt_cp_add_remote_oob_data *cp = data;
3308 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3309 cp->hash, cp->randomizer);
3311 status = MGMT_STATUS_FAILED;
3313 status = MGMT_STATUS_SUCCESS;
3315 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3316 status, &cp->addr, sizeof(cp->addr));
3317 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3318 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3321 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3327 status = MGMT_STATUS_FAILED;
3329 status = MGMT_STATUS_SUCCESS;
3331 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3332 status, &cp->addr, sizeof(cp->addr));
3334 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3335 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3336 MGMT_STATUS_INVALID_PARAMS);
3339 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler — deletes stored OOB data for
 * the given address and reports success or invalid-params accordingly.
 */
3343 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3344 void *data, u16 len)
3346 struct mgmt_cp_remove_remote_oob_data *cp = data;
3350 BT_DBG("%s", hdev->name);
3354 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3356 status = MGMT_STATUS_INVALID_PARAMS;
3358 status = MGMT_STATUS_SUCCESS;
3360 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3361 status, &cp->addr, sizeof(cp->addr));
3363 hci_dev_unlock(hdev);
/* Fail a pending Start Discovery: reset discovery state to STOPPED and
 * complete the pending mgmt command with the mapped HCI status, echoing
 * the discovery type back to the caller.
 */
3367 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3369 struct pending_cmd *cmd;
3373 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3375 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3379 type = hdev->discovery.type;
3381 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3382 &type, sizeof(type));
3383 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery.  On failure delegates
 * to mgmt_start_discovery_failed(); on success enters FINDING state and
 * arms the LE scan-disable delayed work for LE/interleaved discovery
 * (BR/EDR inquiry times out on its own, so no timeout is armed there).
 */
3388 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3390 unsigned long timeout = 0;
3392 BT_DBG("status %d", status);
3396 mgmt_start_discovery_failed(hdev, status);
3397 hci_dev_unlock(hdev);
3402 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3403 hci_dev_unlock(hdev);
3405 switch (hdev->discovery.type) {
3406 case DISCOV_TYPE_LE:
3407 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3410 case DISCOV_TYPE_INTERLEAVED:
3411 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3414 case DISCOV_TYPE_BREDR:
3418 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3424 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler.  Validates adapter state (powered, no
 * periodic inquiry, discovery currently stopped), records the requested
 * discovery type, and builds an HCI request: a GIAC inquiry for BR/EDR,
 * or active LE scan setup (random address, scan params, scan enable) for
 * LE and interleaved discovery.  Completion continues asynchronously in
 * start_discovery_complete().
 */
3427 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3428 void *data, u16 len)
3430 struct mgmt_cp_start_discovery *cp = data;
3431 struct pending_cmd *cmd;
3432 struct hci_cp_le_set_scan_param param_cp;
3433 struct hci_cp_le_set_scan_enable enable_cp;
3434 struct hci_cp_inquiry inq_cp;
3435 struct hci_request req;
3436 /* General inquiry access code (GIAC) */
3437 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3438 u8 status, own_addr_type;
3441 BT_DBG("%s", hdev->name);
3445 if (!hdev_is_powered(hdev)) {
3446 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3447 MGMT_STATUS_NOT_POWERED);
3451 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3452 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3457 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3458 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3463 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3469 hdev->discovery.type = cp->type;
3471 hci_req_init(&req, hdev);
3473 switch (hdev->discovery.type) {
3474 case DISCOV_TYPE_BREDR:
3475 status = mgmt_bredr_support(hdev);
3477 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3479 mgmt_pending_remove(cmd);
3483 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3484 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3486 mgmt_pending_remove(cmd);
3490 hci_inquiry_cache_flush(hdev);
3492 memset(&inq_cp, 0, sizeof(inq_cp));
3493 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3494 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3495 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3498 case DISCOV_TYPE_LE:
3499 case DISCOV_TYPE_INTERLEAVED:
3500 status = mgmt_le_support(hdev);
3502 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3504 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR enabled. */
3508 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3509 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3510 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3511 MGMT_STATUS_NOT_SUPPORTED);
3512 mgmt_pending_remove(cmd);
3516 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3517 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3518 MGMT_STATUS_REJECTED);
3519 mgmt_pending_remove(cmd);
3523 /* If controller is scanning, it means the background scanning
3524 * is running. Thus, we should temporarily stop it in order to
3525 * set the discovery scanning parameters.
3527 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3528 hci_req_add_le_scan_disable(&req);
/* NOTE(review): "¶m_cp" below is a mojibake of "&param_cp"
 * ('&' + 'para' collapsed into U+00B6) — confirm against upstream
 * mgmt.c before relying on this listing.
 */
3530 memset(¶m_cp, 0, sizeof(param_cp));
3532 /* All active scans will be done with either a resolvable
3533 * private address (when privacy feature has been enabled)
3534 * or unresolvable private address.
3536 err = hci_update_random_address(&req, true, &own_addr_type);
3538 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3539 MGMT_STATUS_FAILED);
3540 mgmt_pending_remove(cmd);
3544 param_cp.type = LE_SCAN_ACTIVE;
3545 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3546 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3547 param_cp.own_address_type = own_addr_type;
3548 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3551 memset(&enable_cp, 0, sizeof(enable_cp));
3552 enable_cp.enable = LE_SCAN_ENABLE;
3553 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3554 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* default: unknown discovery type. */
3559 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3560 MGMT_STATUS_INVALID_PARAMS);
3561 mgmt_pending_remove(cmd);
3565 err = hci_req_run(&req, start_discovery_complete);
3567 mgmt_pending_remove(cmd);
3569 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3572 hci_dev_unlock(hdev);
/* Fail a pending Stop Discovery: complete the pending mgmt command with
 * the mapped HCI status, echoing back the current discovery type.
 */
3576 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3578 struct pending_cmd *cmd;
3581 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3585 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3586 &hdev->discovery.type, sizeof(hdev->discovery.type));
3587 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: report failure via
 * mgmt_stop_discovery_failed() or move discovery state to STOPPED.
 */
3592 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3594 BT_DBG("status %d", status);
3599 mgmt_stop_discovery_failed(hdev, status);
3603 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3606 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler.  Rejects when no discovery is active
 * or the type doesn't match; otherwise queues hci_stop_discovery() and
 * transitions to STOPPING.  If the request produced no HCI traffic
 * (-ENODATA), completes immediately and goes straight to STOPPED.
 */
3609 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3612 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3613 struct pending_cmd *cmd;
3614 struct hci_request req;
3617 BT_DBG("%s", hdev->name);
3621 if (!hci_discovery_active(hdev)) {
3622 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3623 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3624 sizeof(mgmt_cp->type));
3628 if (hdev->discovery.type != mgmt_cp->type) {
3629 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3630 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3631 sizeof(mgmt_cp->type));
3635 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3641 hci_req_init(&req, hdev);
3643 hci_stop_discovery(&req);
3645 err = hci_req_run(&req, stop_discovery_complete);
3647 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3651 mgmt_pending_remove(cmd);
3653 /* If no HCI commands were sent we're done */
3654 if (err == -ENODATA) {
3655 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3656 &mgmt_cp->type, sizeof(mgmt_cp->type));
3657 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3661 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler.  During active discovery, marks an
 * inquiry-cache entry's name as known or still needed (the latter
 * re-queues it for name resolution) and completes with the address.
 */
3665 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3668 struct mgmt_cp_confirm_name *cp = data;
3669 struct inquiry_entry *e;
3672 BT_DBG("%s", hdev->name);
3676 if (!hci_discovery_active(hdev)) {
3677 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3678 MGMT_STATUS_FAILED, &cp->addr,
3683 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3685 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3686 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3691 if (cp->name_known) {
3692 e->name_state = NAME_KNOWN;
3695 e->name_state = NAME_NEEDED;
3696 hci_inquiry_cache_update_resolve(hdev, e);
3699 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3703 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler — validates the address type and adds the
 * device to the kernel blacklist, replying with the affected address.
 */
3707 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3710 struct mgmt_cp_block_device *cp = data;
3714 BT_DBG("%s", hdev->name);
3716 if (!bdaddr_type_is_valid(cp->addr.type))
3717 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3718 MGMT_STATUS_INVALID_PARAMS,
3719 &cp->addr, sizeof(cp->addr));
3723 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3725 status = MGMT_STATUS_FAILED;
3727 status = MGMT_STATUS_SUCCESS;
3729 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3730 &cp->addr, sizeof(cp->addr));
3732 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler — mirror of block_device(): removes the
 * device from the blacklist; a failed delete maps to invalid params
 * (the entry did not exist).
 */
3737 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3740 struct mgmt_cp_unblock_device *cp = data;
3744 BT_DBG("%s", hdev->name);
3746 if (!bdaddr_type_is_valid(cp->addr.type))
3747 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3748 MGMT_STATUS_INVALID_PARAMS,
3749 &cp->addr, sizeof(cp->addr));
3753 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3755 status = MGMT_STATUS_INVALID_PARAMS;
3757 status = MGMT_STATUS_SUCCESS;
3759 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3760 &cp->addr, sizeof(cp->addr));
3762 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler.  Validates the DI source (0x0000-0x0002),
 * stores the Device ID quadruple on hdev, replies immediately, and runs
 * an HCI request (presumably to refresh EIR with the new DID — confirm;
 * the request-building lines are missing from this sampled view).
 */
3767 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3770 struct mgmt_cp_set_device_id *cp = data;
3771 struct hci_request req;
3775 BT_DBG("%s", hdev->name);
3777 source = __le16_to_cpu(cp->source);
3779 if (source > 0x0002)
3780 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3781 MGMT_STATUS_INVALID_PARAMS);
3785 hdev->devid_source = source;
3786 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3787 hdev->devid_product = __le16_to_cpu(cp->product);
3788 hdev->devid_version = __le16_to_cpu(cp->version);
3790 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3792 hci_req_init(&req, hdev);
3794 hci_req_run(&req, NULL);
3796 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on error, answer all
 * pending SET_ADVERTISING commands with the mapped status; on success,
 * send settings responses and emit New Settings to other sockets.
 */
3801 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3803 struct cmd_lookup match = { NULL, hdev };
3806 u8 mgmt_err = mgmt_status(status);
3808 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3809 cmd_status_rsp, &mgmt_err);
3813 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3816 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler.  Requires LE support and a boolean
 * value.  When powered off, unchanged, or an LE connection exists, just
 * toggles the HCI_ADVERTISING flag and responds directly; otherwise
 * queues enable/disable_advertising() and completes asynchronously.
 */
3822 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3825 struct mgmt_mode *cp = data;
3826 struct pending_cmd *cmd;
3827 struct hci_request req;
3828 u8 val, enabled, status;
3831 BT_DBG("request for %s", hdev->name);
3833 status = mgmt_le_support(hdev);
3835 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3838 if (cp->val != 0x00 && cp->val != 0x01)
3839 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3840 MGMT_STATUS_INVALID_PARAMS);
3845 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3847 /* The following conditions are ones which mean that we should
3848 * not do any HCI communication but directly send a mgmt
3849 * response to user space (after toggling the flag if
3852 if (!hdev_is_powered(hdev) || val == enabled ||
3853 hci_conn_num(hdev, LE_LINK) > 0) {
3854 bool changed = false;
3856 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3857 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3861 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3866 err = new_settings(hdev, sk);
/* Only one advertising/LE state change may be in flight at a time. */
3871 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3872 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3873 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3878 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3884 hci_req_init(&req, hdev);
3887 enable_advertising(&req);
3889 disable_advertising(&req);
3891 err = hci_req_run(&req, set_advertising_complete);
3893 mgmt_pending_remove(cmd);
3896 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.  Only allowed on LE-capable,
 * powered-off adapters.  A non-zero address must not be BDADDR_NONE and
 * must have the two most significant bits set, per the static random
 * address format in the Bluetooth Core Specification.
 */
3900 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3901 void *data, u16 len)
3903 struct mgmt_cp_set_static_address *cp = data;
3906 BT_DBG("%s", hdev->name);
3908 if (!lmp_le_capable(hdev))
3909 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3910 MGMT_STATUS_NOT_SUPPORTED);
3912 if (hdev_is_powered(hdev))
3913 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3914 MGMT_STATUS_REJECTED);
3916 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3917 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3918 return cmd_status(sk, hdev->id,
3919 MGMT_OP_SET_STATIC_ADDRESS,
3920 MGMT_STATUS_INVALID_PARAMS);
3922 /* Two most significant bits shall be set */
3923 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3924 return cmd_status(sk, hdev->id,
3925 MGMT_OP_SET_STATIC_ADDRESS,
3926 MGMT_STATUS_INVALID_PARAMS);
3931 bacpy(&hdev->static_addr, &cp->bdaddr);
3933 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3935 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.  Validates interval and window
 * against the HCI LE scan range (0x0004-0x4000) and window <= interval,
 * stores them, and restarts passive background scanning (if running and
 * not discovering) so the new parameters take effect immediately.
 */
3940 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3941 void *data, u16 len)
3943 struct mgmt_cp_set_scan_params *cp = data;
3944 __u16 interval, window;
3947 BT_DBG("%s", hdev->name);
3949 if (!lmp_le_capable(hdev))
3950 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3951 MGMT_STATUS_NOT_SUPPORTED);
3953 interval = __le16_to_cpu(cp->interval);
3955 if (interval < 0x0004 || interval > 0x4000)
3956 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3957 MGMT_STATUS_INVALID_PARAMS);
3959 window = __le16_to_cpu(cp->window);
3961 if (window < 0x0004 || window > 0x4000)
3962 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3963 MGMT_STATUS_INVALID_PARAMS);
3965 if (window > interval)
3966 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3967 MGMT_STATUS_INVALID_PARAMS);
3971 hdev->le_scan_interval = interval;
3972 hdev->le_scan_window = window;
3974 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3976 /* If background scan is running, restart it so new parameters are
3979 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3980 hdev->discovery.state == DISCOVERY_STOPPED) {
3981 struct hci_request req;
3983 hci_req_init(&req, hdev);
3985 hci_req_add_le_scan_disable(&req);
3986 hci_req_add_le_passive_scan(&req);
3988 hci_req_run(&req, NULL);
3991 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on failure sends
 * a status; on success syncs the HCI_FAST_CONNECTABLE flag with the
 * requested value, sends the settings response, and emits New Settings.
 */
3996 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3998 struct pending_cmd *cmd;
4000 BT_DBG("status 0x%02x", status);
4004 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4009 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4010 mgmt_status(status));
4012 struct mgmt_mode *cp = cmd->param;
4015 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4017 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4019 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4020 new_settings(hdev, cmd->sk);
4023 mgmt_pending_remove(cmd);
4026 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Requires BR/EDR enabled on a
 * >= 1.2 controller, a boolean value, a powered and connectable adapter,
 * and no duplicate request in flight; then queues the page-scan change
 * via write_fast_connectable() and completes asynchronously.
 */
4029 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4030 void *data, u16 len)
4032 struct mgmt_mode *cp = data;
4033 struct pending_cmd *cmd;
4034 struct hci_request req;
4037 BT_DBG("%s", hdev->name);
4039 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4040 hdev->hci_ver < BLUETOOTH_VER_1_2)
4041 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4042 MGMT_STATUS_NOT_SUPPORTED);
4044 if (cp->val != 0x00 && cp->val != 0x01)
4045 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4046 MGMT_STATUS_INVALID_PARAMS);
4048 if (!hdev_is_powered(hdev))
4049 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4050 MGMT_STATUS_NOT_POWERED);
4052 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4053 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4054 MGMT_STATUS_REJECTED);
4058 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4059 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state is already active. */
4064 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4065 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4070 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4077 hci_req_init(&req, hdev);
4079 write_fast_connectable(&req, cp->val);
4081 err = hci_req_run(&req, fast_connectable_complete);
4083 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4084 MGMT_STATUS_FAILED);
4085 mgmt_pending_remove(cmd);
4089 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable reflecting the current connectable and
 * discoverable flags, after making sure fast connectable page-scan
 * parameters are reset to normal.
 */
4094 static void set_bredr_scan(struct hci_request *req)
4096 struct hci_dev *hdev = req->hdev;
4099 /* Ensure that fast connectable is disabled. This function will
4100 * not do anything if the page scan parameters are already what
4103 write_fast_connectable(req, false);
4105 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4107 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4108 scan |= SCAN_INQUIRY;
4111 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on failure, roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error; on
 * success, send the settings response and emit New Settings.
 */
4114 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4116 struct pending_cmd *cmd;
4118 BT_DBG("status 0x%02x", status);
4122 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4127 u8 mgmt_err = mgmt_status(status);
4129 /* We need to restore the flag if related HCI commands
4132 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4134 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4136 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4137 new_settings(hdev, cmd->sk);
4140 mgmt_pending_remove(cmd);
4143 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler (dual-mode controllers only, LE must stay
 * enabled).  When powered off, toggling off also clears the BR/EDR
 * dependent flags; disabling while powered is rejected.  Enabling while
 * powered flips the flag early (so update_adv_data() builds the right
 * flags), queues scan-enable + advertising-data updates, and completes
 * in set_bredr_complete().
 */
4146 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4148 struct mgmt_mode *cp = data;
4149 struct pending_cmd *cmd;
4150 struct hci_request req;
4153 BT_DBG("request for %s", hdev->name);
4155 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4156 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4157 MGMT_STATUS_NOT_SUPPORTED);
4159 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4160 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4161 MGMT_STATUS_REJECTED);
4163 if (cp->val != 0x00 && cp->val != 0x01)
4164 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4165 MGMT_STATUS_INVALID_PARAMS);
4169 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4170 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4174 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every setting that depends on it. */
4176 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4177 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4178 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4179 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4180 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4183 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4185 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4189 err = new_settings(hdev, sk);
4193 /* Reject disabling when powered on */
4195 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4196 MGMT_STATUS_REJECTED);
4200 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4201 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4206 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4212 /* We need to flip the bit already here so that update_adv_data
4213 * generates the correct flags.
4215 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4217 hci_req_init(&req, hdev);
4219 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4220 set_bredr_scan(&req);
4222 /* Since only the advertising data flags will change, there
4223 * is no need to update the scan response data.
4225 update_adv_data(&req);
4227 err = hci_req_run(&req, set_bredr_complete);
4229 mgmt_pending_remove(cmd);
4232 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.  Value 0x00 disables, 0x01 enables,
 * 0x02 enables SC-only mode.  Requires BR/EDR support and either an
 * SC-capable controller or the HCI_FORCE_SC debug override.  When
 * powered off only the flags are toggled; otherwise a Write Secure
 * Connections Support HCI command is sent and SC_ONLY tracked alongside.
 */
4236 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4237 void *data, u16 len)
4239 struct mgmt_mode *cp = data;
4240 struct pending_cmd *cmd;
4244 BT_DBG("request for %s", hdev->name);
4246 status = mgmt_bredr_support(hdev);
4248 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4251 if (!lmp_sc_capable(hdev) &&
4252 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4253 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4254 MGMT_STATUS_NOT_SUPPORTED);
4256 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4257 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4258 MGMT_STATUS_INVALID_PARAMS);
4262 if (!hdev_is_powered(hdev)) {
4266 changed = !test_and_set_bit(HCI_SC_ENABLED,
4268 if (cp->val == 0x02)
4269 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4271 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4273 changed = test_and_clear_bit(HCI_SC_ENABLED,
4275 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4278 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4283 err = new_settings(hdev, sk);
4288 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4289 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested SC / SC-only state: respond directly. */
4296 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4297 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4298 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4302 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4308 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4310 mgmt_pending_remove(cmd);
4314 if (cp->val == 0x02)
4315 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4317 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4320 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler — toggles the HCI_DEBUG_KEYS flag and
 * emits New Settings when the value actually changed.
 */
4324 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4325 void *data, u16 len)
4327 struct mgmt_mode *cp = data;
4331 BT_DBG("request for %s", hdev->name);
4333 if (cp->val != 0x00 && cp->val != 0x01)
4334 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4335 MGMT_STATUS_INVALID_PARAMS);
4340 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4342 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4344 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4349 err = new_settings(hdev, sk);
4352 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler (LE only, powered off only).  Enabling
 * stores the supplied IRK, sets HCI_PRIVACY and forces RPA regeneration;
 * disabling wipes the IRK.  Either way HCI_RPA_RESOLVING is set, since
 * userspace supporting this command is expected to manage IRKs.
 */
4356 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4359 struct mgmt_cp_set_privacy *cp = cp_data;
4363 BT_DBG("request for %s", hdev->name);
4365 if (!lmp_le_capable(hdev))
4366 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4367 MGMT_STATUS_NOT_SUPPORTED);
4369 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4370 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4371 MGMT_STATUS_INVALID_PARAMS);
4373 if (hdev_is_powered(hdev))
4374 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4375 MGMT_STATUS_REJECTED);
4379 /* If user space supports this command it is also expected to
4380 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4382 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4385 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4386 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4387 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4389 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4390 memset(hdev->irk, 0, sizeof(hdev->irk));
4391 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4394 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4399 err = new_settings(hdev, sk);
4402 hci_dev_unlock(hdev);
/* Validate an IRK entry from Load IRKs: the address must be LE public,
 * or LE random with the static-random-address top two bits set.
 */
4406 static bool irk_is_valid(struct mgmt_irk_info *irk)
4408 switch (irk->addr.type) {
4409 case BDADDR_LE_PUBLIC:
4412 case BDADDR_LE_RANDOM:
4413 /* Two most significant bits shall be set */
4414 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.  Verifies the length matches the declared
 * irk_count and each entry is valid, then replaces the SMP IRK store
 * wholesale and sets HCI_RPA_RESOLVING.
 * NOTE(review): the expected_len computation at original line 4437 uses
 * a plain u16 multiply — overflow bounds on irk_count are not visible in
 * this sampled listing; confirm against upstream.
 */
4422 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4425 struct mgmt_cp_load_irks *cp = cp_data;
4426 u16 irk_count, expected_len;
4429 BT_DBG("request for %s", hdev->name);
4431 if (!lmp_le_capable(hdev))
4432 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4433 MGMT_STATUS_NOT_SUPPORTED);
4435 irk_count = __le16_to_cpu(cp->irk_count);
4437 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4438 if (expected_len != len) {
4439 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4441 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4442 MGMT_STATUS_INVALID_PARAMS);
4445 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4447 for (i = 0; i < irk_count; i++) {
4448 struct mgmt_irk_info *key = &cp->irks[i];
4450 if (!irk_is_valid(key))
4451 return cmd_status(sk, hdev->id,
4453 MGMT_STATUS_INVALID_PARAMS);
4458 hci_smp_irks_clear(hdev);
4460 for (i = 0; i < irk_count; i++) {
4461 struct mgmt_irk_info *irk = &cp->irks[i];
4464 if (irk->addr.type == BDADDR_LE_PUBLIC)
4465 addr_type = ADDR_LE_DEV_PUBLIC;
4467 addr_type = ADDR_LE_DEV_RANDOM;
4469 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4473 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4475 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4477 hci_dev_unlock(hdev);
/* Validate an LTK entry from Load Long Term Keys: master must be a
 * boolean, and the address LE public or a well-formed LE static random
 * address (top two bits set).
 */
4482 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4484 if (key->master != 0x00 && key->master != 0x01)
4487 switch (key->addr.type) {
4488 case BDADDR_LE_PUBLIC:
4491 case BDADDR_LE_RANDOM:
4492 /* Two most significant bits shall be set */
4493 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.  Verifies the length matches the
 * declared key_count and every entry is valid, then clears the SMP LTK
 * store and re-adds each key with its address type, role (master/slave)
 * and authentication level mapped from the mgmt encoding.
 */
4501 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4502 void *cp_data, u16 len)
4504 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4505 u16 key_count, expected_len;
4508 BT_DBG("request for %s", hdev->name);
4510 if (!lmp_le_capable(hdev))
4511 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4512 MGMT_STATUS_NOT_SUPPORTED);
4514 key_count = __le16_to_cpu(cp->key_count);
4516 expected_len = sizeof(*cp) + key_count *
4517 sizeof(struct mgmt_ltk_info);
4518 if (expected_len != len) {
4519 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4521 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4522 MGMT_STATUS_INVALID_PARAMS);
4525 BT_DBG("%s key_count %u", hdev->name, key_count);
4527 for (i = 0; i < key_count; i++) {
4528 struct mgmt_ltk_info *key = &cp->keys[i];
4530 if (!ltk_is_valid(key))
4531 return cmd_status(sk, hdev->id,
4532 MGMT_OP_LOAD_LONG_TERM_KEYS,
4533 MGMT_STATUS_INVALID_PARAMS);
4538 hci_smp_ltks_clear(hdev);
4540 for (i = 0; i < key_count; i++) {
4541 struct mgmt_ltk_info *key = &cp->keys[i];
4542 u8 type, addr_type, authenticated;
4544 if (key->addr.type == BDADDR_LE_PUBLIC)
4545 addr_type = ADDR_LE_DEV_PUBLIC;
4547 addr_type = ADDR_LE_DEV_RANDOM;
4552 type = HCI_SMP_LTK_SLAVE;
4554 switch (key->type) {
4555 case MGMT_LTK_UNAUTHENTICATED:
4556 authenticated = 0x00;
4558 case MGMT_LTK_AUTHENTICATED:
4559 authenticated = 0x01;
4565 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4566 authenticated, key->val, key->enc_size, key->ediv,
4570 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4573 hci_dev_unlock(hdev);
/* Context passed through mgmt_pending_foreach() when answering Get Conn
 * Info requests: the connection being reported, whether the TX power
 * values read back are usable, and (per the users below) a mgmt_status
 * field not visible in this sampled listing.
 */
4578 struct cmd_conn_lookup {
4579 struct hci_conn *conn;
4580 bool valid_tx_power;
/* mgmt_pending_foreach() callback: answer one pending Get Conn Info
 * command for the matching connection with cached RSSI and TX power
 * (TX values substituted with HCI_TX_POWER_INVALID when not valid),
 * then drop the connection reference and the pending entry.
 */
4584 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4586 struct cmd_conn_lookup *match = data;
4587 struct mgmt_cp_get_conn_info *cp;
4588 struct mgmt_rp_get_conn_info rp;
4589 struct hci_conn *conn = cmd->user_data;
4591 if (conn != match->conn)
4594 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4596 memset(&rp, 0, sizeof(rp));
4597 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4598 rp.addr.type = cp->addr.type;
4600 if (!match->mgmt_status) {
4601 rp.rssi = conn->rssi;
4603 if (match->valid_tx_power) {
4604 rp.tx_power = conn->tx_power;
4605 rp.max_tx_power = conn->max_tx_power;
4607 rp.tx_power = HCI_TX_POWER_INVALID;
4608 rp.max_tx_power = HCI_TX_POWER_INVALID;
4612 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4613 match->mgmt_status, &rp, sizeof(rp));
4615 hci_conn_drop(conn);
4617 mgmt_pending_remove(cmd);
/* HCI request completion handler for the Read RSSI / Read TX Power
 * request issued by get_conn_info(). Recovers the connection handle
 * from the last sent command, then replies to all pending
 * MGMT_OP_GET_CONN_INFO commands for that connection.
 */
4620 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4622 struct hci_cp_read_rssi *cp;
4623 struct hci_conn *conn;
4624 struct cmd_conn_lookup match;
4627 BT_DBG("status 0x%02x", status);
4631 /* TX power data is valid in case request completed successfully,
4632 * otherwise we assume it's not valid. At the moment we assume that
4633 * either both or none of current and max values are valid to keep code
4636 match.valid_tx_power = !status;
4638 /* Commands sent in request are either Read RSSI or Read Transmit Power
4639 * Level so we check which one was last sent to retrieve connection
4640 * handle. Both commands have handle as first parameter so it's safe to
4641 * cast data on the same command struct.
4643 * First command sent is always Read RSSI and we fail only if it fails.
4644 * In other case we simply override error to indicate success as we
4645 * already remembered if TX power value is actually valid.
4647 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4649 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4654 BT_ERR("invalid sent_cmd in response");
4658 handle = __le16_to_cpu(cp->handle);
4659 conn = hci_conn_hash_lookup_handle(hdev, handle);
4661 BT_ERR("unknown handle (%d) in response", handle);
4666 match.mgmt_status = mgmt_status(status);
4668 /* Cache refresh is complete, now reply for mgmt request for given
4671 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4672 get_conn_info_complete, &match);
4675 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for a
 * connected device. Replies directly from the hci_conn cache when
 * fresh enough, otherwise queues an HCI request to refresh the values
 * and defers the reply to conn_info_refresh_complete().
 */
4678 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4681 struct mgmt_cp_get_conn_info *cp = data;
4682 struct mgmt_rp_get_conn_info rp;
4683 struct hci_conn *conn;
4684 unsigned long conn_info_age;
4687 BT_DBG("%s", hdev->name);
4689 memset(&rp, 0, sizeof(rp));
4690 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4691 rp.addr.type = cp->addr.type;
/* Reject unknown address types before touching any state */
4693 if (!bdaddr_type_is_valid(cp->addr.type))
4694 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4695 MGMT_STATUS_INVALID_PARAMS,
4700 if (!hdev_is_powered(hdev)) {
4701 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4702 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR and LE connections live under different link types in the hash */
4706 if (cp->addr.type == BDADDR_BREDR)
4707 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4710 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4712 if (!conn || conn->state != BT_CONNECTED) {
4713 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4714 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4718 /* To avoid client trying to guess when to poll again for information we
4719 * calculate conn info age as random value between min/max set in hdev.
4721 conn_info_age = hdev->conn_info_min_age +
4722 prandom_u32_max(hdev->conn_info_max_age -
4723 hdev->conn_info_min_age);
4725 /* Query controller to refresh cached values if they are too old or were
4728 if (time_after(jiffies, conn->conn_info_timestamp +
4729 msecs_to_jiffies(conn_info_age)) ||
4730 !conn->conn_info_timestamp) {
4731 struct hci_request req;
4732 struct hci_cp_read_tx_power req_txp_cp;
4733 struct hci_cp_read_rssi req_rssi_cp;
4734 struct pending_cmd *cmd;
4736 hci_req_init(&req, hdev);
4737 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4738 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4741 /* For LE links TX power does not change thus we don't need to
4742 * query for it once value is known.
4744 if (!bdaddr_type_is_le(cp->addr.type) ||
4745 conn->tx_power == HCI_TX_POWER_INVALID) {
4746 req_txp_cp.handle = cpu_to_le16(conn->handle);
4747 req_txp_cp.type = 0x00;
4748 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4749 sizeof(req_txp_cp), &req_txp_cp);
4752 /* Max TX power needs to be read only once per connection */
4753 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4754 req_txp_cp.handle = cpu_to_le16(conn->handle);
4755 req_txp_cp.type = 0x01;
4756 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4757 sizeof(req_txp_cp), &req_txp_cp);
4760 err = hci_req_run(&req, conn_info_refresh_complete);
4764 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until get_conn_info_complete() drops it */
4771 hci_conn_hold(conn);
4772 cmd->user_data = conn;
4774 conn->conn_info_timestamp = jiffies;
4776 /* Cache is valid, just reply with values cached in hci_conn */
4777 rp.rssi = conn->rssi;
4778 rp.tx_power = conn->tx_power;
4779 rp.max_tx_power = conn->max_tx_power;
4781 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4782 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4786 hci_dev_unlock(hdev);
/* Dispatch table for mgmt opcodes, indexed by opcode value. Each entry
 * gives the handler, whether the command payload is variable-length
 * (in which case data_len is the minimum size), and the expected
 * parameter size. Entry order must match the MGMT_OP_* numbering.
 */
4790 static const struct mgmt_handler {
4791 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4795 } mgmt_handlers[] = {
4796 { NULL }, /* 0x0000 (no command) */
4797 { read_version, false, MGMT_READ_VERSION_SIZE },
4798 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4799 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4800 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4801 { set_powered, false, MGMT_SETTING_SIZE },
4802 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4803 { set_connectable, false, MGMT_SETTING_SIZE },
4804 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4805 { set_pairable, false, MGMT_SETTING_SIZE },
4806 { set_link_security, false, MGMT_SETTING_SIZE },
4807 { set_ssp, false, MGMT_SETTING_SIZE },
4808 { set_hs, false, MGMT_SETTING_SIZE },
4809 { set_le, false, MGMT_SETTING_SIZE },
4810 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4811 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4812 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4813 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4814 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4815 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4816 { disconnect, false, MGMT_DISCONNECT_SIZE },
4817 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4818 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4819 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4820 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4821 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4822 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4823 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4824 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4825 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4826 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4827 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4828 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4829 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4830 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4831 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4832 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4833 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4834 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4835 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4836 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4837 { set_advertising, false, MGMT_SETTING_SIZE },
4838 { set_bredr, false, MGMT_SETTING_SIZE },
4839 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4840 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4841 { set_secure_conn, false, MGMT_SETTING_SIZE },
4842 { set_debug_keys, false, MGMT_SETTING_SIZE },
4843 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4844 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4845 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Copies the message from userspace, validates header/length/index,
 * resolves the target hci_dev (if any) and dispatches to the matching
 * mgmt_handlers[] entry.
 */
4849 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4853 struct mgmt_hdr *hdr;
4854 u16 opcode, index, len;
4855 struct hci_dev *hdev = NULL;
4856 const struct mgmt_handler *handler;
4859 BT_DBG("got %zu bytes", msglen);
4861 if (msglen < sizeof(*hdr))
4864 buf = kmalloc(msglen, GFP_KERNEL);
4868 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* All header fields are little-endian on the wire */
4874 opcode = __le16_to_cpu(hdr->opcode);
4875 index = __le16_to_cpu(hdr->index);
4876 len = __le16_to_cpu(hdr->len);
4878 if (len != msglen - sizeof(*hdr)) {
4883 if (index != MGMT_INDEX_NONE) {
4884 hdev = hci_dev_get(index);
4886 err = cmd_status(sk, index, opcode,
4887 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup, or claimed by a user channel, are not
 * available through the mgmt interface.
 */
4891 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4892 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4893 err = cmd_status(sk, index, opcode,
4894 MGMT_STATUS_INVALID_INDEX);
4899 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4900 mgmt_handlers[opcode].func == NULL) {
4901 BT_DBG("Unknown op %u", opcode);
4902 err = cmd_status(sk, index, opcode,
4903 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no index); all others
 * require a valid controller index.
 */
4907 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4908 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4909 err = cmd_status(sk, index, opcode,
4910 MGMT_STATUS_INVALID_INDEX);
4914 handler = &mgmt_handlers[opcode];
4916 if ((handler->var_len && len < handler->data_len) ||
4917 (!handler->var_len && len != handler->data_len)) {
4918 err = cmd_status(sk, index, opcode,
4919 MGMT_STATUS_INVALID_PARAMS);
4924 mgmt_init_hdev(sk, hdev);
4926 cp = buf + sizeof(*hdr);
4928 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered BR/EDR controller to mgmt listeners.
 * Non-BR/EDR device types (e.g. AMP) are not exposed over mgmt.
 */
4942 void mgmt_index_added(struct hci_dev *hdev)
4944 if (hdev->dev_type != HCI_BREDR)
4947 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal: fail every pending mgmt command for
 * this device with INVALID_INDEX, then emit Index Removed.
 */
4950 void mgmt_index_removed(struct hci_dev *hdev)
4952 u8 status = MGMT_STATUS_INVALID_INDEX;
4954 if (hdev->dev_type != HCI_BREDR)
/* opcode 0 matches all pending commands regardless of opcode */
4957 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4959 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* This function requires the caller holds hdev->lock */
/* Re-arm pending LE auto-connections for every stored connection
 * parameter entry marked HCI_AUTO_CONN_ALWAYS.
 */
4963 static void restart_le_auto_conns(struct hci_dev *hdev)
4965 struct hci_conn_params *p;
4967 list_for_each_entry(p, &hdev->le_conn_params, list) {
4968 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4969 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* Completion handler for the power-on HCI request built by
 * powered_update_hci(): restart LE auto-connections, answer pending
 * Set Powered commands and broadcast the new settings.
 */
4973 static void powered_complete(struct hci_dev *hdev, u8 status)
4975 struct cmd_lookup match = { NULL, hdev };
4977 BT_DBG("status 0x%02x", status);
4981 restart_le_auto_conns(hdev);
4983 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4985 new_settings(hdev, match.sk);
4987 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power-on: SSP mode, LE host
 * support, advertising data, link security and scan mode. Returns the
 * hci_req_run() result (an error if the request ended up empty).
 */
4993 static int powered_update_hci(struct hci_dev *hdev)
4995 struct hci_request req;
4998 hci_req_init(&req, hdev);
5000 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5001 !lmp_host_ssp_capable(hdev)) {
5004 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5007 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5008 lmp_bredr_capable(hdev)) {
5009 struct hci_cp_write_le_host_supported cp;
5012 cp.simul = lmp_le_br_capable(hdev);
5014 /* Check first if we already have the right
5015 * host state (host features set)
5017 if (cp.le != lmp_host_le_capable(hdev) ||
5018 cp.simul != lmp_host_le_br_capable(hdev))
5019 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5023 if (lmp_le_capable(hdev)) {
5024 /* Make sure the controller has a good default for
5025 * advertising data. This also applies to the case
5026 * where BR/EDR was toggled during the AUTO_OFF phase.
5028 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5029 update_adv_data(&req);
5030 update_scan_rsp_data(&req);
5033 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5034 enable_advertising(&req);
/* Sync the controller's authentication setting with the mgmt flag */
5037 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5038 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5039 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5040 sizeof(link_sec), &link_sec);
5042 if (lmp_bredr_capable(hdev)) {
5043 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5044 set_bredr_scan(&req);
5050 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer of a power state change. On power-on, kick
 * off powered_update_hci() and defer notification to its completion
 * handler when the request was queued. On power-off, fail all pending
 * commands with NOT_POWERED and signal a zeroed class of device.
 */
5053 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5055 struct cmd_lookup match = { NULL, hdev };
5056 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5057 u8 zero_cod[] = { 0, 0, 0 };
5060 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* 0 means the update request was queued; powered_complete() will
 * send the responses and settings event.
 */
5064 if (powered_update_hci(hdev) == 0)
5067 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5072 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5073 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5075 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5076 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5077 zero_cod, sizeof(zero_cod), NULL);
5080 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command after power-on failed, mapping
 * -ERFKILL to its dedicated mgmt status.
 */
5088 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5090 struct pending_cmd *cmd;
5093 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5097 if (err == -ERFKILL)
5098 status = MGMT_STATUS_RFKILLED;
5100 status = MGMT_STATUS_FAILED;
5102 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5104 mgmt_pending_remove(cmd);
/* Discoverable timeout expired: clear the discoverable flags, restore
 * page-scan-only mode on BR/EDR, refresh LE advertising data and
 * broadcast the new settings.
 */
5107 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5109 struct hci_request req;
5113 /* When discoverable timeout triggers, then just make sure
5114 * the limited discoverable flag is cleared. Even in the case
5115 * of a timeout triggered from general discoverable, it is
5116 * safe to unconditionally clear the flag.
5118 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5119 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5121 hci_req_init(&req, hdev);
5122 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5123 u8 scan = SCAN_PAGE;
5124 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5125 sizeof(scan), &scan);
/* Advertising data carries the discoverable flags, so refresh it */
5128 update_adv_data(&req);
5129 hci_req_run(&req, NULL);
5131 hdev->discov_timeout = 0;
5133 new_settings(hdev, NULL);
5135 hci_dev_unlock(hdev);
/* React to a controller-driven discoverable (inquiry scan) change:
 * update the HCI_DISCOVERABLE flag and, if it actually changed,
 * refresh advertising data and broadcast new settings.
 */
5138 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5142 /* Nothing needed here if there's a pending command since that
5143 * commands request completion callback takes care of everything
5146 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5149 /* Powering off may clear the scan mode - don't let that interfere */
5150 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5154 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5156 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5157 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5161 struct hci_request req;
5163 /* In case this change in discoverable was triggered by
5164 * a disabling of connectable there could be a need to
5165 * update the advertising flags.
5167 hci_req_init(&req, hdev);
5168 update_adv_data(&req);
5169 hci_req_run(&req, NULL);
5171 new_settings(hdev, NULL);
/* React to a controller-driven connectable (page scan) change: update
 * the HCI_CONNECTABLE flag and broadcast new settings on change.
 */
5175 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5179 /* Nothing needed here if there's a pending command since that
5180 * commands request completion callback takes care of everything
5183 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5186 /* Powering off may clear the scan mode - don't let that interfere */
5187 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5193 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5196 new_settings(hdev, NULL);
/* Mirror the controller's LE advertising state into HCI_ADVERTISING. */
5199 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5201 /* Powering off may stop advertising - don't let that interfere */
5202 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5208 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* A Write Scan Enable command failed: fail the pending Set
 * Connectable / Set Discoverable commands that requested the
 * corresponding scan bits.
 */
5211 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5213 u8 mgmt_err = mgmt_status(status);
5215 if (scan & SCAN_PAGE)
5216 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5217 cmd_status_rsp, &mgmt_err);
5219 if (scan & SCAN_INQUIRY)
5220 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5221 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a BR/EDR link key, with store_hint
 * telling userspace whether the key should be persisted.
 */
5224 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5227 struct mgmt_ev_new_link_key ev;
5229 memset(&ev, 0, sizeof(ev));
5231 ev.store_hint = persistent;
5232 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5233 ev.key.addr.type = BDADDR_BREDR;
5234 ev.key.type = key->type;
5235 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5236 ev.key.pin_len = key->pin_len;
5238 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an smp_ltk's authenticated flag to the mgmt LTK type value. */
5241 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5243 if (ltk->authenticated)
5244 return MGMT_LTK_AUTHENTICATED;
5246 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys for non-identity (resolvable
 * or non-resolvable) random addresses get store_hint 0 since those
 * addresses will not be seen again.
 */
5249 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5251 struct mgmt_ev_new_long_term_key ev;
5253 memset(&ev, 0, sizeof(ev));
5255 /* Devices using resolvable or non-resolvable random addresses
5256 * without providing an identity resolving key don't require
5257 * to store long term keys. Their addresses will change the
5260 * Only when a remote device provides an identity address
5261 * make sure the long term key is stored. If the remote
5262 * identity is known, the long term keys are internally
5263 * mapped to the identity address. So allow static random
5264 * and public addresses here.
/* Static random addresses have the two top bits set (0b11) */
5266 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5267 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5268 ev.store_hint = 0x00;
5270 ev.store_hint = persistent;
5272 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5273 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5274 ev.key.type = mgmt_ltk_type(key);
5275 ev.key.enc_size = key->enc_size;
5276 ev.key.ediv = key->ediv;
5277 ev.key.rand = key->rand;
5279 if (key->type == HCI_SMP_LTK)
5282 memcpy(ev.key.val, key->val, sizeof(key->val));
5284 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event; hint storage only when the device actually
 * uses a resolvable private address (irk->rpa set).
 */
5287 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5289 struct mgmt_ev_new_irk ev;
5291 memset(&ev, 0, sizeof(ev));
5293 /* For identity resolving keys from devices that are already
5294 * using a public address or static random address, do not
5295 * ask for storing this key. The identity resolving key really
5296 * is only mandatory for devices using resolvable random
5299 * Storing all identity resolving keys has the downside that
5300 * they will be also loaded on next boot of the system. More
5301 * identity resolving keys, means more time during scanning is
5302 * needed to actually resolve these addresses.
5304 if (bacmp(&irk->rpa, BDADDR_ANY))
5305 ev.store_hint = 0x01;
5307 ev.store_hint = 0x00;
5309 bacpy(&ev.rpa, &irk->rpa);
5310 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5311 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5312 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5314 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event; same store_hint
 * policy as mgmt_new_ltk() for non-identity random addresses.
 */
5317 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5320 struct mgmt_ev_new_csrk ev;
5322 memset(&ev, 0, sizeof(ev));
5324 /* Devices using resolvable or non-resolvable random addresses
5325 * without providing an identity resolving key don't require
5326 * to store signature resolving keys. Their addresses will change
5327 * the next time around.
5329 * Only when a remote device provides an identity address
5330 * make sure the signature resolving key is stored. So allow
5331 * static random and public addresses here.
/* Static random addresses have the two top bits set (0b11) */
5333 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5334 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5335 ev.store_hint = 0x00;
5337 ev.store_hint = persistent;
5339 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5340 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5341 ev.key.master = csrk->master;
5342 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5344 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length, type, data) at offset eir_len and
 * return the new length. Caller must guarantee the buffer has room
 * for data_len + 2 additional bytes.
 */
5347 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5350 eir[eir_len++] = sizeof(type) + data_len;
5351 eir[eir_len++] = type;
5352 memcpy(&eir[eir_len], data, data_len);
5353 eir_len += data_len;
/* Emit a Device Connected event, packing the remote name and class of
 * device (when known) as EIR fields after the fixed part.
 */
5358 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5359 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5363 struct mgmt_ev_device_connected *ev = (void *) buf;
5366 bacpy(&ev->addr.bdaddr, bdaddr);
5367 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5369 ev->flags = __cpu_to_le32(flags);
5372 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Skip an all-zero class of device; it carries no information */
5375 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5376 eir_len = eir_append_data(ev->eir, eir_len,
5377 EIR_CLASS_OF_DEV, dev_class, 3);
5379 ev->eir_len = cpu_to_le16(eir_len);
5381 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5382 sizeof(*ev) + eir_len, NULL);
/* Complete one pending Disconnect command with success and record its
 * socket in *sk so the caller can skip it when broadcasting the
 * Device Disconnected event.
 */
5385 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5387 struct mgmt_cp_disconnect *cp = cmd->param;
5388 struct sock **sk = data;
5389 struct mgmt_rp_disconnect rp;
5391 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5392 rp.addr.type = cp->addr.type;
5394 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5400 mgmt_pending_remove(cmd);
/* Complete one pending Unpair Device command: send the Device
 * Unpaired event to other listeners and reply to the issuer.
 */
5403 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5405 struct hci_dev *hdev = data;
5406 struct mgmt_cp_unpair_device *cp = cmd->param;
5407 struct mgmt_rp_unpair_device rp;
5409 memset(&rp, 0, sizeof(rp));
5410 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5411 rp.addr.type = cp->addr.type;
5413 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5415 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5417 mgmt_pending_remove(cmd);
/* Handle a link disconnect: complete pending Disconnect commands,
 * emit Device Disconnected (except to the issuer's socket) and finish
 * any Unpair Device commands waiting on this disconnect. Also
 * fast-tracks a queued power-off once the last connection is gone.
 */
5420 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5421 u8 link_type, u8 addr_type, u8 reason,
5422 bool mgmt_connected)
5424 struct mgmt_ev_device_disconnected ev;
5425 struct pending_cmd *power_off;
5426 struct sock *sk = NULL;
5428 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5430 struct mgmt_mode *cp = power_off->param;
5432 /* The connection is still in hci_conn_hash so test for 1
5433 * instead of 0 to know if this is the last one.
5435 if (!cp->val && hci_conn_count(hdev) == 1) {
5436 cancel_delayed_work(&hdev->power_off);
5437 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5441 if (!mgmt_connected)
5444 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp() stores the issuer's socket in sk */
5447 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5449 bacpy(&ev.addr.bdaddr, bdaddr);
5450 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5453 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5458 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: still finish pending Unpair Device
 * commands, then complete the matching Disconnect command with the
 * mapped HCI status.
 */
5462 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5463 u8 link_type, u8 addr_type, u8 status)
5465 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5466 struct mgmt_cp_disconnect *cp;
5467 struct mgmt_rp_disconnect rp;
5468 struct pending_cmd *cmd;
5470 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5473 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only answer if the pending command targets this exact address */
5479 if (bacmp(bdaddr, &cp->addr.bdaddr))
5482 if (cp->addr.type != bdaddr_type)
5485 bacpy(&rp.addr.bdaddr, bdaddr);
5486 rp.addr.type = bdaddr_type;
5488 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5489 mgmt_status(status), &rp, sizeof(rp));
5491 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event for an outgoing connection attempt,
 * fast-tracking a queued power-off if this was the last connection.
 */
5494 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5495 u8 addr_type, u8 status)
5497 struct mgmt_ev_connect_failed ev;
5498 struct pending_cmd *power_off;
5500 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5502 struct mgmt_mode *cp = power_off->param;
5504 /* The connection is still in hci_conn_hash so test for 1
5505 * instead of 0 to know if this is the last one.
5507 if (!cp->val && hci_conn_count(hdev) == 1) {
5508 cancel_delayed_work(&hdev->power_off);
5509 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5513 bacpy(&ev.addr.bdaddr, bdaddr);
5514 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5515 ev.status = mgmt_status(status);
5517 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a controller PIN code request to userspace as a mgmt
 * PIN Code Request event (BR/EDR only).
 */
5520 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5522 struct mgmt_ev_pin_code_request ev;
5524 bacpy(&ev.addr.bdaddr, bdaddr);
5525 ev.addr.type = BDADDR_BREDR;
5528 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the mapped HCI status. */
5531 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5534 struct pending_cmd *cmd;
5535 struct mgmt_rp_pin_code_reply rp;
5537 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5541 bacpy(&rp.addr.bdaddr, bdaddr);
5542 rp.addr.type = BDADDR_BREDR;
5544 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5545 mgmt_status(status), &rp, sizeof(rp));
5547 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status.
 */
5550 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5553 struct pending_cmd *cmd;
5554 struct mgmt_rp_pin_code_reply rp;
5556 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5560 bacpy(&rp.addr.bdaddr, bdaddr);
5561 rp.addr.type = BDADDR_BREDR;
5563 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5564 mgmt_status(status), &rp, sizeof(rp));
5566 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing numeric comparison value via the
 * User Confirmation Request event.
 */
5569 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5570 u8 link_type, u8 addr_type, u32 value,
5573 struct mgmt_ev_user_confirm_request ev;
5575 BT_DBG("%s", hdev->name);
5577 bacpy(&ev.addr.bdaddr, bdaddr);
5578 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5579 ev.confirm_hint = confirm_hint;
5580 ev.value = cpu_to_le32(value);
5582 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey via the User Passkey Request event. */
5586 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5587 u8 link_type, u8 addr_type)
5589 struct mgmt_ev_user_passkey_request ev;
5591 BT_DBG("%s", hdev->name);
5593 bacpy(&ev.addr.bdaddr, bdaddr);
5594 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5596 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion for the four user confirm/passkey (neg) reply
 * commands: find the pending command for the given opcode, complete
 * it with the mapped HCI status and remove it.
 */
5600 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5601 u8 link_type, u8 addr_type, u8 status,
5604 struct pending_cmd *cmd;
5605 struct mgmt_rp_user_confirm_reply rp;
5608 cmd = mgmt_pending_find(opcode, hdev);
5612 bacpy(&rp.addr.bdaddr, bdaddr);
5613 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5614 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5617 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply command. */
5622 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5623 u8 link_type, u8 addr_type, u8 status)
5625 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5626 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply command. */
5629 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5630 u8 link_type, u8 addr_type, u8 status)
5632 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5634 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply command. */
5637 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5638 u8 link_type, u8 addr_type, u8 status)
5640 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5641 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply command. */
5644 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5645 u8 link_type, u8 addr_type, u8 status)
5647 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5649 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey
 * being entered on the remote device.
 */
5652 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5653 u8 link_type, u8 addr_type, u32 passkey,
5656 struct mgmt_ev_passkey_notify ev;
5658 BT_DBG("%s", hdev->name);
5660 bacpy(&ev.addr.bdaddr, bdaddr);
5661 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5662 ev.passkey = __cpu_to_le32(passkey);
5663 ev.entered = entered;
5665 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the mapped HCI status. */
5668 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5669 u8 addr_type, u8 status)
5671 struct mgmt_ev_auth_failed ev;
5673 bacpy(&ev.addr.bdaddr, bdaddr);
5674 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5675 ev.status = mgmt_status(status);
5677 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed: on failure, fail pending Set Link
 * Security commands; on success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH flag, answer pending commands and broadcast
 * settings if the flag changed.
 */
5680 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5682 struct cmd_lookup match = { NULL, hdev };
5686 u8 mgmt_err = mgmt_status(status);
5687 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5688 cmd_status_rsp, &mgmt_err);
5692 if (test_bit(HCI_AUTH, &hdev->flags))
5693 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5696 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5699 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5703 new_settings(hdev, match.sk);
/* Queue a Write EIR command that clears the extended inquiry response
 * data (both the local cache and the controller), when supported.
 */
5709 static void clear_eir(struct hci_request *req)
5711 struct hci_dev *hdev = req->hdev;
5712 struct hci_cp_write_eir cp;
5714 if (!lmp_ext_inq_capable(hdev))
5717 memset(hdev->eir, 0, sizeof(hdev->eir));
5719 memset(&cp, 0, sizeof(cp));
5721 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed: update HCI_SSP_ENABLED (and dependent
 * HCI_HS_ENABLED), answer pending Set SSP commands, broadcast
 * settings on change and refresh or clear the EIR data accordingly.
 */
5724 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5726 struct cmd_lookup match = { NULL, hdev };
5727 struct hci_request req;
5728 bool changed = false;
5731 u8 mgmt_err = mgmt_status(status);
/* Enable failed: roll back the optimistically-set flag */
5733 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5734 &hdev->dev_flags)) {
5735 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5736 new_settings(hdev, NULL);
5739 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5745 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5747 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5749 changed = test_and_clear_bit(HCI_HS_ENABLED,
5752 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5755 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5758 new_settings(hdev, match.sk);
5763 hci_req_init(&req, hdev);
5765 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5770 hci_req_run(&req, NULL);
/* Secure Connections enable completed: on failure roll back the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and fail pending commands; on
 * success sync the flags, answer pending Set Secure Conn commands and
 * broadcast settings on change.
 */
5773 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5775 struct cmd_lookup match = { NULL, hdev };
5776 bool changed = false;
5779 u8 mgmt_err = mgmt_status(status);
5782 if (test_and_clear_bit(HCI_SC_ENABLED,
5784 new_settings(hdev, NULL);
5785 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5788 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5789 cmd_status_rsp, &mgmt_err);
5794 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5796 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5797 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5800 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5801 settings_rsp, &match);
5804 new_settings(hdev, match.sk);
/* Foreach-callback capturing the first pending command's socket into
 * the cmd_lookup match (takes a reference with sock_hold()).
 */
5810 static void sk_lookup(struct pending_cmd *cmd, void *data)
5812 struct cmd_lookup *match = data;
5814 if (match->sk == NULL) {
5815 match->sk = cmd->sk;
5816 sock_hold(match->sk);
/* Class of Device update completed: pick up the socket of whichever
 * command triggered it (Set Dev Class / Add UUID / Remove UUID) and
 * broadcast the Class Of Dev Changed event to everyone else.
 */
5820 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5823 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5825 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5826 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5827 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5830 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name update completed: cache the new name and emit Local Name
 * Changed, unless the write came from the power-on sequence.
 */
5837 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5839 struct mgmt_cp_set_local_name ev;
5840 struct pending_cmd *cmd;
5845 memset(&ev, 0, sizeof(ev));
5846 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5847 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5849 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5851 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5853 /* If this is a HCI command related to powering on the
5854 * HCI dev don't send any mgmt signals.
5856 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5860 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5861 cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: reply to the pending command with
 * either the extended (192 + 256 bit, Secure Connections) or the
 * legacy (192 bit only) response format.
 */
5864 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5865 u8 *randomizer192, u8 *hash256,
5866 u8 *randomizer256, u8 status)
5868 struct pending_cmd *cmd;
5870 BT_DBG("%s status %u", hdev->name, status);
5872 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5877 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5878 mgmt_status(status));
/* Extended response only when SC is enabled and 256-bit data exists */
5880 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5881 hash256 && randomizer256) {
5882 struct mgmt_rp_read_local_oob_ext_data rp;
5884 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5885 memcpy(rp.randomizer192, randomizer192,
5886 sizeof(rp.randomizer192));
5888 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5889 memcpy(rp.randomizer256, randomizer256,
5890 sizeof(rp.randomizer256));
5892 cmd_complete(cmd->sk, hdev->id,
5893 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5896 struct mgmt_rp_read_local_oob_data rp;
5898 memcpy(rp.hash, hash192, sizeof(rp.hash));
5899 memcpy(rp.randomizer, randomizer192,
5900 sizeof(rp.randomizer));
5902 cmd_complete(cmd->sk, hdev->id,
5903 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5908 mgmt_pending_remove(cmd);
5911 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5912 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5913 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5917 struct mgmt_ev_device_found *ev = (void *) buf;
5918 struct smp_irk *irk;
5921 if (!hci_discovery_active(hdev))
5924 /* Make sure that the buffer is big enough. The 5 extra bytes
5925 * are for the potential CoD field.
5927 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5930 memset(buf, 0, sizeof(buf));
5932 irk = hci_get_irk(hdev, bdaddr, addr_type);
5934 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5935 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5937 bacpy(&ev->addr.bdaddr, bdaddr);
5938 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5943 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5945 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5948 memcpy(ev->eir, eir, eir_len);
5950 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5951 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5954 if (scan_rsp_len > 0)
5955 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5957 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5958 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5960 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5963 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5964 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5966 struct mgmt_ev_device_found *ev;
5967 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5970 ev = (struct mgmt_ev_device_found *) buf;
5972 memset(buf, 0, sizeof(buf));
5974 bacpy(&ev->addr.bdaddr, bdaddr);
5975 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5978 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5981 ev->eir_len = cpu_to_le16(eir_len);
5983 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5986 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5988 struct mgmt_ev_discovering ev;
5989 struct pending_cmd *cmd;
5991 BT_DBG("%s discovering %u", hdev->name, discovering);
5994 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5996 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5999 u8 type = hdev->discovery.type;
6001 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6003 mgmt_pending_remove(cmd);
6006 memset(&ev, 0, sizeof(ev));
6007 ev.type = hdev->discovery.type;
6008 ev.discovering = discovering;
6010 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6013 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6015 struct pending_cmd *cmd;
6016 struct mgmt_ev_device_blocked ev;
6018 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6020 bacpy(&ev.addr.bdaddr, bdaddr);
6021 ev.addr.type = type;
6023 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6024 cmd ? cmd->sk : NULL);
6027 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6029 struct pending_cmd *cmd;
6030 struct mgmt_ev_device_unblocked ev;
6032 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6034 bacpy(&ev.addr.bdaddr, bdaddr);
6035 ev.addr.type = type;
6037 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6038 cmd ? cmd->sk : NULL);
6041 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6043 BT_DBG("%s status %u", hdev->name, status);
6045 /* Clear the advertising mgmt setting if we failed to re-enable it */
6047 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6048 new_settings(hdev, NULL);
6052 void mgmt_reenable_advertising(struct hci_dev *hdev)
6054 struct hci_request req;
6056 if (hci_conn_num(hdev, LE_LINK) > 0)
6059 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6062 hci_req_init(&req, hdev);
6063 enable_advertising(&req);
6065 /* If this fails we have no option but to let user space know
6066 * that we've disabled advertising.
6068 if (hci_req_run(&req, adv_enable_complete) < 0) {
6069 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6070 new_settings(hdev, NULL);