/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 8
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
133 struct list_head list;
141 /* HCI to MGMT error code conversion table */
142 static u8 mgmt_status_table[] = {
144 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
145 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
146 MGMT_STATUS_FAILED, /* Hardware Failure */
147 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
148 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
149 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
150 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
151 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
153 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
154 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
155 MGMT_STATUS_BUSY, /* Command Disallowed */
156 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
157 MGMT_STATUS_REJECTED, /* Rejected Security */
158 MGMT_STATUS_REJECTED, /* Rejected Personal */
159 MGMT_STATUS_TIMEOUT, /* Host Timeout */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
161 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
162 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
163 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
164 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
165 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
166 MGMT_STATUS_BUSY, /* Repeated Attempts */
167 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
168 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
169 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
170 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
171 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
172 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
173 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
174 MGMT_STATUS_FAILED, /* Unspecified Error */
175 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
176 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
177 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
178 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
179 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
180 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
181 MGMT_STATUS_FAILED, /* Unit Link Key Used */
182 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
183 MGMT_STATUS_TIMEOUT, /* Instant Passed */
184 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
185 MGMT_STATUS_FAILED, /* Transaction Collision */
186 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
187 MGMT_STATUS_REJECTED, /* QoS Rejected */
188 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
189 MGMT_STATUS_REJECTED, /* Insufficient Security */
190 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
191 MGMT_STATUS_BUSY, /* Role Switch Pending */
192 MGMT_STATUS_FAILED, /* Slot Violation */
193 MGMT_STATUS_FAILED, /* Role Switch Failed */
194 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
195 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
196 MGMT_STATUS_BUSY, /* Host Busy Pairing */
197 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
198 MGMT_STATUS_BUSY, /* Controller Busy */
199 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
200 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
201 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
202 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
203 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
206 static u8 mgmt_status(u8 hci_status)
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
211 return MGMT_STATUS_FAILED;
214 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
215 struct sock *skip_sk)
218 struct mgmt_hdr *hdr;
220 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = cpu_to_le16(event);
227 hdr->index = cpu_to_le16(hdev->id);
229 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
230 hdr->len = cpu_to_le16(data_len);
233 memcpy(skb_put(skb, data_len), data, data_len);
236 __net_timestamp(skb);
238 hci_send_to_control(skb, skip_sk);
244 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
247 struct mgmt_hdr *hdr;
248 struct mgmt_ev_cmd_status *ev;
251 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
253 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 hdr = (void *) skb_put(skb, sizeof(*hdr));
259 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
260 hdr->index = cpu_to_le16(index);
261 hdr->len = cpu_to_le16(sizeof(*ev));
263 ev = (void *) skb_put(skb, sizeof(*ev));
265 ev->opcode = cpu_to_le16(cmd);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
275 void *rp, size_t rp_len)
278 struct mgmt_hdr *hdr;
279 struct mgmt_ev_cmd_complete *ev;
282 BT_DBG("sock %p", sk);
284 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288 hdr = (void *) skb_put(skb, sizeof(*hdr));
290 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
291 hdr->index = cpu_to_le16(index);
292 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
294 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
295 ev->opcode = cpu_to_le16(cmd);
299 memcpy(ev->data, rp, rp_len);
301 err = sock_queue_rcv_skb(sk, skb);
308 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
311 struct mgmt_rp_read_version rp;
313 BT_DBG("sock %p", sk);
315 rp.version = MGMT_VERSION;
316 rp.revision = cpu_to_le16(MGMT_REVISION);
318 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_commands *rp;
326 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
327 const u16 num_events = ARRAY_SIZE(mgmt_events);
332 BT_DBG("sock %p", sk);
334 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
336 rp = kmalloc(rp_size, GFP_KERNEL);
340 rp->num_commands = cpu_to_le16(num_commands);
341 rp->num_events = cpu_to_le16(num_events);
343 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
344 put_unaligned_le16(mgmt_commands[i], opcode);
346 for (i = 0; i < num_events; i++, opcode++)
347 put_unaligned_le16(mgmt_events[i], opcode);
349 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
356 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
359 struct mgmt_rp_read_index_list *rp;
365 BT_DBG("sock %p", sk);
367 read_lock(&hci_dev_list_lock);
370 list_for_each_entry(d, &hci_dev_list, list) {
371 if (d->dev_type == HCI_BREDR &&
372 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
376 rp_len = sizeof(*rp) + (2 * count);
377 rp = kmalloc(rp_len, GFP_ATOMIC);
379 read_unlock(&hci_dev_list_lock);
384 list_for_each_entry(d, &hci_dev_list, list) {
385 if (test_bit(HCI_SETUP, &d->dev_flags) ||
386 test_bit(HCI_CONFIG, &d->dev_flags) ||
387 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
390 /* Devices marked as raw-only are neither configured
391 * nor unconfigured controllers.
393 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
396 if (d->dev_type == HCI_BREDR &&
397 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
398 rp->index[count++] = cpu_to_le16(d->id);
399 BT_DBG("Added hci%u", d->id);
403 rp->num_controllers = cpu_to_le16(count);
404 rp_len = sizeof(*rp) + (2 * count);
406 read_unlock(&hci_dev_list_lock);
408 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
416 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
417 void *data, u16 data_len)
419 struct mgmt_rp_read_unconf_index_list *rp;
425 BT_DBG("sock %p", sk);
427 read_lock(&hci_dev_list_lock);
430 list_for_each_entry(d, &hci_dev_list, list) {
431 if (d->dev_type == HCI_BREDR &&
432 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 rp_len = sizeof(*rp) + (2 * count);
437 rp = kmalloc(rp_len, GFP_ATOMIC);
439 read_unlock(&hci_dev_list_lock);
444 list_for_each_entry(d, &hci_dev_list, list) {
445 if (test_bit(HCI_SETUP, &d->dev_flags) ||
446 test_bit(HCI_CONFIG, &d->dev_flags) ||
447 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
450 /* Devices marked as raw-only are neither configured
451 * nor unconfigured controllers.
453 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
456 if (d->dev_type == HCI_BREDR &&
457 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
458 rp->index[count++] = cpu_to_le16(d->id);
459 BT_DBG("Added hci%u", d->id);
463 rp->num_controllers = cpu_to_le16(count);
464 rp_len = sizeof(*rp) + (2 * count);
466 read_unlock(&hci_dev_list_lock);
468 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
476 static bool is_configured(struct hci_dev *hdev)
478 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
479 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
482 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
483 !bacmp(&hdev->public_addr, BDADDR_ANY))
489 static __le32 get_missing_options(struct hci_dev *hdev)
493 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
494 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
497 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
498 !bacmp(&hdev->public_addr, BDADDR_ANY))
499 options |= MGMT_OPTION_PUBLIC_ADDRESS;
501 return cpu_to_le32(options);
504 static int new_options(struct hci_dev *hdev, struct sock *skip)
506 __le32 options = get_missing_options(hdev);
508 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
509 sizeof(options), skip);
512 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
514 __le32 options = get_missing_options(hdev);
516 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
521 void *data, u16 data_len)
523 struct mgmt_rp_read_config_info rp;
526 BT_DBG("sock %p %s", sk, hdev->name);
530 memset(&rp, 0, sizeof(rp));
531 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
533 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
534 options |= MGMT_OPTION_EXTERNAL_CONFIG;
536 if (hdev->set_bdaddr)
537 options |= MGMT_OPTION_PUBLIC_ADDRESS;
539 rp.supported_options = cpu_to_le32(options);
540 rp.missing_options = get_missing_options(hdev);
542 hci_dev_unlock(hdev);
544 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 static u32 get_supported_settings(struct hci_dev *hdev)
552 settings |= MGMT_SETTING_POWERED;
553 settings |= MGMT_SETTING_BONDABLE;
554 settings |= MGMT_SETTING_DEBUG_KEYS;
555 settings |= MGMT_SETTING_CONNECTABLE;
556 settings |= MGMT_SETTING_DISCOVERABLE;
558 if (lmp_bredr_capable(hdev)) {
559 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
560 settings |= MGMT_SETTING_FAST_CONNECTABLE;
561 settings |= MGMT_SETTING_BREDR;
562 settings |= MGMT_SETTING_LINK_SECURITY;
564 if (lmp_ssp_capable(hdev)) {
565 settings |= MGMT_SETTING_SSP;
566 settings |= MGMT_SETTING_HS;
569 if (lmp_sc_capable(hdev) ||
570 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
571 settings |= MGMT_SETTING_SECURE_CONN;
574 if (lmp_le_capable(hdev)) {
575 settings |= MGMT_SETTING_LE;
576 settings |= MGMT_SETTING_ADVERTISING;
577 settings |= MGMT_SETTING_SECURE_CONN;
578 settings |= MGMT_SETTING_PRIVACY;
581 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
583 settings |= MGMT_SETTING_CONFIGURATION;
588 static u32 get_current_settings(struct hci_dev *hdev)
592 if (hdev_is_powered(hdev))
593 settings |= MGMT_SETTING_POWERED;
595 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
596 settings |= MGMT_SETTING_CONNECTABLE;
598 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_FAST_CONNECTABLE;
601 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_DISCOVERABLE;
604 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_BONDABLE;
607 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BREDR;
610 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_LE;
613 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LINK_SECURITY;
616 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
617 settings |= MGMT_SETTING_SSP;
619 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_HS;
622 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
623 settings |= MGMT_SETTING_ADVERTISING;
625 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
626 settings |= MGMT_SETTING_SECURE_CONN;
628 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
629 settings |= MGMT_SETTING_DEBUG_KEYS;
631 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
632 settings |= MGMT_SETTING_PRIVACY;
637 #define PNP_INFO_SVCLASS_ID 0x1200
639 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
641 u8 *ptr = data, *uuids_start = NULL;
642 struct bt_uuid *uuid;
647 list_for_each_entry(uuid, &hdev->uuids, list) {
650 if (uuid->size != 16)
653 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
657 if (uuid16 == PNP_INFO_SVCLASS_ID)
663 uuids_start[1] = EIR_UUID16_ALL;
667 /* Stop if not enough space to put next UUID */
668 if ((ptr - data) + sizeof(u16) > len) {
669 uuids_start[1] = EIR_UUID16_SOME;
673 *ptr++ = (uuid16 & 0x00ff);
674 *ptr++ = (uuid16 & 0xff00) >> 8;
675 uuids_start[0] += sizeof(uuid16);
681 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
683 u8 *ptr = data, *uuids_start = NULL;
684 struct bt_uuid *uuid;
689 list_for_each_entry(uuid, &hdev->uuids, list) {
690 if (uuid->size != 32)
696 uuids_start[1] = EIR_UUID32_ALL;
700 /* Stop if not enough space to put next UUID */
701 if ((ptr - data) + sizeof(u32) > len) {
702 uuids_start[1] = EIR_UUID32_SOME;
706 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
708 uuids_start[0] += sizeof(u32);
714 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
716 u8 *ptr = data, *uuids_start = NULL;
717 struct bt_uuid *uuid;
722 list_for_each_entry(uuid, &hdev->uuids, list) {
723 if (uuid->size != 128)
729 uuids_start[1] = EIR_UUID128_ALL;
733 /* Stop if not enough space to put next UUID */
734 if ((ptr - data) + 16 > len) {
735 uuids_start[1] = EIR_UUID128_SOME;
739 memcpy(ptr, uuid->uuid, 16);
741 uuids_start[0] += 16;
747 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
749 struct pending_cmd *cmd;
751 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
752 if (cmd->opcode == opcode)
759 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
760 struct hci_dev *hdev,
763 struct pending_cmd *cmd;
765 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
766 if (cmd->user_data != data)
768 if (cmd->opcode == opcode)
775 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
780 name_len = strlen(hdev->dev_name);
782 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
784 if (name_len > max_len) {
786 ptr[1] = EIR_NAME_SHORT;
788 ptr[1] = EIR_NAME_COMPLETE;
790 ptr[0] = name_len + 1;
792 memcpy(ptr + 2, hdev->dev_name, name_len);
794 ad_len += (name_len + 2);
795 ptr += (name_len + 2);
801 static void update_scan_rsp_data(struct hci_request *req)
803 struct hci_dev *hdev = req->hdev;
804 struct hci_cp_le_set_scan_rsp_data cp;
807 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
810 memset(&cp, 0, sizeof(cp));
812 len = create_scan_rsp_data(hdev, cp.data);
814 if (hdev->scan_rsp_data_len == len &&
815 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
818 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
819 hdev->scan_rsp_data_len = len;
823 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
826 static u8 get_adv_discov_flags(struct hci_dev *hdev)
828 struct pending_cmd *cmd;
830 /* If there's a pending mgmt command the flags will not yet have
831 * their final values, so check for this first.
833 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
835 struct mgmt_mode *cp = cmd->param;
837 return LE_AD_GENERAL;
838 else if (cp->val == 0x02)
839 return LE_AD_LIMITED;
841 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
842 return LE_AD_LIMITED;
843 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_GENERAL;
850 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
852 u8 ad_len = 0, flags = 0;
854 flags |= get_adv_discov_flags(hdev);
856 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
857 flags |= LE_AD_NO_BREDR;
860 BT_DBG("adv flags 0x%02x", flags);
870 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
872 ptr[1] = EIR_TX_POWER;
873 ptr[2] = (u8) hdev->adv_tx_power;
882 static void update_adv_data(struct hci_request *req)
884 struct hci_dev *hdev = req->hdev;
885 struct hci_cp_le_set_adv_data cp;
888 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
891 memset(&cp, 0, sizeof(cp));
893 len = create_adv_data(hdev, cp.data);
895 if (hdev->adv_data_len == len &&
896 memcmp(cp.data, hdev->adv_data, len) == 0)
899 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
900 hdev->adv_data_len = len;
904 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
907 int mgmt_update_adv_data(struct hci_dev *hdev)
909 struct hci_request req;
911 hci_req_init(&req, hdev);
912 update_adv_data(&req);
914 return hci_req_run(&req, NULL);
917 static void create_eir(struct hci_dev *hdev, u8 *data)
922 name_len = strlen(hdev->dev_name);
928 ptr[1] = EIR_NAME_SHORT;
930 ptr[1] = EIR_NAME_COMPLETE;
932 /* EIR Data length */
933 ptr[0] = name_len + 1;
935 memcpy(ptr + 2, hdev->dev_name, name_len);
937 ptr += (name_len + 2);
940 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
942 ptr[1] = EIR_TX_POWER;
943 ptr[2] = (u8) hdev->inq_tx_power;
948 if (hdev->devid_source > 0) {
950 ptr[1] = EIR_DEVICE_ID;
952 put_unaligned_le16(hdev->devid_source, ptr + 2);
953 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
954 put_unaligned_le16(hdev->devid_product, ptr + 6);
955 put_unaligned_le16(hdev->devid_version, ptr + 8);
960 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
961 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
962 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 static void update_eir(struct hci_request *req)
967 struct hci_dev *hdev = req->hdev;
968 struct hci_cp_write_eir cp;
970 if (!hdev_is_powered(hdev))
973 if (!lmp_ext_inq_capable(hdev))
976 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
979 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
982 memset(&cp, 0, sizeof(cp));
984 create_eir(hdev, cp.data);
986 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
989 memcpy(hdev->eir, cp.data, sizeof(cp.data));
991 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
994 static u8 get_service_classes(struct hci_dev *hdev)
996 struct bt_uuid *uuid;
999 list_for_each_entry(uuid, &hdev->uuids, list)
1000 val |= uuid->svc_hint;
1005 static void update_class(struct hci_request *req)
1007 struct hci_dev *hdev = req->hdev;
1010 BT_DBG("%s", hdev->name);
1012 if (!hdev_is_powered(hdev))
1015 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1018 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1021 cod[0] = hdev->minor_class;
1022 cod[1] = hdev->major_class;
1023 cod[2] = get_service_classes(hdev);
1025 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1028 if (memcmp(cod, hdev->dev_class, 3) == 0)
1031 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1034 static bool get_connectable(struct hci_dev *hdev)
1036 struct pending_cmd *cmd;
1038 /* If there's a pending mgmt command the flag will not yet have
1039 * it's final value, so check for this first.
1041 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1043 struct mgmt_mode *cp = cmd->param;
1047 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1050 static void disable_advertising(struct hci_request *req)
1054 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1057 static void enable_advertising(struct hci_request *req)
1059 struct hci_dev *hdev = req->hdev;
1060 struct hci_cp_le_set_adv_param cp;
1061 u8 own_addr_type, enable = 0x01;
1064 if (hci_conn_num(hdev, LE_LINK) > 0)
1067 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1068 disable_advertising(req);
1070 /* Clear the HCI_LE_ADV bit temporarily so that the
1071 * hci_update_random_address knows that it's safe to go ahead
1072 * and write a new random address. The flag will be set back on
1073 * as soon as the SET_ADV_ENABLE HCI command completes.
1075 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1077 connectable = get_connectable(hdev);
1079 /* Set require_privacy to true only when non-connectable
1080 * advertising is used. In that case it is fine to use a
1081 * non-resolvable private address.
1083 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1086 memset(&cp, 0, sizeof(cp));
1087 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1088 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1089 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1090 cp.own_address_type = own_addr_type;
1091 cp.channel_map = hdev->le_adv_channel_map;
1093 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1095 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1098 static void service_cache_off(struct work_struct *work)
1100 struct hci_dev *hdev = container_of(work, struct hci_dev,
1101 service_cache.work);
1102 struct hci_request req;
1104 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1107 hci_req_init(&req, hdev);
1114 hci_dev_unlock(hdev);
1116 hci_req_run(&req, NULL);
1119 static void rpa_expired(struct work_struct *work)
1121 struct hci_dev *hdev = container_of(work, struct hci_dev,
1123 struct hci_request req;
1127 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1129 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1132 /* The generation of a new RPA and programming it into the
1133 * controller happens in the enable_advertising() function.
1135 hci_req_init(&req, hdev);
1136 enable_advertising(&req);
1137 hci_req_run(&req, NULL);
1140 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1142 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1145 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1146 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1148 /* Non-mgmt controlled devices get this bit set
1149 * implicitly so that pairing works for them, however
1150 * for mgmt we require user-space to explicitly enable
1153 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157 void *data, u16 data_len)
1159 struct mgmt_rp_read_info rp;
1161 BT_DBG("sock %p %s", sk, hdev->name);
1165 memset(&rp, 0, sizeof(rp));
1167 bacpy(&rp.bdaddr, &hdev->bdaddr);
1169 rp.version = hdev->hci_ver;
1170 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1172 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1175 memcpy(rp.dev_class, hdev->dev_class, 3);
1177 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1180 hci_dev_unlock(hdev);
1182 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1186 static void mgmt_pending_free(struct pending_cmd *cmd)
1193 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1194 struct hci_dev *hdev, void *data,
1197 struct pending_cmd *cmd;
1199 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1203 cmd->opcode = opcode;
1204 cmd->index = hdev->id;
1206 cmd->param = kmalloc(len, GFP_KERNEL);
1213 memcpy(cmd->param, data, len);
1218 list_add(&cmd->list, &hdev->mgmt_pending);
1223 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1224 void (*cb)(struct pending_cmd *cmd,
1228 struct pending_cmd *cmd, *tmp;
1230 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1231 if (opcode > 0 && cmd->opcode != opcode)
1238 static void mgmt_pending_remove(struct pending_cmd *cmd)
1240 list_del(&cmd->list);
1241 mgmt_pending_free(cmd);
1244 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1246 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1248 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1252 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1254 BT_DBG("%s status 0x%02x", hdev->name, status);
1256 if (hci_conn_count(hdev) == 0) {
1257 cancel_delayed_work(&hdev->power_off);
1258 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1262 static bool hci_stop_discovery(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1265 struct hci_cp_remote_name_req_cancel cp;
1266 struct inquiry_entry *e;
1268 switch (hdev->discovery.state) {
1269 case DISCOVERY_FINDING:
1270 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1271 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1273 cancel_delayed_work(&hdev->le_scan_disable);
1274 hci_req_add_le_scan_disable(req);
1279 case DISCOVERY_RESOLVING:
1280 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1285 bacpy(&cp.bdaddr, &e->data.bdaddr);
1286 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1292 /* Passive scanning */
1293 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1294 hci_req_add_le_scan_disable(req);
1304 static int clean_up_hci_state(struct hci_dev *hdev)
1306 struct hci_request req;
1307 struct hci_conn *conn;
1308 bool discov_stopped;
1311 hci_req_init(&req, hdev);
1313 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1314 test_bit(HCI_PSCAN, &hdev->flags)) {
1316 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1319 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1320 disable_advertising(&req);
1322 discov_stopped = hci_stop_discovery(&req);
1324 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1325 struct hci_cp_disconnect dc;
1326 struct hci_cp_reject_conn_req rej;
1328 switch (conn->state) {
1331 dc.handle = cpu_to_le16(conn->handle);
1332 dc.reason = 0x15; /* Terminated due to Power Off */
1333 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1336 if (conn->type == LE_LINK)
1337 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1339 else if (conn->type == ACL_LINK)
1340 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1344 bacpy(&rej.bdaddr, &conn->dst);
1345 rej.reason = 0x15; /* Terminated due to Power Off */
1346 if (conn->type == ACL_LINK)
1347 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1349 else if (conn->type == SCO_LINK)
1350 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1356 err = hci_req_run(&req, clean_up_hci_complete);
1357 if (!err && discov_stopped)
1358 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1363 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 struct mgmt_mode *cp = data;
1367 struct pending_cmd *cmd;
1370 BT_DBG("request for %s", hdev->name);
1372 if (cp->val != 0x00 && cp->val != 0x01)
1373 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1374 MGMT_STATUS_INVALID_PARAMS);
1378 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1379 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1385 cancel_delayed_work(&hdev->power_off);
1388 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1390 err = mgmt_powered(hdev, 1);
1395 if (!!cp->val == hdev_is_powered(hdev)) {
1396 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1400 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1407 queue_work(hdev->req_workqueue, &hdev->power_on);
1410 /* Disconnect connections, stop scans, etc */
1411 err = clean_up_hci_state(hdev);
1413 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1414 HCI_POWER_OFF_TIMEOUT);
1416 /* ENODATA means there were no HCI commands queued */
1417 if (err == -ENODATA) {
1418 cancel_delayed_work(&hdev->power_off);
1419 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1425 hci_dev_unlock(hdev);
1429 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1433 ev = cpu_to_le32(get_current_settings(hdev));
1435 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438 int mgmt_new_settings(struct hci_dev *hdev)
1440 return new_settings(hdev, NULL);
1445 struct hci_dev *hdev;
1449 static void settings_rsp(struct pending_cmd *cmd, void *data)
1451 struct cmd_lookup *match = data;
1453 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1455 list_del(&cmd->list);
1457 if (match->sk == NULL) {
1458 match->sk = cmd->sk;
1459 sock_hold(match->sk);
1462 mgmt_pending_free(cmd);
1465 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1469 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1470 mgmt_pending_remove(cmd);
1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1475 if (!lmp_bredr_capable(hdev))
1476 return MGMT_STATUS_NOT_SUPPORTED;
1477 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1478 return MGMT_STATUS_REJECTED;
1480 return MGMT_STATUS_SUCCESS;
1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1485 if (!lmp_le_capable(hdev))
1486 return MGMT_STATUS_NOT_SUPPORTED;
1487 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1488 return MGMT_STATUS_REJECTED;
1490 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for set_discoverable(): on failure it
 * reports the error to the pending command's socket and clears the
 * limited-discoverable flag; on success it updates HCI_DISCOVERABLE,
 * (re)arms the discoverable timeout, answers the pending command and
 * broadcasts New Settings, then refreshes page scan.
 * NOTE(review): listing is sampled — lock/brace lines are missing here.
 */
1493 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1495 	struct pending_cmd *cmd;
1496 	struct mgmt_mode *cp;
1497 	struct hci_request req;
1500 	BT_DBG("status 0x%02x", status);
1504 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Failure path: propagate the HCI error as a mgmt status */
1509 		u8 mgmt_err = mgmt_status(status);
1510 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1511 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1517 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the discoverable-off delayed work when a timeout was requested */
1520 		if (hdev->discov_timeout > 0) {
1521 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1522 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1526 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1530 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
/* Broadcast only when the setting actually changed */
1533 		new_settings(hdev, cmd->sk);
1535 	/* When the discoverable mode gets changed, make sure
1536 	 * that class of device has the limited discoverable
1537 	 * bit correctly set. Also update page scan based on whitelist
1540 	hci_req_init(&req, hdev);
1541 	hci_update_page_scan(hdev, &req);
1543 	hci_req_run(&req, NULL);
1546 	mgmt_pending_remove(cmd);
1549 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout combination, handles the
 * powered-off and no-HCI-needed cases directly, and otherwise builds an
 * HCI request (IAC LAP + scan enable for BR/EDR, advertising data for
 * LE-only) completed by set_discoverable_complete().
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
1552 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1555 	struct mgmt_cp_set_discoverable *cp = data;
1556 	struct pending_cmd *cmd;
1557 	struct hci_request req;
1562 	BT_DBG("request for %s", hdev->name);
/* Need at least one of LE/BR-EDR enabled to be discoverable at all */
1564 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1565 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1566 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1567 				  MGMT_STATUS_REJECTED);
1569 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1570 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571 				  MGMT_STATUS_INVALID_PARAMS);
1573 	timeout = __le16_to_cpu(cp->timeout);
1575 	/* Disabling discoverable requires that no timeout is set,
1576 	 * and enabling limited discoverable requires a timeout.
1578 	if ((cp->val == 0x00 && timeout > 0) ||
1579 	    (cp->val == 0x02 && timeout == 0))
1580 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1581 				  MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off */
1585 	if (!hdev_is_powered(hdev) && timeout > 0) {
1586 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 				 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable change may be in flight */
1591 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1592 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1593 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable */
1598 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1599 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 				 MGMT_STATUS_REJECTED);
/* Powered off: just flip the flag, no HCI traffic needed */
1604 	if (!hdev_is_powered(hdev)) {
1605 		bool changed = false;
1607 		/* Setting limited discoverable when powered off is
1608 		 * not a valid operation since it requires a timeout
1609 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1611 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1612 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1616 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1621 		err = new_settings(hdev, sk);
1626 	/* If the current mode is the same, then just update the timeout
1627 	 * value with the new value. And if only the timeout gets updated,
1628 	 * then no need for any HCI transactions.
1630 	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1631 	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1632 					  &hdev->dev_flags)) {
1633 		cancel_delayed_work(&hdev->discov_off);
1634 		hdev->discov_timeout = timeout;
1636 		if (cp->val && hdev->discov_timeout > 0) {
1637 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1638 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1642 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1646 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1652 	/* Cancel any potential discoverable timeout that might be
1653 	 * still active and store new timeout value. The arming of
1654 	 * the timeout happens in the complete handler.
1656 	cancel_delayed_work(&hdev->discov_off);
1657 	hdev->discov_timeout = timeout;
1659 	/* Limited discoverable mode */
1660 	if (cp->val == 0x02)
1661 		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1663 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1665 	hci_req_init(&req, hdev);
1667 	/* The procedure for LE-only controllers is much simpler - just
1668 	 * update the advertising data.
1670 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1676 		struct hci_cp_write_current_iac_lap hci_cp;
1678 		if (cp->val == 0x02) {
1679 			/* Limited discoverable mode */
1680 			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 followed by GIAC 0x9e8b33, little-endian 3-byte LAPs */
1681 			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1682 			hci_cp.iac_lap[1] = 0x8b;
1683 			hci_cp.iac_lap[2] = 0x9e;
1684 			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1685 			hci_cp.iac_lap[4] = 0x8b;
1686 			hci_cp.iac_lap[5] = 0x9e;
1688 			/* General discoverable mode */
1690 			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1691 			hci_cp.iac_lap[1] = 0x8b;
1692 			hci_cp.iac_lap[2] = 0x9e;
1695 		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1696 			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1698 		scan |= SCAN_INQUIRY;
1700 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1703 	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1706 	update_adv_data(&req);
1708 	err = hci_req_run(&req, set_discoverable_complete);
1710 		mgmt_pending_remove(cmd);
1713 	hci_dev_unlock(hdev);
/* Queue HCI commands adjusting page scan activity/type for "fast
 * connectable" mode: interlaced scanning with a short (160 ms) interval
 * when enabled, standard scanning with the default 1.28 s interval when
 * disabled. Commands are only added when the values actually change.
 * Requires BR/EDR enabled and controller version >= 1.2.
 * NOTE(review): sampled listing — the if/else skeleton is missing.
 */
1717 static void write_fast_connectable(struct hci_request *req, bool enable)
1719 	struct hci_dev *hdev = req->hdev;
1720 	struct hci_cp_write_page_scan_activity acp;
1723 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Page scan type/activity commands need Bluetooth 1.2 or later */
1726 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1730 		type = PAGE_SCAN_TYPE_INTERLACED;
1732 		/* 160 msec page scan interval */
1733 		acp.interval = cpu_to_le16(0x0100);
1735 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1737 		/* default 1.28 sec page scan */
1738 		acp.interval = cpu_to_le16(0x0800);
1741 	acp.window = cpu_to_le16(0x0012);
/* Only emit the HCI commands when the parameters actually differ */
1743 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1744 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1745 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1748 	if (hdev->page_scan_type != type)
1749 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for set_connectable(): on error it
 * reports the failure to the pending command; on success it updates
 * HCI_CONNECTABLE (clearing HCI_DISCOVERABLE too when disabling),
 * answers the pending command, and if anything changed broadcasts
 * New Settings and refreshes page scan / background scan state.
 * NOTE(review): sampled listing — lock/brace lines are missing.
 */
1752 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1754 	struct pending_cmd *cmd;
1755 	struct mgmt_mode *cp;
1756 	bool conn_changed, discov_changed;
1758 	BT_DBG("status 0x%02x", status);
1762 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1767 		u8 mgmt_err = mgmt_status(status);
1768 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1774 		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1776 		discov_changed = false;
/* Disabling connectable also drops discoverable */
1778 		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1780 		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1784 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1786 	if (conn_changed || discov_changed) {
1787 		new_settings(hdev, cmd->sk);
1788 		hci_update_page_scan(hdev, NULL);
1790 			mgmt_update_adv_data(hdev);
1791 		hci_update_background_scan(hdev);
1795 	mgmt_pending_remove(cmd);
1798 	hci_dev_unlock(hdev);
/* Flag-only variant used when no HCI commands are needed (powered off,
 * or the request resulted in no HCI traffic): toggle HCI_CONNECTABLE
 * (clearing HCI_DISCOVERABLE on disable), send the settings response,
 * and on change refresh page/background scan and broadcast New Settings.
 * NOTE(review): sampled listing — 'changed = true' / err plumbing missing.
 */
1801 static int set_connectable_update_settings(struct hci_dev *hdev,
1802 					   struct sock *sk, u8 val)
1804 	bool changed = false;
1807 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1811 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1814 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1817 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1822 		hci_update_page_scan(hdev, NULL);
1823 		hci_update_background_scan(hdev);
1824 		return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Validates the mode, handles the
 * powered-off case via set_connectable_update_settings(), rejects
 * concurrent discoverable/connectable changes, then builds an HCI
 * request (scan enable + fast-connectable + advertising updates)
 * completed by set_connectable_complete().
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
1830 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1833 	struct mgmt_mode *cp = data;
1834 	struct pending_cmd *cmd;
1835 	struct hci_request req;
1839 	BT_DBG("request for %s", hdev->name);
1841 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1842 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1843 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1844 				  MGMT_STATUS_REJECTED);
1846 	if (cp->val != 0x00 && cp->val != 0x01)
1847 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1848 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic, just update flags and respond */
1852 	if (!hdev_is_powered(hdev)) {
1853 		err = set_connectable_update_settings(hdev, sk, cp->val);
1857 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1858 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1859 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1864 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1870 	hci_req_init(&req, hdev);
1872 	/* If BR/EDR is not enabled and we disable advertising as a
1873 	 * by-product of disabling connectable, we need to update the
1874 	 * advertising flags.
1876 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1878 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1879 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1881 		update_adv_data(&req);
1882 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1886 			/* If we don't have any whitelist entries just
1887 			 * disable all scanning. If there are entries
1888 			 * and we had both page and inquiry scanning
1889 			 * enabled then fall back to only page scanning.
1890 			 * Otherwise no changes are needed.
1892 			if (list_empty(&hdev->whitelist))
1893 				scan = SCAN_DISABLED;
1894 			else if (test_bit(HCI_ISCAN, &hdev->flags))
1897 				goto no_scan_update;
/* Dropping inquiry scan: cancel any running discoverable timeout */
1899 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
1900 		    hdev->discov_timeout > 0)
1901 			cancel_delayed_work(&hdev->discov_off);
1904 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1908 	/* If we're going from non-connectable to connectable or
1909 	 * vice-versa when fast connectable is enabled ensure that fast
1910 	 * connectable gets disabled. write_fast_connectable won't do
1911 	 * anything if the page scan parameters are already what they
1914 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1915 		write_fast_connectable(&req, false);
1917 	/* Update the advertising parameters if necessary */
1918 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1919 		enable_advertising(&req);
1921 	err = hci_req_run(&req, set_connectable_complete);
1923 		mgmt_pending_remove(cmd);
/* ENODATA: no HCI commands were queued — fall back to flag-only path */
1924 		if (err == -ENODATA)
1925 			err = set_connectable_update_settings(hdev, sk,
1931 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, so no HCI
 * commands are needed — toggle HCI_BONDABLE, respond, and broadcast
 * New Settings when the flag changed.
 * NOTE(review): sampled listing — lock and if/else lines are missing.
 */
1935 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1938 	struct mgmt_mode *cp = data;
1942 	BT_DBG("request for %s", hdev->name);
1944 	if (cp->val != 0x00 && cp->val != 0x01)
1945 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1946 				  MGMT_STATUS_INVALID_PARAMS);
1951 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1953 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1955 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1960 		err = new_settings(hdev, sk);
1963 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Requires BR/EDR support. When
 * powered off only the HCI_LINK_SECURITY flag is toggled; when powered
 * the change is applied via HCI Write Auth Enable, tracked by a pending
 * command. No-op (flags already matching) paths respond immediately.
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
1967 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1970 	struct mgmt_mode *cp = data;
1971 	struct pending_cmd *cmd;
1975 	BT_DBG("request for %s", hdev->name);
1977 	status = mgmt_bredr_support(hdev);
1979 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1982 	if (cp->val != 0x00 && cp->val != 0x01)
1983 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1984 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update */
1988 	if (!hdev_is_powered(hdev)) {
1989 		bool changed = false;
1991 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1992 					  &hdev->dev_flags)) {
1993 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1997 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2002 			err = new_settings(hdev, sk);
2007 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2008 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to do */
2015 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2016 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2020 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2026 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2028 		mgmt_pending_remove(cmd);
2033 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Requires BR/EDR and SSP-capable controller.
 * Powered off: toggle HCI_SSP_ENABLED (disabling SSP also disables HS).
 * Powered on: send HCI Write SSP Mode (and turn off SSP debug mode when
 * disabling SSP while debug keys are in use), tracked as pending.
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2037 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2039 	struct mgmt_mode *cp = data;
2040 	struct pending_cmd *cmd;
2044 	BT_DBG("request for %s", hdev->name);
2046 	status = mgmt_bredr_support(hdev);
2048 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2050 	if (!lmp_ssp_capable(hdev))
2051 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2052 				  MGMT_STATUS_NOT_SUPPORTED);
2054 	if (cp->val != 0x00 && cp->val != 0x01)
2055 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2056 				  MGMT_STATUS_INVALID_PARAMS);
2060 	if (!hdev_is_powered(hdev)) {
2064 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
2067 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* SSP off implies High Speed off as well */
2070 				changed = test_and_clear_bit(HCI_HS_ENABLED,
2073 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2076 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2081 			err = new_settings(hdev, sk);
2086 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2087 	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2088 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2093 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2094 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2098 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Leaving SSP while debug keys active: also turn off SSP debug mode */
2104 	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2105 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2106 			     sizeof(cp->val), &cp->val);
2108 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2110 		mgmt_pending_remove(cmd);
2115 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: host-side flag only. Requires
 * BR/EDR, SSP capability and SSP enabled. Disabling HS while powered is
 * rejected (per the visible branch); otherwise HCI_HS_ENABLED is
 * toggled, the command answered, and New Settings broadcast on change.
 * NOTE(review): sampled listing — lock and if/else lines are missing.
 */
2119 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2121 	struct mgmt_mode *cp = data;
2126 	BT_DBG("request for %s", hdev->name);
2128 	status = mgmt_bredr_support(hdev);
2130 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2132 	if (!lmp_ssp_capable(hdev))
2133 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2134 				  MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first */
2136 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2137 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2138 				  MGMT_STATUS_REJECTED);
2140 	if (cp->val != 0x00 && cp->val != 0x01)
2141 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2142 				  MGMT_STATUS_INVALID_PARAMS);
2147 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2149 		if (hdev_is_powered(hdev)) {
2150 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2151 					 MGMT_STATUS_REJECTED);
2155 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2158 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2163 		err = new_settings(hdev, sk);
2166 	hci_dev_unlock(hdev);
/* HCI request completion handler for set_le(): on error, fail all
 * pending SET_LE commands; on success answer them, broadcast New
 * Settings, and when LE ended up enabled refresh advertising/scan-rsp
 * data and background scanning.
 * NOTE(review): sampled listing — lock/brace/return lines are missing.
 */
2170 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2172 	struct cmd_lookup match = { NULL, hdev };
2175 		u8 mgmt_err = mgmt_status(status);
2177 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2182 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2184 		new_settings(hdev, match.sk);
2189 	/* Make sure the controller has a good default for
2190 	 * advertising data. Restrict the update to when LE
2191 	 * has actually been enabled. During power on, the
2192 	 * update in powered_update_hci will take care of it.
2194 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2195 		struct hci_request req;
2199 		hci_req_init(&req, hdev);
2200 		update_adv_data(&req);
2201 		update_scan_rsp_data(&req);
2202 		hci_req_run(&req, NULL);
2204 		hci_update_background_scan(hdev);
2206 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. Rejected on LE-only controllers (toggling is
 * not allowed there). If powered off or the host LE state already
 * matches, only the HCI_LE_ENABLED flag (and HCI_ADVERTISING, when
 * disabling) is updated. Otherwise HCI Write LE Host Supported is sent,
 * disabling advertising first if it is active, completed by
 * le_enable_complete().
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2210 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2212 	struct mgmt_mode *cp = data;
2213 	struct hci_cp_write_le_host_supported hci_cp;
2214 	struct pending_cmd *cmd;
2215 	struct hci_request req;
2219 	BT_DBG("request for %s", hdev->name);
2221 	if (!lmp_le_capable(hdev))
2222 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2223 				  MGMT_STATUS_NOT_SUPPORTED);
2225 	if (cp->val != 0x00 && cp->val != 0x01)
2226 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2227 				  MGMT_STATUS_INVALID_PARAMS);
2229 	/* LE-only devices do not allow toggling LE on/off */
2230 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2231 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2232 				  MGMT_STATUS_REJECTED);
2237 	enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or host LE state already matches */
2239 	if (!hdev_is_powered(hdev) || val == enabled) {
2240 		bool changed = false;
2242 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2243 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2247 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2248 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2252 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2257 			err = new_settings(hdev, sk);
2262 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2263 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2264 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2269 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2275 	hci_req_init(&req, hdev);
2277 	memset(&hci_cp, 0, sizeof(hci_cp));
2281 		hci_cp.simul = 0x00;
/* Turning LE off while advertising: stop advertising first */
2283 		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2284 			disable_advertising(&req);
2287 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2290 	err = hci_req_run(&req, le_enable_complete);
2292 		mgmt_pending_remove(cmd);
2295 	hci_dev_unlock(hdev);
2299 /* This is a helper function to test for pending mgmt commands that can
2300  * cause CoD or EIR HCI commands. We can only allow one such pending
2301  * mgmt command at a time since otherwise we cannot easily track what
2302  * the current values are, will be, and based on that calculate if a new
2303  * HCI command needs to be sent and if yes with what value.
2305 static bool pending_eir_or_class(struct hci_dev *hdev)
2307 	struct pending_cmd *cmd;
/* Scan the pending list for any command type that touches CoD/EIR */
2309 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2310 		switch (cmd->opcode) {
2311 		case MGMT_OP_ADD_UUID:
2312 		case MGMT_OP_REMOVE_UUID:
2313 		case MGMT_OP_SET_DEV_CLASS:
2314 		case MGMT_OP_SET_POWERED:
/* NOTE(review): sampled listing — the 'return true'/'return false'
 * lines are not visible here.
 */
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
2322 static const u8 bluetooth_base_uuid[] = {
2323 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2324 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes differ from
 * the Bluetooth Base UUID it is a genuine 128-bit UUID; otherwise the
 * value at bytes 12..15 determines the short form.
 * NOTE(review): sampled listing — the return statements (128/32/16
 * sizes) are not visible here.
 */
2327 static u8 get_uuid_size(const u8 *uuid)
2331 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2334 	val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for Add UUID / Remove UUID / Set Dev Class:
 * answer the pending command of type @mgmt_op with the current device
 * class and the translated HCI status, then remove it.
 * NOTE(review): sampled listing — lock and NULL-check lines missing.
 */
2341 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2343 	struct pending_cmd *cmd;
2347 	cmd = mgmt_pending_find(mgmt_op, hdev);
2351 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2352 		     hdev->dev_class, 3);
2354 	mgmt_pending_remove(cmd);
2357 	hci_dev_unlock(hdev);
2360 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2362 BT_DBG("status 0x%02x", status);
2364 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects the request while another CoD/EIR
 * affecting command is pending, appends the UUID to hdev->uuids, and
 * issues the resulting class/EIR HCI updates. An -ENODATA request run
 * (no HCI commands needed) is answered immediately with the current
 * device class.
 * NOTE(review): sampled listing — lock/goto/alloc-check lines missing.
 */
2367 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2369 	struct mgmt_cp_add_uuid *cp = data;
2370 	struct pending_cmd *cmd;
2371 	struct hci_request req;
2372 	struct bt_uuid *uuid;
2375 	BT_DBG("request for %s", hdev->name);
2379 	if (pending_eir_or_class(hdev)) {
2380 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2385 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2391 	memcpy(uuid->uuid, cp->uuid, 16);
2392 	uuid->svc_hint = cp->svc_hint;
2393 	uuid->size = get_uuid_size(cp->uuid);
2395 	list_add_tail(&uuid->list, &hdev->uuids);
2397 	hci_req_init(&req, hdev);
2402 	err = hci_req_run(&req, add_uuid_complete);
2404 		if (err != -ENODATA)
/* No HCI traffic needed: respond right away with the current class */
2407 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2408 				   hdev->dev_class, 3);
2412 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2421 	hci_dev_unlock(hdev);
/* Arm the service cache: when powered, set HCI_SERVICE_CACHE and queue
 * the delayed flush work if the flag was not already set.
 * NOTE(review): sampled listing — the boolean return statements are not
 * visible here.
 */
2425 static bool enable_service_cache(struct hci_dev *hdev)
2427 	if (!hdev_is_powered(hdev))
2430 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2431 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2439 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2441 BT_DBG("status 0x%02x", status);
2443 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. The all-zero UUID clears the whole list
 * (possibly deferring HCI updates via the service cache); otherwise the
 * matching entries are unlinked, with INVALID_PARAMS if none matched.
 * Resulting class/EIR HCI updates complete via remove_uuid_complete().
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2446 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2449 	struct mgmt_cp_remove_uuid *cp = data;
2450 	struct pending_cmd *cmd;
2451 	struct bt_uuid *match, *tmp;
2452 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2453 	struct hci_request req;
2456 	BT_DBG("request for %s", hdev->name);
2460 	if (pending_eir_or_class(hdev)) {
2461 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* The all-zero wildcard UUID removes every stored UUID */
2466 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2467 		hci_uuids_clear(hdev);
2469 		if (enable_service_cache(hdev)) {
2470 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2471 					   0, hdev->dev_class, 3);
2480 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2481 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2484 		list_del(&match->list);
/* Nothing matched: the supplied UUID was never registered */
2490 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2491 				 MGMT_STATUS_INVALID_PARAMS);
2496 	hci_req_init(&req, hdev);
2501 	err = hci_req_run(&req, remove_uuid_complete);
2503 		if (err != -ENODATA)
2506 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2507 				   hdev->dev_class, 3);
2511 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2520 	hci_dev_unlock(hdev);
2524 static void set_class_complete(struct hci_dev *hdev, u8 status)
2526 BT_DBG("status 0x%02x", status);
2528 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validates the major/minor class
 * (reserved bits must be zero), stores them, and when powered issues
 * the class/EIR HCI updates — flushing the service cache first if it
 * was armed — completed by set_class_complete().
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2531 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2534 	struct mgmt_cp_set_dev_class *cp = data;
2535 	struct pending_cmd *cmd;
2536 	struct hci_request req;
2539 	BT_DBG("request for %s", hdev->name);
2541 	if (!lmp_bredr_capable(hdev))
2542 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2543 				  MGMT_STATUS_NOT_SUPPORTED);
2547 	if (pending_eir_or_class(hdev)) {
2548 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved */
2553 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2554 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2555 				 MGMT_STATUS_INVALID_PARAMS);
2559 	hdev->major_class = cp->major;
2560 	hdev->minor_class = cp->minor;
/* Powered off: stored values take effect at next power on */
2562 	if (!hdev_is_powered(hdev)) {
2563 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2564 				   hdev->dev_class, 3);
2568 	hci_req_init(&req, hdev);
/* Flush a pending service cache before writing the new class */
2570 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2571 		hci_dev_unlock(hdev);
2572 		cancel_delayed_work_sync(&hdev->service_cache);
2579 	err = hci_req_run(&req, set_class_complete);
2581 		if (err != -ENODATA)
2584 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2585 				   hdev->dev_class, 3);
2589 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2598 	hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates key_count against both the
 * overflow-safe maximum and the actual message length, validates each
 * key entry, clears the stored keys, updates HCI_KEEP_DEBUG_KEYS per
 * cp->debug_keys, and loads every non-debug key into the key store.
 * NOTE(review): sampled listing — lock/brace/if lines are missing.
 */
2602 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2605 	struct mgmt_cp_load_link_keys *cp = data;
/* Largest key_count that cannot overflow the u16 expected_len below */
2606 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2607 				   sizeof(struct mgmt_link_key_info));
2608 	u16 key_count, expected_len;
2612 	BT_DBG("request for %s", hdev->name);
2614 	if (!lmp_bredr_capable(hdev))
2615 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2616 				  MGMT_STATUS_NOT_SUPPORTED);
2618 	key_count = __le16_to_cpu(cp->key_count);
2619 	if (key_count > max_key_count) {
2620 		BT_ERR("load_link_keys: too big key_count value %u",
2622 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2623 				  MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count */
2626 	expected_len = sizeof(*cp) + key_count *
2627 					sizeof(struct mgmt_link_key_info);
2628 	if (expected_len != len) {
2629 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2631 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2632 				  MGMT_STATUS_INVALID_PARAMS);
2635 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2636 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2637 				  MGMT_STATUS_INVALID_PARAMS);
2639 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all entries before any state is modified */
2642 	for (i = 0; i < key_count; i++) {
2643 		struct mgmt_link_key_info *key = &cp->keys[i];
2645 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2646 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2647 					  MGMT_STATUS_INVALID_PARAMS);
2652 	hci_link_keys_clear(hdev);
2655 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2658 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2662 		new_settings(hdev, NULL);
2664 	for (i = 0; i < key_count; i++) {
2665 		struct mgmt_link_key_info *key = &cp->keys[i];
2667 		/* Always ignore debug keys and require a new pairing if
2668 		 * the user wants to use them.
2670 		if (key->type == HCI_LK_DEBUG_COMBINATION)
2673 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2674 				 key->type, key->pin_len, NULL);
2677 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2679 	hci_dev_unlock(hdev);
2684 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2685 u8 addr_type, struct sock *skip_sk)
2687 struct mgmt_ev_device_unpaired ev;
2689 bacpy(&ev.addr.bdaddr, bdaddr);
2690 ev.addr.type = addr_type;
2692 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes stored link keys (BR/EDR) or
 * IRK+LTKs (LE) for the address, optionally disconnecting an existing
 * connection. When a disconnect is issued the response is deferred via
 * a pending command; otherwise the command completes immediately and a
 * Device Unpaired event is broadcast.
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2696 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2699 	struct mgmt_cp_unpair_device *cp = data;
2700 	struct mgmt_rp_unpair_device rp;
2701 	struct hci_cp_disconnect dc;
2702 	struct pending_cmd *cmd;
2703 	struct hci_conn *conn;
2706 	memset(&rp, 0, sizeof(rp));
2707 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2708 	rp.addr.type = cp->addr.type;
2710 	if (!bdaddr_type_is_valid(cp->addr.type))
2711 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2712 				    MGMT_STATUS_INVALID_PARAMS,
2715 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2716 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2717 				    MGMT_STATUS_INVALID_PARAMS,
2722 	if (!hdev_is_powered(hdev)) {
2723 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2724 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2728 	if (cp->addr.type == BDADDR_BREDR) {
2729 		/* If disconnection is requested, then look up the
2730 		 * connection. If the remote device is connected, it
2731 		 * will be later used to terminate the link.
2733 		 * Setting it to NULL explicitly will cause no
2734 		 * termination of the link.
2737 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2742 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: also handle connection parameters and identity keys */
2746 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2749 			/* Defer clearing up the connection parameters
2750 			 * until closing to give a chance of keeping
2751 			 * them if a repairing happens.
2753 			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2755 			/* If disconnection is not requested, then
2756 			 * clear the connection variable so that the
2757 			 * link is not terminated.
2759 			if (!cp->disconnect)
2763 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2764 			addr_type = ADDR_LE_DEV_PUBLIC;
2766 			addr_type = ADDR_LE_DEV_RANDOM;
2768 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2770 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No keys were stored for this address */
2774 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2775 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2779 	/* If the connection variable is set, then termination of the
2780 	 * link is requested.
2783 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2785 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2789 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2796 	dc.handle = cpu_to_le16(conn->handle);
2797 	dc.reason = 0x13; /* Remote User Terminated Connection */
2798 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2800 		mgmt_pending_remove(cmd);
2803 	hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: looks up the ACL or LE connection for the
 * address and issues an HCI Disconnect (Remote User Terminated) via
 * hci_disconnect(), tracking it as a pending command. Rejects the
 * request while the adapter is down, while another disconnect is
 * pending, or when the device is not connected.
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2807 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2810 	struct mgmt_cp_disconnect *cp = data;
2811 	struct mgmt_rp_disconnect rp;
2812 	struct pending_cmd *cmd;
2813 	struct hci_conn *conn;
2818 	memset(&rp, 0, sizeof(rp));
2819 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2820 	rp.addr.type = cp->addr.type;
2822 	if (!bdaddr_type_is_valid(cp->addr.type))
2823 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2824 				    MGMT_STATUS_INVALID_PARAMS,
2829 	if (!test_bit(HCI_UP, &hdev->flags)) {
2830 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2831 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be in flight at a time */
2835 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2836 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2837 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2841 	if (cp->addr.type == BDADDR_BREDR)
2842 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2845 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2847 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2848 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2849 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2853 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2859 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2861 		mgmt_pending_remove(cmd);
2864 	hci_dev_unlock(hdev);
/* Map an HCI link type + address type pair to the mgmt BDADDR_* address
 * type used in the mgmt API (LE public, LE random, or BR/EDR).
 * NOTE(review): sampled listing — the case labels for the LE link type
 * and the default branches are not visible here.
 */
2868 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2870 	switch (link_type) {
2872 		switch (addr_type) {
2873 		case ADDR_LE_DEV_PUBLIC:
2874 			return BDADDR_LE_PUBLIC;
2877 			/* Fallback to LE Random address type */
2878 			return BDADDR_LE_RANDOM;
2882 		/* Fallback to BR/EDR type */
2883 		return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: counts mgmt-visible connections,
 * allocates a response sized for that count, fills in the addresses of
 * all non-SCO connections, and replies. The length is recomputed after
 * filtering since SCO/eSCO links are skipped.
 * NOTE(review): sampled listing — lock/alloc-check/brace lines missing.
 */
2887 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2890 	struct mgmt_rp_get_connections *rp;
2900 	if (!hdev_is_powered(hdev)) {
2901 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2902 				 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections visible to mgmt */
2907 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2908 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2912 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2913 	rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links */
2920 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2921 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2923 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2924 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2925 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2930 	rp->conn_count = cpu_to_le16(i);
2932 	/* Recalculate length in case of filtered SCO connections, etc */
2933 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2935 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2941 	hci_dev_unlock(hdev);
/* Helper for PIN code rejection: register a pending PIN Code Neg Reply
 * command and send the corresponding HCI command to the controller,
 * dropping the pending entry if the send fails.
 * NOTE(review): sampled listing — alloc-check/return lines missing.
 */
2945 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2946 				   struct mgmt_cp_pin_code_neg_reply *cp)
2948 	struct pending_cmd *cmd;
2951 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2956 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2957 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2959 		mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: verifies the adapter is powered and
 * the ACL connection exists, converts the reply into a negative reply
 * when a 16-digit PIN is required but not supplied, and otherwise sends
 * the HCI PIN Code Reply, tracked by a pending command.
 * NOTE(review): sampled listing — lock/goto/brace lines are missing.
 */
2964 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2967 	struct hci_conn *conn;
2968 	struct mgmt_cp_pin_code_reply *cp = data;
2969 	struct hci_cp_pin_code_reply reply;
2970 	struct pending_cmd *cmd;
2977 	if (!hdev_is_powered(hdev)) {
2978 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2979 				 MGMT_STATUS_NOT_POWERED);
2983 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2985 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2986 				 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-digit PIN; reject shorter ones */
2990 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2991 		struct mgmt_cp_pin_code_neg_reply ncp;
2993 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2995 		BT_ERR("PIN code is not 16 bytes long");
2997 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2999 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3000 					 MGMT_STATUS_INVALID_PARAMS);
3005 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3011 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3012 	reply.pin_len = cp->pin_len;
3013 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3015 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3017 		mgmt_pending_remove(cmd);
3020 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates the requested IO
 * capability value and stores it in hdev under the device lock; no HCI
 * traffic is required.
 */
3024 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3027 	struct mgmt_cp_set_io_capability *cp = data;
/* KeyboardDisplay is the highest defined SMP IO capability */
3031 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3032 		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3033 				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3037 	hdev->io_capability = cp->io_capability;
3039 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3040 	       hdev->io_capability);
3042 	hci_dev_unlock(hdev);
3044 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Walk hdev->mgmt_pending looking for the MGMT_OP_PAIR_DEVICE command
 * whose user_data points at this connection.
 * NOTE(review): extract is missing lines (continue/return statements);
 * presumably returns the matching cmd or NULL — confirm against the
 * original file.
 */
3048 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3050 struct hci_dev *hdev = conn->hdev;
3051 struct pending_cmd *cmd;
3053 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3054 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3057 if (cmd->user_data != conn)
/* Finish a pending Pair Device command: send the mgmt response carrying
 * the peer address/type and the given status, detach all connection
 * callbacks so no further events fire for this conn, drop the reference
 * and remove the pending entry.
 * NOTE(review): extract is missing lines; comments only added.
 */
3066 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3068 struct mgmt_rp_pair_device rp;
3069 struct hci_conn *conn = cmd->user_data;
3071 bacpy(&rp.addr.bdaddr, &conn->dst);
3072 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3074 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3077 /* So we don't get further callbacks for this connection */
3078 conn->connect_cfm_cb = NULL;
3079 conn->security_cfm_cb = NULL;
3080 conn->disconn_cfm_cb = NULL;
3082 hci_conn_drop(conn);
3085 mgmt_pending_remove(cmd);
3087 /* The device is paired so there is no need to remove
3088 * its connection parameters anymore.
3090 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over LE finishes.  Maps the boolean
 * outcome to a mgmt status and completes any pending Pair Device command
 * attached to this connection.
 * NOTE(review): extract is missing lines (NULL check around the call is
 * presumably present upstream — confirm).
 */
3093 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3095 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3096 struct pending_cmd *cmd;
3098 cmd = find_pairing(conn);
3100 pairing_complete(cmd, status);
/* Connection callback used for BR/EDR pairing (see pair_device):
 * translates the HCI status and completes the pending Pair Device command
 * for this connection, if one is found.
 */
3103 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3105 struct pending_cmd *cmd;
3107 BT_DBG("status %u", status);
3109 cmd = find_pairing(conn);
3111 BT_DBG("Unable to find a pending command");
3113 pairing_complete(cmd, mgmt_status(status));
/* Connection callback used for LE pairing (see pair_device).  Unlike the
 * BR/EDR variant, extra filtering exists upstream around which statuses
 * complete the pairing — lines are missing from this extract, so only the
 * common path is visible here.
 */
3116 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3118 struct pending_cmd *cmd;
3120 BT_DBG("status %u", status);
3125 cmd = find_pairing(conn);
3127 BT_DBG("Unable to find a pending command");
3129 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device.  Validates the address type and IO capability, requires the
 * adapter to be powered, then creates either an ACL (BR/EDR) or LE
 * connection and registers a pending command completed later via the
 * pairing callbacks.  For LE, connection parameters are pre-added so the
 * peer is remembered for future connections.
 * NOTE(review): extract is missing lines (locking, gotos, some branches);
 * comments only added.
 */
3132 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3135 struct mgmt_cp_pair_device *cp = data;
3136 struct mgmt_rp_pair_device rp;
3137 struct pending_cmd *cmd;
3138 u8 sec_level, auth_type;
3139 struct hci_conn *conn;
/* Response always echoes the target address back to user space */
3144 memset(&rp, 0, sizeof(rp));
3145 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3146 rp.addr.type = cp->addr.type;
3148 if (!bdaddr_type_is_valid(cp->addr.type))
3149 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3150 MGMT_STATUS_INVALID_PARAMS,
3153 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3154 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3155 MGMT_STATUS_INVALID_PARAMS,
3160 if (!hdev_is_powered(hdev)) {
3161 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3162 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Dedicated bonding at medium security is the baseline for pairing */
3166 sec_level = BT_SECURITY_MEDIUM;
3167 auth_type = HCI_AT_DEDICATED_BONDING;
3169 if (cp->addr.type == BDADDR_BREDR) {
3170 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3175 /* Convert from L2CAP channel address type to HCI address type
3177 if (cp->addr.type == BDADDR_LE_PUBLIC)
3178 addr_type = ADDR_LE_DEV_PUBLIC;
3180 addr_type = ADDR_LE_DEV_RANDOM;
3182 /* When pairing a new device, it is expected to remember
3183 * this device for future connections. Adding the connection
3184 * parameter information ahead of time allows tracking
3185 * of the slave preferred values and will speed up any
3186 * further connection establishment.
3188 * If connection parameters already exist, then they
3189 * will be kept and this function does nothing.
3191 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3193 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3194 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map the errno to a mgmt status */
3201 if (PTR_ERR(conn) == -EBUSY)
3202 status = MGMT_STATUS_BUSY;
3204 status = MGMT_STATUS_CONNECT_FAILED;
3206 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already set means another pairing is in flight */
3212 if (conn->connect_cfm_cb) {
3213 hci_conn_drop(conn);
3214 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3215 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3219 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3222 hci_conn_drop(conn);
3226 /* For LE, just connecting isn't a proof that the pairing finished */
3227 if (cp->addr.type == BDADDR_BREDR) {
3228 conn->connect_cfm_cb = pairing_complete_cb;
3229 conn->security_cfm_cb = pairing_complete_cb;
3230 conn->disconn_cfm_cb = pairing_complete_cb;
3232 conn->connect_cfm_cb = le_pairing_complete_cb;
3233 conn->security_cfm_cb = le_pairing_complete_cb;
3234 conn->disconn_cfm_cb = le_pairing_complete_cb;
3237 conn->io_capability = cp->io_cap;
/* The pending command holds its own reference on the connection */
3238 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete immediately */
3240 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3241 hci_conn_security(conn, sec_level, auth_type, true))
3242 pairing_complete(cmd, 0);
3247 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command.  Requires a powered adapter, a pending pairing, and that the
 * supplied address matches the connection being paired; the pending
 * pairing is then completed with MGMT_STATUS_CANCELLED.
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
3251 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3254 struct mgmt_addr_info *addr = data;
3255 struct pending_cmd *cmd;
3256 struct hci_conn *conn;
3263 if (!hdev_is_powered(hdev)) {
3264 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3265 MGMT_STATUS_NOT_POWERED);
3269 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3271 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3272 MGMT_STATUS_INVALID_PARAMS);
3276 conn = cmd->user_data;
/* The cancel must target the exact device currently being paired */
3278 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3279 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3280 MGMT_STATUS_INVALID_PARAMS);
3284 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3286 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3287 addr, sizeof(*addr));
3289 hci_dev_unlock(hdev);
/* Common backend for the user confirmation / passkey / PIN reply family of
 * mgmt commands.  For LE addresses the reply is routed to the SMP layer
 * (smp_user_confirm_reply); for BR/EDR a pending entry is registered and
 * the matching HCI command (hci_op) is sent — passkey replies carry the
 * 32-bit passkey, all others just the bdaddr.
 * NOTE(review): extract is missing lines (locking, NULL checks, gotos,
 * return); comments only added.
 */
3293 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3294 struct mgmt_addr_info *addr, u16 mgmt_op,
3295 u16 hci_op, __le32 passkey)
3297 struct pending_cmd *cmd;
3298 struct hci_conn *conn;
3303 if (!hdev_is_powered(hdev)) {
3304 err = cmd_complete(sk, hdev->id, mgmt_op,
3305 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport implied by the address type */
3310 if (addr->type == BDADDR_BREDR)
3311 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3313 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3316 err = cmd_complete(sk, hdev->id, mgmt_op,
3317 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not HCI */
3322 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3323 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3325 err = cmd_complete(sk, hdev->id, mgmt_op,
3326 MGMT_STATUS_SUCCESS, addr,
3329 err = cmd_complete(sk, hdev->id, mgmt_op,
3330 MGMT_STATUS_FAILED, addr,
3336 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3342 /* Continue with pairing via HCI */
3343 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3344 struct hci_cp_user_passkey_reply cp;
3346 bacpy(&cp.bdaddr, &addr->bdaddr);
3347 cp.passkey = passkey;
3348 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3350 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Drop the pending entry again if the HCI send failed */
3354 mgmt_pending_remove(cmd);
3357 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3361 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3362 void *data, u16 len)
3364 struct mgmt_cp_pin_code_neg_reply *cp = data;
3368 return user_pairing_resp(sk, hdev, &cp->addr,
3369 MGMT_OP_PIN_CODE_NEG_REPLY,
3370 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the payload length, then
 * delegates to user_pairing_resp() with the matching HCI opcode.
 */
3373 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3376 struct mgmt_cp_user_confirm_reply *cp = data;
/* Strict length check — this command has a fixed-size payload */
3380 if (len != sizeof(*cp))
3381 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3382 MGMT_STATUS_INVALID_PARAMS);
3384 return user_pairing_resp(sk, hdev, &cp->addr,
3385 MGMT_OP_USER_CONFIRM_REPLY,
3386 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3389 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3390 void *data, u16 len)
3392 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3396 return user_pairing_resp(sk, hdev, &cp->addr,
3397 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3398 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: delegates to user_pairing_resp(),
 * passing the user-entered passkey through to the HCI/SMP layer.
 */
3401 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3404 struct mgmt_cp_user_passkey_reply *cp = data;
3408 return user_pairing_resp(sk, hdev, &cp->addr,
3409 MGMT_OP_USER_PASSKEY_REPLY,
3410 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3413 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3414 void *data, u16 len)
3416 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3420 return user_pairing_resp(sk, hdev, &cp->addr,
3421 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3422 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command to the request, copying the
 * current hdev->dev_name into the command parameters.
 */
3425 static void update_name(struct hci_request *req)
3427 struct hci_dev *hdev = req->hdev;
3428 struct hci_cp_write_local_name cp;
3430 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3432 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: responds to the
 * pending mgmt command with either an error status or a command complete
 * carrying the name, then removes the pending entry.
 * NOTE(review): extract is missing lines (lock, NULL check, branches);
 * comments only added.
 */
3435 static void set_name_complete(struct hci_dev *hdev, u8 status)
3437 struct mgmt_cp_set_local_name *cp;
3438 struct pending_cmd *cmd;
3440 BT_DBG("status 0x%02x", status);
3444 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3451 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3452 mgmt_status(status));
3454 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's long and short
 * names.  If nothing changed, replies immediately.  When the adapter is
 * powered off, only the stored names are updated and a Local Name Changed
 * event is emitted; when powered, an HCI request is queued to write the
 * name (and refresh LE scan response data) with set_name_complete as the
 * callback.
 * NOTE(review): extract is missing lines (locking, gotos, update_name call
 * inside the bredr branch); comments only added.
 */
3463 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3466 struct mgmt_cp_set_local_name *cp = data;
3467 struct pending_cmd *cmd;
3468 struct hci_request req;
3475 /* If the old values are the same as the new ones just return a
3476 * direct command complete event.
3478 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3479 !memcmp(hdev->short_name, cp->short_name,
3480 sizeof(hdev->short_name))) {
3481 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3486 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store the name and notify listeners */
3488 if (!hdev_is_powered(hdev)) {
3489 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3491 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3496 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3502 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3508 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3510 hci_req_init(&req, hdev);
3512 if (lmp_bredr_capable(hdev)) {
3517 /* The name is stored in the scan response data and so
3518 * no need to udpate the advertising data here.
3520 if (lmp_le_capable(hdev))
3521 update_scan_rsp_data(&req);
3523 err = hci_req_run(&req, set_name_complete);
3525 mgmt_pending_remove(cmd);
3528 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the local out-of-band
 * pairing data from the controller.  Requires power and SSP support and
 * rejects duplicate in-flight requests.  Uses the extended HCI variant
 * when BR/EDR Secure Connections is enabled (to also get the 256-bit
 * values).
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
3532 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3533 void *data, u16 data_len)
3535 struct pending_cmd *cmd;
3538 BT_DBG("%s", hdev->name);
3542 if (!hdev_is_powered(hdev)) {
3543 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3544 MGMT_STATUS_NOT_POWERED);
3548 if (!lmp_ssp_capable(hdev)) {
3549 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3550 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data request may be outstanding at a time */
3554 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3555 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3560 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3566 if (bredr_sc_enabled(hdev))
3567 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3570 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3573 mgmt_pending_remove(cmd);
3576 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB pairing data received
 * out-of-band for a remote device.  Accepts two payload sizes: the legacy
 * form (192-bit hash/rand only) and the extended form that additionally
 * carries 256-bit Secure Connections values.  Only BDADDR_BREDR targets
 * are accepted; any other length is rejected as invalid.
 * NOTE(review): extract is missing lines (locking, gotos, the LE-branch
 * body at 3624); comments only added.
 */
3580 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3581 void *data, u16 len)
3585 BT_DBG("%s ", hdev->name);
3589 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3590 struct mgmt_cp_add_remote_oob_data *cp = data;
3593 if (cp->addr.type != BDADDR_BREDR) {
3594 err = cmd_complete(sk, hdev->id,
3595 MGMT_OP_ADD_REMOTE_OOB_DATA,
3596 MGMT_STATUS_INVALID_PARAMS,
3597 &cp->addr, sizeof(cp->addr));
/* Legacy form: only the P-192 hash and randomizer are provided */
3601 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3602 cp->addr.type, cp->hash,
3603 cp->rand, NULL, NULL);
3605 status = MGMT_STATUS_FAILED;
3607 status = MGMT_STATUS_SUCCESS;
3609 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3610 status, &cp->addr, sizeof(cp->addr));
3611 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3612 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3613 u8 *rand192, *hash192;
3616 if (cp->addr.type != BDADDR_BREDR) {
3617 err = cmd_complete(sk, hdev->id,
3618 MGMT_OP_ADD_REMOTE_OOB_DATA,
3619 MGMT_STATUS_INVALID_PARAMS,
3620 &cp->addr, sizeof(cp->addr));
3624 if (bdaddr_type_is_le(cp->addr.type)) {
3628 rand192 = cp->rand192;
3629 hash192 = cp->hash192;
/* Extended form: both P-192 and P-256 values are stored */
3632 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3633 cp->addr.type, hash192, rand192,
3634 cp->hash256, cp->rand256);
3636 status = MGMT_STATUS_FAILED;
3638 status = MGMT_STATUS_SUCCESS;
3640 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3641 status, &cp->addr, sizeof(cp->addr));
3643 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3644 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3645 MGMT_STATUS_INVALID_PARAMS);
3649 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for one
 * BR/EDR device, or clear all stored entries when BDADDR_ANY is given.
 * Non-BR/EDR address types are rejected up front.
 * NOTE(review): extract is missing lines (locking, goto done); comments
 * only added.
 */
3653 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3654 void *data, u16 len)
3656 struct mgmt_cp_remove_remote_oob_data *cp = data;
3660 BT_DBG("%s", hdev->name);
3662 if (cp->addr.type != BDADDR_BREDR)
3663 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3664 MGMT_STATUS_INVALID_PARAMS,
3665 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY is the wildcard: wipe the whole OOB data store */
3669 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3670 hci_remote_oob_data_clear(hdev);
3671 status = MGMT_STATUS_SUCCESS;
3675 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3677 status = MGMT_STATUS_INVALID_PARAMS;
3679 status = MGMT_STATUS_SUCCESS;
3682 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3683 status, &cp->addr, sizeof(cp->addr));
3685 hci_dev_unlock(hdev);
/* Build the HCI commands needed to start device discovery of the type
 * currently set in hdev->discovery.type, appending them to req.
 * BR/EDR discovery issues an Inquiry with the GIAC; LE and interleaved
 * discovery first stop any advertising/background scan, then program
 * active scan parameters (with a private/random own address where
 * applicable) and enable scanning.  Returns false and fills *status on
 * failure.
 * NOTE(review): extract is missing lines (break/return statements, some
 * checks); comments only added.
 */
3689 static bool trigger_discovery(struct hci_request *req, u8 *status)
3691 struct hci_dev *hdev = req->hdev;
3692 struct hci_cp_le_set_scan_param param_cp;
3693 struct hci_cp_le_set_scan_enable enable_cp;
3694 struct hci_cp_inquiry inq_cp;
3695 /* General inquiry access code (GIAC) */
3696 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3700 switch (hdev->discovery.type) {
3701 case DISCOV_TYPE_BREDR:
3702 *status = mgmt_bredr_support(hdev);
3706 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3707 *status = MGMT_STATUS_BUSY;
/* Start from a clean cache so old results don't leak into this run */
3711 hci_inquiry_cache_flush(hdev);
3713 memset(&inq_cp, 0, sizeof(inq_cp));
3714 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3715 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3716 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3719 case DISCOV_TYPE_LE:
3720 case DISCOV_TYPE_INTERLEAVED:
3721 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR as well as LE */
3725 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3726 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3727 *status = MGMT_STATUS_NOT_SUPPORTED;
3731 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3732 /* Don't let discovery abort an outgoing
3733 * connection attempt that's using directed
3736 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3738 *status = MGMT_STATUS_REJECTED;
3742 disable_advertising(req);
3745 /* If controller is scanning, it means the background scanning
3746 * is running. Thus, we should temporarily stop it in order to
3747 * set the discovery scanning parameters.
3749 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3750 hci_req_add_le_scan_disable(req);
3752 memset(&param_cp, 0, sizeof(param_cp));
3754 /* All active scans will be done with either a resolvable
3755 * private address (when privacy feature has been enabled)
3756 * or unresolvable private address.
3758 err = hci_update_random_address(req, true, &own_addr_type);
3760 *status = MGMT_STATUS_FAILED;
3764 param_cp.type = LE_SCAN_ACTIVE;
3765 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3766 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3767 param_cp.own_address_type = own_addr_type;
3768 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3771 memset(&enable_cp, 0, sizeof(enable_cp));
3772 enable_cp.enable = LE_SCAN_ENABLE;
3773 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3774 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3779 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for Start Discovery: respond to the
 * pending mgmt command with the discovery type, then either reset the
 * discovery state to STOPPED (on failure) or mark it FINDING and, for
 * LE/interleaved discovery, schedule the delayed le_scan_disable work
 * with the appropriate timeout.
 * NOTE(review): extract is missing lines (locking, branches, timeout=0
 * cases); comments only added.
 */
3786 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3788 struct pending_cmd *cmd;
3789 unsigned long timeout;
3791 BT_DBG("status %d", status);
3795 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3797 u8 type = hdev->discovery.type;
3799 cmd_complete(cmd->sk, hdev->id, cmd->opcode,
3800 mgmt_status(status), &type, sizeof(type));
3801 mgmt_pending_remove(cmd);
3805 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3809 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
/* LE-based discovery is time-bounded; BR/EDR inquiry ends on its own */
3811 switch (hdev->discovery.type) {
3812 case DISCOV_TYPE_LE:
3813 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3815 case DISCOV_TYPE_INTERLEAVED:
3816 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3818 case DISCOV_TYPE_BREDR:
3822 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3828 queue_delayed_work(hdev->workqueue,
3829 &hdev->le_scan_disable, timeout);
3832 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type.  Requires power, no discovery already in progress and
 * no periodic inquiry.  Resets the discovery filter state (RSSI/UUIDs),
 * builds the HCI commands via trigger_discovery() and runs them with
 * start_discovery_complete as callback, moving the state machine to
 * DISCOVERY_STARTING on success.
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
3835 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3836 void *data, u16 len)
3838 struct mgmt_cp_start_discovery *cp = data;
3839 struct pending_cmd *cmd;
3840 struct hci_request req;
3844 BT_DBG("%s", hdev->name);
3848 if (!hdev_is_powered(hdev)) {
3849 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3850 MGMT_STATUS_NOT_POWERED,
3851 &cp->type, sizeof(cp->type));
3855 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3856 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3857 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3858 MGMT_STATUS_BUSY, &cp->type,
3863 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
/* Reset per-discovery filter state before starting a fresh run */
3869 hdev->discovery.type = cp->type;
3870 hdev->discovery.rssi = HCI_RSSI_INVALID;
3871 hdev->discovery.uuid_count = 0;
3873 hci_req_init(&req, hdev);
3875 if (!trigger_discovery(&req, &status)) {
3876 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3877 status, &cp->type, sizeof(cp->type));
3878 mgmt_pending_remove(cmd);
3882 err = hci_req_run(&req, start_discovery_complete);
3884 mgmt_pending_remove(cmd);
3888 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3891 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: respond to the
 * pending mgmt command with the discovery type and move the discovery
 * state machine to DISCOVERY_STOPPED.
 * NOTE(review): extract is missing lines (locking, NULL check); comments
 * only added.
 */
3895 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3897 struct pending_cmd *cmd;
3899 BT_DBG("status %d", status);
3903 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3905 u8 type = hdev->discovery.type;
3907 cmd_complete(cmd->sk, hdev->id, cmd->opcode,
3908 mgmt_status(status), &type, sizeof(type));
3909 mgmt_pending_remove(cmd);
3913 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3915 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery session.
 * Rejects when no discovery is running or the supplied type does not
 * match the one in progress.  Builds a stop request via
 * hci_stop_discovery(); when no HCI commands are needed (-ENODATA) it
 * completes immediately and resets the state to STOPPED, otherwise the
 * state goes to DISCOVERY_STOPPING pending stop_discovery_complete.
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
3918 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3921 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3922 struct pending_cmd *cmd;
3923 struct hci_request req;
3926 BT_DBG("%s", hdev->name);
3930 if (!hci_discovery_active(hdev)) {
3931 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3932 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3933 sizeof(mgmt_cp->type));
3937 if (hdev->discovery.type != mgmt_cp->type) {
3938 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3939 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3940 sizeof(mgmt_cp->type));
3944 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3950 hci_req_init(&req, hdev);
3952 hci_stop_discovery(&req);
3954 err = hci_req_run(&req, stop_discovery_complete);
3956 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3960 mgmt_pending_remove(cmd);
3962 /* If no HCI commands were sent we're done */
3963 if (err == -ENODATA) {
3964 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3965 &mgmt_cp->type, sizeof(mgmt_cp->type));
3966 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3970 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space tells the kernel whether a
 * discovered device's name is already known.  Requires an active
 * discovery and an inquiry-cache entry in the unknown-name state; marks
 * the entry NAME_KNOWN or NAME_NEEDED (the latter triggering name
 * resolution) accordingly.
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
3974 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3977 struct mgmt_cp_confirm_name *cp = data;
3978 struct inquiry_entry *e;
3981 BT_DBG("%s", hdev->name);
3985 if (!hci_discovery_active(hdev)) {
3986 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3987 MGMT_STATUS_FAILED, &cp->addr,
3992 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3994 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3995 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4000 if (cp->name_known) {
4001 e->name_state = NAME_KNOWN;
4004 e->name_state = NAME_NEEDED;
4005 hci_inquiry_cache_update_resolve(hdev, e);
4008 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4012 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the given address to the adapter's
 * blacklist and, on success, broadcast a Device Blocked event to other
 * mgmt sockets.  Invalid address types are rejected before taking the
 * lock.
 * NOTE(review): extract is missing lines (locking, goto done); comments
 * only added.
 */
4016 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4019 struct mgmt_cp_block_device *cp = data;
4023 BT_DBG("%s", hdev->name);
4025 if (!bdaddr_type_is_valid(cp->addr.type))
4026 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4027 MGMT_STATUS_INVALID_PARAMS,
4028 &cp->addr, sizeof(cp->addr));
4032 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4035 status = MGMT_STATUS_FAILED;
/* Only notify other sockets after the list update succeeded */
4039 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4041 status = MGMT_STATUS_SUCCESS;
4044 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4045 &cp->addr, sizeof(cp->addr));
4047 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device — remove the
 * address from the blacklist and broadcast a Device Unblocked event on
 * success.  A failed delete maps to INVALID_PARAMS (entry not present).
 * NOTE(review): extract is missing lines (locking, goto done); comments
 * only added.
 */
4052 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4055 struct mgmt_cp_unblock_device *cp = data;
4059 BT_DBG("%s", hdev->name);
4061 if (!bdaddr_type_is_valid(cp->addr.type))
4062 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4063 MGMT_STATUS_INVALID_PARAMS,
4064 &cp->addr, sizeof(cp->addr));
4068 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4071 status = MGMT_STATUS_INVALID_PARAMS;
4075 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4077 status = MGMT_STATUS_SUCCESS;
4080 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4081 &cp->addr, sizeof(cp->addr));
4083 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) used for the DI profile.  Source values above
 * 0x0002 are invalid.  An empty HCI request is run afterwards —
 * presumably to refresh the EIR record with the new DID; the update_eir
 * call appears to be among the lines missing from this extract.
 */
4088 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4091 struct mgmt_cp_set_device_id *cp = data;
4092 struct hci_request req;
4096 BT_DBG("%s", hdev->name);
4098 source = __le16_to_cpu(cp->source);
4100 if (source > 0x0002)
4101 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4102 MGMT_STATUS_INVALID_PARAMS);
4106 hdev->devid_source = source;
4107 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4108 hdev->devid_product = __le16_to_cpu(cp->product);
4109 hdev->devid_version = __le16_to_cpu(cp->version);
4111 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4113 hci_req_init(&req, hdev);
4115 hci_req_run(&req, NULL);
4117 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising: on error, answer
 * every pending SET_ADVERTISING command with the mapped status; on
 * success, sync the HCI_ADVERTISING setting flag with the actual
 * controller state (HCI_LE_ADV), answer pending commands with the new
 * settings and emit New Settings to listeners.
 * NOTE(review): extract is missing lines (locking, goto unlock); comments
 * only added.
 */
4122 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4124 struct cmd_lookup match = { NULL, hdev };
4127 u8 mgmt_err = mgmt_status(status);
4129 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4130 cmd_status_rsp, &mgmt_err);
4134 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4135 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4137 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4139 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4142 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising.  Requires LE
 * support and a 0/1 value.  When HCI traffic is pointless or unsafe
 * (powered off, no change, LE connections present, or an active LE scan
 * in progress) only the setting flag is toggled and New Settings emitted.
 * Otherwise an enable/disable advertising request is queued, completed by
 * set_advertising_complete.
 * NOTE(review): extract is missing lines (locking, gotos, val assignment);
 * comments only added.
 */
4148 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4151 struct mgmt_mode *cp = data;
4152 struct pending_cmd *cmd;
4153 struct hci_request req;
4154 u8 val, enabled, status;
4157 BT_DBG("request for %s", hdev->name);
4159 status = mgmt_le_support(hdev);
4161 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4164 if (cp->val != 0x00 && cp->val != 0x01)
4165 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4166 MGMT_STATUS_INVALID_PARAMS);
4171 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4173 /* The following conditions are ones which mean that we should
4174 * not do any HCI communication but directly send a mgmt
4175 * response to user space (after toggling the flag if
4178 if (!hdev_is_powered(hdev) || val == enabled ||
4179 hci_conn_num(hdev, LE_LINK) > 0 ||
4180 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4181 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4182 bool changed = false;
4184 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4185 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4189 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4194 err = new_settings(hdev, sk);
/* Serialize against other in-flight advertising/LE toggles */
4199 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4200 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4201 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4206 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4212 hci_req_init(&req, hdev);
4215 enable_advertising(&req);
4217 disable_advertising(&req);
4219 err = hci_req_run(&req, set_advertising_complete);
4221 mgmt_pending_remove(cmd);
4224 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address.  Only allowed on LE-capable, powered-off adapters.  A non-ANY
 * address must not be BDADDR_NONE and must have the two most significant
 * bits set, as required for static random addresses by the Bluetooth Core
 * Specification.
 * NOTE(review): extract is missing lines (locking); comments only added.
 */
4228 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4229 void *data, u16 len)
4231 struct mgmt_cp_set_static_address *cp = data;
4234 BT_DBG("%s", hdev->name);
4236 if (!lmp_le_capable(hdev))
4237 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4238 MGMT_STATUS_NOT_SUPPORTED);
4240 if (hdev_is_powered(hdev))
4241 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4242 MGMT_STATUS_REJECTED);
4244 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4245 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4246 return cmd_status(sk, hdev->id,
4247 MGMT_OP_SET_STATIC_ADDRESS,
4248 MGMT_STATUS_INVALID_PARAMS);
4250 /* Two most significant bits shall be set */
4251 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4252 return cmd_status(sk, hdev->id,
4253 MGMT_OP_SET_STATIC_ADDRESS,
4254 MGMT_STATUS_INVALID_PARAMS);
4259 bacpy(&hdev->static_addr, &cp->bdaddr);
4261 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4263 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window.
 * Both values must lie in 0x0004..0x4000 and the window must not exceed
 * the interval.  If background (passive) scanning is currently running
 * and no discovery is active, it is restarted so the new parameters take
 * effect immediately.
 * NOTE(review): extract is missing lines (locking); comments only added.
 */
4268 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4269 void *data, u16 len)
4271 struct mgmt_cp_set_scan_params *cp = data;
4272 __u16 interval, window;
4275 BT_DBG("%s", hdev->name);
4277 if (!lmp_le_capable(hdev))
4278 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4279 MGMT_STATUS_NOT_SUPPORTED);
4281 interval = __le16_to_cpu(cp->interval);
4283 if (interval < 0x0004 || interval > 0x4000)
4284 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4285 MGMT_STATUS_INVALID_PARAMS);
4287 window = __le16_to_cpu(cp->window);
4289 if (window < 0x0004 || window > 0x4000)
4290 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4291 MGMT_STATUS_INVALID_PARAMS);
4293 if (window > interval)
4294 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4295 MGMT_STATUS_INVALID_PARAMS);
4299 hdev->le_scan_interval = interval;
4300 hdev->le_scan_window = window;
4302 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4304 /* If background scan is running, restart it so new parameters are
4307 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4308 hdev->discovery.state == DISCOVERY_STOPPED) {
4309 struct hci_request req;
4311 hci_req_init(&req, hdev);
4313 hci_req_add_le_scan_disable(&req);
4314 hci_req_add_le_passive_scan(&req);
4316 hci_req_run(&req, NULL);
4319 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on error send
 * a command status; otherwise toggle HCI_FAST_CONNECTABLE to mirror the
 * requested mode, reply with the new settings and broadcast New Settings.
 * NOTE(review): extract is missing lines (locking, NULL check, branches);
 * comments only added.
 */
4324 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4326 struct pending_cmd *cmd;
4328 BT_DBG("status 0x%02x", status);
4332 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4337 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4338 mgmt_status(status));
4340 struct mgmt_mode *cp = cmd->param;
4343 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4345 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4347 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4348 new_settings(hdev, cmd->sk);
4351 mgmt_pending_remove(cmd);
4354 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle the fast-connectable page
 * scan mode.  Needs BR/EDR enabled on a >= 1.2 controller, a powered and
 * connectable adapter, and a 0/1 value.  No-ops (settings reply only)
 * when the flag already matches; otherwise queues a page-scan write via
 * write_fast_connectable() completed by fast_connectable_complete.
 * NOTE(review): extract is missing lines (locking, gotos); comments only
 * added.
 */
4357 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4358 void *data, u16 len)
4360 struct mgmt_mode *cp = data;
4361 struct pending_cmd *cmd;
4362 struct hci_request req;
4365 BT_DBG("%s", hdev->name);
4367 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4368 hdev->hci_ver < BLUETOOTH_VER_1_2)
4369 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4370 MGMT_STATUS_NOT_SUPPORTED);
4372 if (cp->val != 0x00 && cp->val != 0x01)
4373 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4374 MGMT_STATUS_INVALID_PARAMS);
4376 if (!hdev_is_powered(hdev))
4377 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4378 MGMT_STATUS_NOT_POWERED);
4380 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4381 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4382 MGMT_STATUS_REJECTED);
4386 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4387 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested mode already active: reply without touching HCI */
4392 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4393 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4398 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4405 hci_req_init(&req, hdev);
4407 write_fast_connectable(&req, cp->val);
4409 err = hci_req_run(&req, fast_connectable_complete);
4411 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4412 MGMT_STATUS_FAILED);
4413 mgmt_pending_remove(cmd);
4417 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR: on failure, roll back
 * the HCI_BREDR_ENABLED flag that set_bredr() flipped optimistically and
 * send the error status; on success, reply with the new settings and
 * broadcast New Settings.
 * NOTE(review): extract is missing lines (locking, NULL check, branches);
 * comments only added.
 */
4422 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4424 struct pending_cmd *cmd;
4426 BT_DBG("status 0x%02x", status);
4430 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4435 u8 mgmt_err = mgmt_status(status);
4437 /* We need to restore the flag if related HCI commands
4440 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4442 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4444 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4445 new_settings(hdev, cmd->sk);
4448 mgmt_pending_remove(cmd);
4451 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR on a dual-mode
 * controller.  Requires both BR/EDR and LE capability with LE enabled.
 * When powered off, disabling also clears all BR/EDR-dependent setting
 * flags.  When powered on, disabling is rejected; enabling flips the
 * flag up front (so update_adv_data emits correct flags), then queues
 * page-scan and advertising-data updates completed by set_bredr_complete.
 * NOTE(review): extract is missing lines (locking, gotos, the val checks
 * around 4484/4501); comments only added.
 */
4454 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4456 struct mgmt_mode *cp = data;
4457 struct pending_cmd *cmd;
4458 struct hci_request req;
4461 BT_DBG("request for %s", hdev->name);
4463 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4464 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4465 MGMT_STATUS_NOT_SUPPORTED);
4467 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4468 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4469 MGMT_STATUS_REJECTED);
4471 if (cp->val != 0x00 && cp->val != 0x01)
4472 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4473 MGMT_STATUS_INVALID_PARAMS);
4477 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4478 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4482 if (!hdev_is_powered(hdev)) {
/* Turning BR/EDR off also invalidates everything that depends on it */
4484 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4485 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4486 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4487 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4488 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4491 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4493 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4497 err = new_settings(hdev, sk);
4501 /* Reject disabling when powered on */
4503 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4504 MGMT_STATUS_REJECTED);
4508 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4509 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4514 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4520 /* We need to flip the bit already here so that update_adv_data
4521 * generates the correct flags.
4523 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4525 hci_req_init(&req, hdev);
4527 write_fast_connectable(&req, false);
4528 hci_update_page_scan(hdev, &req);
4530 /* Since only the advertising data flags will change, there
4531 * is no need to update the scan response data.
4533 update_adv_data(&req);
4535 err = hci_req_run(&req, set_bredr_complete);
4537 mgmt_pending_remove(cmd);
4540 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support.
 * Value 0x00 disables, 0x01 enables, 0x02 enables SC-only mode.  When no
 * HCI write is possible or needed (powered off, controller lacks SC and
 * no debug override, or BR/EDR disabled) only the HCI_SC_ENABLED /
 * HCI_SC_ONLY flags are toggled.  Otherwise an HCI Write Secure
 * Connections Support command is sent and the SC_ONLY flag updated on
 * the spot.
 * NOTE(review): extract is missing lines (locking, gotos, val
 * derivation); comments only added.
 */
4544 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4545 void *data, u16 len)
4547 struct mgmt_mode *cp = data;
4548 struct pending_cmd *cmd;
4552 BT_DBG("request for %s", hdev->name);
4554 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4555 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4556 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4557 MGMT_STATUS_NOT_SUPPORTED);
4559 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4560 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4561 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent */
4565 if (!hdev_is_powered(hdev) ||
4566 (!lmp_sc_capable(hdev) &&
4567 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4568 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4572 changed = !test_and_set_bit(HCI_SC_ENABLED,
4574 if (cp->val == 0x02)
4575 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4577 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4579 changed = test_and_clear_bit(HCI_SC_ENABLED,
4581 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4584 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4589 err = new_settings(hdev, sk);
4594 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4595 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (both enable and only-mode match) */
4602 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4603 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4604 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4608 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4614 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4616 mgmt_pending_remove(cmd);
4620 if (cp->val == 0x02)
4621 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4623 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4626 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: cp->val 0x00 discards debug keys,
 * 0x01 keeps them, 0x02 additionally enables SSP debug mode on the
 * controller. NOTE(review): some original lines (lock/unlock pairing,
 * else keywords, braces) are elided in this excerpt.
 */
4630 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4631 void *data, u16 len)
4633 struct mgmt_mode *cp = data;
4634 bool changed, use_changed;
4637 BT_DBG("request for %s", hdev->name);
4639 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4640 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4641 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" flag actually changed. */
4646 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4649 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 means also actively use debug keys (SSP debug mode). */
4652 if (cp->val == 0x02)
4653 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4656 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new debug mode to the controller only when powered,
 * the usage flag changed, and SSP is enabled. */
4659 if (hdev_is_powered(hdev) && use_changed &&
4660 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4661 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4662 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4663 sizeof(mode), &mode);
4666 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4671 err = new_settings(hdev, sk);
4674 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA use)
 * and install the local IRK supplied by user space. Only allowed
 * while the controller is powered off.
 * NOTE(review): lock/unlock and some braces are elided in this excerpt.
 */
4678 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4681 struct mgmt_cp_set_privacy *cp = cp_data;
4685 BT_DBG("request for %s", hdev->name);
4687 if (!lmp_le_capable(hdev))
4688 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4689 MGMT_STATUS_NOT_SUPPORTED);
4691 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4692 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4693 MGMT_STATUS_INVALID_PARAMS);
/* Changing privacy while powered would desynchronize controller
 * state, so reject it. */
4695 if (hdev_is_powered(hdev))
4696 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4697 MGMT_STATUS_REJECTED);
4701 /* If user space supports this command it is also expected to
4702 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4704 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enable: store the IRK and force RPA regeneration on next use. */
4707 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4708 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4709 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disable: wipe the stored IRK. */
4711 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4712 memset(hdev->irk, 0, sizeof(hdev->irk));
4713 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4716 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4721 err = new_settings(hdev, sk);
4724 hci_dev_unlock(hdev);
/* Validate the address part of an IRK entry supplied by user space.
 * NOTE(review): return statements and closing braces are elided in
 * this excerpt; presumably LE-public is accepted unconditionally and
 * LE-random only when it is a static address.
 */
4728 static bool irk_is_valid(struct mgmt_irk_info *irk)
4730 switch (irk->addr.type) {
4731 case BDADDR_LE_PUBLIC:
4734 case BDADDR_LE_RANDOM:
4735 /* Two most significant bits shall be set */
/* 0xc0 mask checks the static-random-address marker bits. */
4736 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the stored Identity Resolving
 * Keys with the list supplied by user space. Validates count/length
 * before touching existing state.
 * NOTE(review): lock acquisition and some braces are elided here.
 */
4744 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound keeps irk_count * sizeof(entry) + header within u16,
 * preventing expected_len overflow below. */
4747 struct mgmt_cp_load_irks *cp = cp_data;
4748 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4749 sizeof(struct mgmt_irk_info));
4750 u16 irk_count, expected_len;
4753 BT_DBG("request for %s", hdev->name);
4755 if (!lmp_le_capable(hdev))
4756 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4757 MGMT_STATUS_NOT_SUPPORTED);
4759 irk_count = __le16_to_cpu(cp->irk_count);
4760 if (irk_count > max_irk_count) {
4761 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4762 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4763 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
4766 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4767 if (expected_len != len) {
4768 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4770 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4771 MGMT_STATUS_INVALID_PARAMS);
4774 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before clearing the existing IRK list so a
 * bad payload cannot leave the list half-replaced. */
4776 for (i = 0; i < irk_count; i++) {
4777 struct mgmt_irk_info *key = &cp->irks[i];
4779 if (!irk_is_valid(key))
4780 return cmd_status(sk, hdev->id,
4782 MGMT_STATUS_INVALID_PARAMS);
4787 hci_smp_irks_clear(hdev);
4789 for (i = 0; i < irk_count; i++) {
4790 struct mgmt_irk_info *irk = &cp->irks[i];
4793 if (irk->addr.type == BDADDR_LE_PUBLIC)
4794 addr_type = ADDR_LE_DEV_PUBLIC;
4796 addr_type = ADDR_LE_DEV_RANDOM;
4798 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4802 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4804 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4806 hci_dev_unlock(hdev);
/* Validate a long-term-key entry supplied by user space: master flag
 * must be boolean and the address must be LE public or a static
 * random address. NOTE(review): return statements and braces are
 * elided in this excerpt.
 */
4811 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4813 if (key->master != 0x00 && key->master != 0x01)
4816 switch (key->addr.type) {
4817 case BDADDR_LE_PUBLIC:
4820 case BDADDR_LE_RANDOM:
4821 /* Two most significant bits shall be set */
4822 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace stored SMP long-term
 * keys with the list from user space. Mirrors load_irks(): bound
 * check, exact-length check, validate-all-then-clear-then-add.
 * NOTE(review): lock acquisition, break statements and some braces
 * are elided in this excerpt.
 */
4830 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4831 void *cp_data, u16 len)
/* Bound keeps expected_len arithmetic within u16. */
4833 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4834 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4835 sizeof(struct mgmt_ltk_info));
4836 u16 key_count, expected_len;
4839 BT_DBG("request for %s", hdev->name);
4841 if (!lmp_le_capable(hdev))
4842 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4843 MGMT_STATUS_NOT_SUPPORTED);
4845 key_count = __le16_to_cpu(cp->key_count);
4846 if (key_count > max_key_count) {
4847 BT_ERR("load_ltks: too big key_count value %u", key_count);
4848 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4849 MGMT_STATUS_INVALID_PARAMS);
4852 expected_len = sizeof(*cp) + key_count *
4853 sizeof(struct mgmt_ltk_info);
4854 if (expected_len != len) {
4855 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4857 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4858 MGMT_STATUS_INVALID_PARAMS);
4861 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every key before clearing the current LTK store. */
4863 for (i = 0; i < key_count; i++) {
4864 struct mgmt_ltk_info *key = &cp->keys[i];
4866 if (!ltk_is_valid(key))
4867 return cmd_status(sk, hdev->id,
4868 MGMT_OP_LOAD_LONG_TERM_KEYS,
4869 MGMT_STATUS_INVALID_PARAMS);
4874 hci_smp_ltks_clear(hdev);
4876 for (i = 0; i < key_count; i++) {
4877 struct mgmt_ltk_info *key = &cp->keys[i];
4878 u8 type, addr_type, authenticated;
4880 if (key->addr.type == BDADDR_LE_PUBLIC)
4881 addr_type = ADDR_LE_DEV_PUBLIC;
4883 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type onto SMP key type + authentication level.
 * Legacy (non-P256) keys distinguish master vs slave role. */
4885 switch (key->type) {
4886 case MGMT_LTK_UNAUTHENTICATED:
4887 authenticated = 0x00;
4888 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4890 case MGMT_LTK_AUTHENTICATED:
4891 authenticated = 0x01;
4892 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4894 case MGMT_LTK_P256_UNAUTH:
4895 authenticated = 0x00;
4896 type = SMP_LTK_P256;
4898 case MGMT_LTK_P256_AUTH:
4899 authenticated = 0x01;
4900 type = SMP_LTK_P256;
4902 case MGMT_LTK_P256_DEBUG:
4903 authenticated = 0x00;
4904 type = SMP_LTK_P256_DEBUG;
4909 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4910 authenticated, key->val, key->enc_size, key->ediv,
4914 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4917 hci_dev_unlock(hdev);
/* Match context passed to get_conn_info_complete() via
 * mgmt_pending_foreach(): identifies which connection the refresh
 * was for and whether the TX power values read are valid.
 * NOTE(review): this excerpt elides at least one member (a status
 * field is referenced as match->mgmt_status below) and the closing
 * brace.
 */
4922 struct cmd_conn_lookup {
4923 struct hci_conn *conn;
4924 bool valid_tx_power;
/* Per-pending-command callback (run via mgmt_pending_foreach): send
 * the GET_CONN_INFO reply for commands whose connection matches the
 * refresh that just completed, then drop the conn ref and remove the
 * pending entry. NOTE(review): early-return and braces elided here.
 */
4928 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4930 struct cmd_conn_lookup *match = data;
4931 struct mgmt_cp_get_conn_info *cp;
4932 struct mgmt_rp_get_conn_info rp;
4933 struct hci_conn *conn = cmd->user_data;
/* Skip pending commands for other connections. */
4935 if (conn != match->conn)
4938 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
/* Echo back the address the caller asked about. */
4940 memset(&rp, 0, sizeof(rp));
4941 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4942 rp.addr.type = cp->addr.type;
4944 if (!match->mgmt_status) {
4945 rp.rssi = conn->rssi;
/* TX power fields are only trustworthy if the read succeeded. */
4947 if (match->valid_tx_power) {
4948 rp.tx_power = conn->tx_power;
4949 rp.max_tx_power = conn->max_tx_power;
4951 rp.tx_power = HCI_TX_POWER_INVALID;
4952 rp.max_tx_power = HCI_TX_POWER_INVALID;
4956 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4957 match->mgmt_status, &rp, sizeof(rp));
/* Balances the hci_conn_hold() taken when the command was queued. */
4959 hci_conn_drop(conn);
4962 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI/TX-power refresh
 * issued by get_conn_info(): recover the connection handle from the
 * last sent command and dispatch replies to all matching pending
 * GET_CONN_INFO commands. NOTE(review): lock acquisition, goto
 * labels and some braces are elided in this excerpt.
 */
4965 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4967 struct hci_cp_read_rssi *cp;
4968 struct hci_conn *conn;
4969 struct cmd_conn_lookup match;
4972 BT_DBG("status 0x%02x", status);
4976 /* TX power data is valid in case request completed successfully,
4977 * otherwise we assume it's not valid. At the moment we assume that
4978 * either both or none of current and max values are valid to keep code
4981 match.valid_tx_power = !status;
4983 /* Commands sent in request are either Read RSSI or Read Transmit Power
4984 * Level so we check which one was last sent to retrieve connection
4985 * handle. Both commands have handle as first parameter so it's safe to
4986 * cast data on the same command struct.
4988 * First command sent is always Read RSSI and we fail only if it fails.
4989 * In other case we simply override error to indicate success as we
4990 * already remembered if TX power value is actually valid.
4992 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4994 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4999 BT_ERR("invalid sent_cmd in response");
5003 handle = __le16_to_cpu(cp->handle);
5004 conn = hci_conn_hash_lookup_handle(hdev, handle);
5006 BT_ERR("unknown handle (%d) in response", handle);
5011 match.mgmt_status = mgmt_status(status);
5013 /* Cache refresh is complete, now reply for mgmt request for given
5016 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
5017 get_conn_info_complete, &match);
5020 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI and TX power for an
 * existing connection. Cached values are used when fresh enough;
 * otherwise an HCI refresh request is issued and the reply is
 * deferred to conn_info_refresh_complete().
 * NOTE(review): lock acquisition, goto labels/targets and some
 * braces are elided in this excerpt.
 */
5023 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5026 struct mgmt_cp_get_conn_info *cp = data;
5027 struct mgmt_rp_get_conn_info rp;
5028 struct hci_conn *conn;
5029 unsigned long conn_info_age;
5032 BT_DBG("%s", hdev->name);
/* Pre-fill the reply with the requested address for all exits. */
5034 memset(&rp, 0, sizeof(rp));
5035 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5036 rp.addr.type = cp->addr.type;
5038 if (!bdaddr_type_is_valid(cp->addr.type))
5039 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5040 MGMT_STATUS_INVALID_PARAMS,
5045 if (!hdev_is_powered(hdev)) {
5046 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5047 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look up the connection on the right transport (BR/EDR vs LE). */
5051 if (cp->addr.type == BDADDR_BREDR)
5052 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5055 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5057 if (!conn || conn->state != BT_CONNECTED) {
5058 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5059 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5063 /* To avoid client trying to guess when to poll again for information we
5064 * calculate conn info age as random value between min/max set in hdev.
5066 conn_info_age = hdev->conn_info_min_age +
5067 prandom_u32_max(hdev->conn_info_max_age -
5068 hdev->conn_info_min_age);
5070 /* Query controller to refresh cached values if they are too old or were
5073 if (time_after(jiffies, conn->conn_info_timestamp +
5074 msecs_to_jiffies(conn_info_age)) ||
5075 !conn->conn_info_timestamp) {
5076 struct hci_request req;
5077 struct hci_cp_read_tx_power req_txp_cp;
5078 struct hci_cp_read_rssi req_rssi_cp;
5079 struct pending_cmd *cmd;
/* Read RSSI is always first in the request; the completion handler
 * relies on that ordering to recover the handle. */
5081 hci_req_init(&req, hdev);
5082 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5083 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5086 /* For LE links TX power does not change thus we don't need to
5087 * query for it once value is known.
5089 if (!bdaddr_type_is_le(cp->addr.type) ||
5090 conn->tx_power == HCI_TX_POWER_INVALID) {
5091 req_txp_cp.handle = cpu_to_le16(conn->handle);
5092 req_txp_cp.type = 0x00;
5093 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5094 sizeof(req_txp_cp), &req_txp_cp);
5097 /* Max TX power needs to be read only once per connection */
5098 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5099 req_txp_cp.handle = cpu_to_le16(conn->handle);
5100 req_txp_cp.type = 0x01;
5101 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5102 sizeof(req_txp_cp), &req_txp_cp);
5105 err = hci_req_run(&req, conn_info_refresh_complete);
5109 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection while the refresh is in flight; released in
 * get_conn_info_complete(). */
5116 hci_conn_hold(conn);
5117 cmd->user_data = hci_conn_get(conn);
5119 conn->conn_info_timestamp = jiffies;
5121 /* Cache is valid, just reply with values cached in hci_conn */
5122 rp.rssi = conn->rssi;
5123 rp.tx_power = conn->tx_power;
5124 rp.max_tx_power = conn->max_tx_power;
5126 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5127 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5131 hci_dev_unlock(hdev);
/* HCI request completion callback for GET_CLOCK_INFO: locate the
 * pending mgmt command via the connection (or NULL for local clock),
 * fill in the clock values and send the reply.
 * NOTE(review): lock acquisition, early-exit gotos and some braces
 * are elided in this excerpt.
 */
5135 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5137 struct mgmt_cp_get_clock_info *cp;
5138 struct mgmt_rp_get_clock_info rp;
5139 struct hci_cp_read_clock *hci_cp;
5140 struct pending_cmd *cmd;
5141 struct hci_conn *conn;
5143 BT_DBG("%s status %u", hdev->name, status);
5147 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock read for a specific connection. */
5151 if (hci_cp->which) {
5152 u16 handle = __le16_to_cpu(hci_cp->handle);
5153 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* The connection pointer keys the pending-command lookup. */
5158 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5164 memset(&rp, 0, sizeof(rp));
5165 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5170 rp.local_clock = cpu_to_le32(hdev->clock);
5173 rp.piconet_clock = cpu_to_le32(conn->clock);
5174 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5178 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5180 mgmt_pending_remove(cmd);
/* Balances the hci_conn_hold() taken in get_clock_info(). */
5182 hci_conn_drop(conn);
5187 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local controller clock
 * and, when a peer address is given, the piconet clock of that
 * BR/EDR connection. The reply is completed asynchronously in
 * get_clock_info_complete().
 * NOTE(review): lock acquisition, goto labels and some braces are
 * elided in this excerpt.
 */
5190 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5193 struct mgmt_cp_get_clock_info *cp = data;
5194 struct mgmt_rp_get_clock_info rp;
5195 struct hci_cp_read_clock hci_cp;
5196 struct pending_cmd *cmd;
5197 struct hci_request req;
5198 struct hci_conn *conn;
5201 BT_DBG("%s", hdev->name);
5203 memset(&rp, 0, sizeof(rp));
5204 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5205 rp.addr.type = cp->addr.type;
/* Clock info is a BR/EDR-only concept. */
5207 if (cp->addr.type != BDADDR_BREDR)
5208 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5209 MGMT_STATUS_INVALID_PARAMS,
5214 if (!hdev_is_powered(hdev)) {
5215 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5216 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must be currently connected. */
5220 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5221 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5223 if (!conn || conn->state != BT_CONNECTED) {
5224 err = cmd_complete(sk, hdev->id,
5225 MGMT_OP_GET_CLOCK_INFO,
5226 MGMT_STATUS_NOT_CONNECTED,
5234 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5240 hci_req_init(&req, hdev);
/* First READ_CLOCK with which=0 (zeroed struct) reads the local clock. */
5242 memset(&hci_cp, 0, sizeof(hci_cp));
5243 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection for the async completion; dropped there. */
5246 hci_conn_hold(conn);
5247 cmd->user_data = hci_conn_get(conn);
5249 hci_cp.handle = cpu_to_le16(conn->handle);
5250 hci_cp.which = 0x01; /* Piconet clock */
5251 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5254 err = hci_req_run(&req, get_clock_info_complete);
5256 mgmt_pending_remove(cmd);
5259 hci_dev_unlock(hdev);
/* Broadcast an MGMT_EV_DEVICE_ADDED event to other mgmt sockets
 * (sk is excluded as the originator).
 * NOTE(review): the line assigning the event's action field appears
 * elided in this excerpt.
 */
5263 static void device_added(struct sock *sk, struct hci_dev *hdev,
5264 bdaddr_t *bdaddr, u8 type, u8 action)
5266 struct mgmt_ev_device_added ev;
5268 bacpy(&ev.addr.bdaddr, bdaddr);
5269 ev.addr.type = type;
5272 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: put a BR/EDR device on the whitelist
 * or register LE auto-connect parameters for a device, depending on
 * address type and action (0x00 background-scan report, 0x01 allow
 * incoming / direct connect, 0x02 auto-connect always).
 * NOTE(review): lock acquisition, goto labels and some braces are
 * elided in this excerpt.
 */
5275 static int add_device(struct sock *sk, struct hci_dev *hdev,
5276 void *data, u16 len)
5278 struct mgmt_cp_add_device *cp = data;
5279 u8 auto_conn, addr_type;
5282 BT_DBG("%s", hdev->name);
5284 if (!bdaddr_type_is_valid(cp->addr.type) ||
5285 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5286 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5287 MGMT_STATUS_INVALID_PARAMS,
5288 &cp->addr, sizeof(cp->addr));
5290 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5291 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5292 MGMT_STATUS_INVALID_PARAMS,
5293 &cp->addr, sizeof(cp->addr));
5297 if (cp->addr.type == BDADDR_BREDR) {
5298 /* Only incoming connections action is supported for now */
5299 if (cp->action != 0x01) {
5300 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5301 MGMT_STATUS_INVALID_PARAMS,
5302 &cp->addr, sizeof(cp->addr));
5306 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist change may require enabling page scan. */
5311 hci_update_page_scan(hdev, NULL);
/* LE path: translate mgmt address type and action into hci_core's
 * auto-connect policy. */
5316 if (cp->addr.type == BDADDR_LE_PUBLIC)
5317 addr_type = ADDR_LE_DEV_PUBLIC;
5319 addr_type = ADDR_LE_DEV_RANDOM;
5321 if (cp->action == 0x02)
5322 auto_conn = HCI_AUTO_CONN_ALWAYS;
5323 else if (cp->action == 0x01)
5324 auto_conn = HCI_AUTO_CONN_DIRECT;
5326 auto_conn = HCI_AUTO_CONN_REPORT;
5328 /* If the connection parameters don't exist for this device,
5329 * they will be created and configured with defaults.
5331 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5333 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5335 &cp->addr, sizeof(cp->addr));
5340 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5342 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5343 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5346 hci_dev_unlock(hdev);
/* Broadcast an MGMT_EV_DEVICE_REMOVED event to other mgmt sockets
 * (sk, the originator, is excluded from delivery). */
5350 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5351 bdaddr_t *bdaddr, u8 type)
5353 struct mgmt_ev_device_removed ev;
5355 bacpy(&ev.addr.bdaddr, bdaddr);
5356 ev.addr.type = type;
5358 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: remove one device from the
 * whitelist / LE connection-parameter list, or — when the address is
 * BDADDR_ANY with type 0 — remove all of them.
 * NOTE(review): lock acquisition, goto labels, some list_del calls
 * and braces are elided in this excerpt.
 */
5361 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5362 void *data, u16 len)
5364 struct mgmt_cp_remove_device *cp = data;
5367 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove a single device. */
5371 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5372 struct hci_conn_params *params;
5375 if (!bdaddr_type_is_valid(cp->addr.type)) {
5376 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5377 MGMT_STATUS_INVALID_PARAMS,
5378 &cp->addr, sizeof(cp->addr));
5382 if (cp->addr.type == BDADDR_BREDR) {
5383 err = hci_bdaddr_list_del(&hdev->whitelist,
/* A failed delete means the device was never added. */
5387 err = cmd_complete(sk, hdev->id,
5388 MGMT_OP_REMOVE_DEVICE,
5389 MGMT_STATUS_INVALID_PARAMS,
5390 &cp->addr, sizeof(cp->addr));
5394 hci_update_page_scan(hdev, NULL);
5396 device_removed(sk, hdev, &cp->addr.bdaddr,
5401 if (cp->addr.type == BDADDR_LE_PUBLIC)
5402 addr_type = ADDR_LE_DEV_PUBLIC;
5404 addr_type = ADDR_LE_DEV_RANDOM;
5406 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5409 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5410 MGMT_STATUS_INVALID_PARAMS,
5411 &cp->addr, sizeof(cp->addr));
/* Disabled entries are not user-visible devices; refuse removal. */
5415 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5416 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5417 MGMT_STATUS_INVALID_PARAMS,
5418 &cp->addr, sizeof(cp->addr));
5422 list_del(&params->action);
5423 list_del(&params->list);
5425 hci_update_background_scan(hdev);
5427 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY branch: clear everything. */
5429 struct hci_conn_params *p, *tmp;
5430 struct bdaddr_list *b, *btmp;
/* For the wildcard form only type 0 is valid. */
5432 if (cp->addr.type) {
5433 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5434 MGMT_STATUS_INVALID_PARAMS,
5435 &cp->addr, sizeof(cp->addr));
5439 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5440 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type)
5445 hci_update_page_scan(hdev, NULL);
5447 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5448 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5450 device_removed(sk, hdev, &p->addr, p->addr_type);
5451 list_del(&p->action);
5456 BT_DBG("All LE connection parameters were removed");
5458 hci_update_background_scan(hdev);
5462 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5463 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5466 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters with the list from user space. Invalid entries are
 * skipped with a logged error rather than failing the whole command.
 * NOTE(review): lock acquisition, continue statements and some
 * braces are elided in this excerpt.
 */
5470 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5473 struct mgmt_cp_load_conn_param *cp = data;
/* Bound keeps expected_len arithmetic within u16. */
5474 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5475 sizeof(struct mgmt_conn_param));
5476 u16 param_count, expected_len;
5479 if (!lmp_le_capable(hdev))
5480 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5481 MGMT_STATUS_NOT_SUPPORTED);
5483 param_count = __le16_to_cpu(cp->param_count);
5484 if (param_count > max_param_count) {
5485 BT_ERR("load_conn_param: too big param_count value %u",
5487 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5488 MGMT_STATUS_INVALID_PARAMS);
5491 expected_len = sizeof(*cp) + param_count *
5492 sizeof(struct mgmt_conn_param);
5493 if (expected_len != len) {
5494 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5496 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5497 MGMT_STATUS_INVALID_PARAMS);
5500 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Only disabled (internal) entries are cleared; user-added
 * auto-connect entries survive the reload. */
5504 hci_conn_params_clear_disabled(hdev);
5506 for (i = 0; i < param_count; i++) {
5507 struct mgmt_conn_param *param = &cp->params[i];
5508 struct hci_conn_params *hci_param;
5509 u16 min, max, latency, timeout;
5512 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5515 if (param->addr.type == BDADDR_LE_PUBLIC) {
5516 addr_type = ADDR_LE_DEV_PUBLIC;
5517 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5518 addr_type = ADDR_LE_DEV_RANDOM;
5520 BT_ERR("Ignoring invalid connection parameters");
5524 min = le16_to_cpu(param->min_interval);
5525 max = le16_to_cpu(param->max_interval);
5526 latency = le16_to_cpu(param->latency);
5527 timeout = le16_to_cpu(param->timeout);
5529 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5530 min, max, latency, timeout);
/* Range/consistency check per the LE connection parameter rules. */
5532 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5533 BT_ERR("Ignoring invalid connection parameters");
5537 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5540 BT_ERR("Failed to add connection parameters");
5544 hci_param->conn_min_interval = min;
5545 hci_param->conn_max_interval = max;
5546 hci_param->conn_latency = latency;
5547 hci_param->supervision_timeout = timeout;
5550 hci_dev_unlock(hdev);
5552 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark whether the controller
 * has been configured by an external (non-kernel) entity. Toggling
 * this can move the device between the configured and unconfigured
 * index lists. NOTE(review): lock acquisition, goto labels and some
 * braces are elided in this excerpt.
 */
5555 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5556 void *data, u16 len)
5558 struct mgmt_cp_set_external_config *cp = data;
5562 BT_DBG("%s", hdev->name);
5564 if (hdev_is_powered(hdev))
5565 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5566 MGMT_STATUS_REJECTED);
5568 if (cp->config != 0x00 && cp->config != 0x01)
5569 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5570 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful for controllers that declared the external-config
 * quirk. */
5572 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5573 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5574 MGMT_STATUS_NOT_SUPPORTED);
5579 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5582 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5585 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5592 err = new_options(hdev, sk);
/* If configured-ness flipped, remove and re-add the mgmt index so
 * user space sees it move between lists. */
5594 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5595 mgmt_index_removed(hdev);
5597 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5598 set_bit(HCI_CONFIG, &hdev->dev_flags);
5599 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
/* Power the device on briefly to finish configuration. */
5601 queue_work(hdev->req_workqueue, &hdev->power_on);
5603 set_bit(HCI_RAW, &hdev->flags);
5604 mgmt_index_added(hdev);
5609 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public BD_ADDR for
 * controllers whose driver provides a set_bdaddr hook. Setting it
 * may complete configuration of an unconfigured controller.
 * NOTE(review): lock acquisition, goto labels and some braces are
 * elided in this excerpt.
 */
5613 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5614 void *data, u16 len)
5616 struct mgmt_cp_set_public_address *cp = data;
5620 BT_DBG("%s", hdev->name);
5622 if (hdev_is_powered(hdev))
5623 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5624 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a usable public address. */
5626 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5627 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5628 MGMT_STATUS_INVALID_PARAMS);
5630 if (!hdev->set_bdaddr)
5631 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5632 MGMT_STATUS_NOT_SUPPORTED);
5636 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5637 bacpy(&hdev->public_addr, &cp->bdaddr);
5639 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5647 err = new_options(hdev, sk);
/* Newly-configured device: move it out of the unconfigured index
 * list and power it on to apply the address. */
5649 if (is_configured(hdev)) {
5650 mgmt_index_removed(hdev);
5652 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5654 set_bit(HCI_CONFIG, &hdev->dev_flags);
5655 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5657 queue_work(hdev->req_workqueue, &hdev->power_on);
5661 hci_dev_unlock(hdev);
/* Dispatch table for mgmt opcodes, indexed by opcode value. Each
 * entry pairs a handler with a variable-length flag and the minimum
 * (or exact, when not variable) payload size; mgmt_control() uses
 * these for validation before calling func.
 * NOTE(review): struct member declarations between the func pointer
 * and the closing of the struct appear elided in this excerpt.
 */
5665 static const struct mgmt_handler {
5666 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5670 } mgmt_handlers[] = {
5671 { NULL }, /* 0x0000 (no command) */
5672 { read_version, false, MGMT_READ_VERSION_SIZE },
5673 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5674 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5675 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5676 { set_powered, false, MGMT_SETTING_SIZE },
5677 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5678 { set_connectable, false, MGMT_SETTING_SIZE },
5679 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5680 { set_bondable, false, MGMT_SETTING_SIZE },
5681 { set_link_security, false, MGMT_SETTING_SIZE },
5682 { set_ssp, false, MGMT_SETTING_SIZE },
5683 { set_hs, false, MGMT_SETTING_SIZE },
5684 { set_le, false, MGMT_SETTING_SIZE },
5685 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5686 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5687 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5688 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5689 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5690 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5691 { disconnect, false, MGMT_DISCONNECT_SIZE },
5692 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5693 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5694 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5695 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5696 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5697 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5698 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5699 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5700 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5701 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5702 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5703 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5704 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5705 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5706 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5707 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5708 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5709 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5710 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5711 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5712 { set_advertising, false, MGMT_SETTING_SIZE },
5713 { set_bredr, false, MGMT_SETTING_SIZE },
5714 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5715 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5716 { set_secure_conn, false, MGMT_SETTING_SIZE },
5717 { set_debug_keys, false, MGMT_SETTING_SIZE },
5718 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5719 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5720 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5721 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5722 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5723 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5724 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5725 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5726 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5727 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5728 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
/* Entry point for mgmt socket writes: copy the message from user
 * space, validate the header, resolve the target hci_dev, enforce
 * per-opcode index and length rules, then dispatch to the handler
 * table. NOTE(review): declarations, goto labels/targets, kfree and
 * return paths are elided in this excerpt.
 */
5731 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5735 struct mgmt_hdr *hdr;
5736 u16 opcode, index, len;
5737 struct hci_dev *hdev = NULL;
5738 const struct mgmt_handler *handler;
5741 BT_DBG("got %zu bytes", msglen);
5743 if (msglen < sizeof(*hdr))
5746 buf = kmalloc(msglen, GFP_KERNEL);
5750 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5756 opcode = __le16_to_cpu(hdr->opcode);
5757 index = __le16_to_cpu(hdr->index);
5758 len = __le16_to_cpu(hdr->len);
/* Header-declared length must exactly match the payload. */
5760 if (len != msglen - sizeof(*hdr)) {
5765 if (index != MGMT_INDEX_NONE) {
5766 hdev = hci_dev_get(index);
5768 err = cmd_status(sk, index, opcode,
5769 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup/config, or claimed by a user channel,
 * are invisible to mgmt. */
5773 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5774 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5775 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5776 err = cmd_status(sk, index, opcode,
5777 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured devices only accept the configuration opcodes. */
5781 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5782 opcode != MGMT_OP_READ_CONFIG_INFO &&
5783 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5784 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5785 err = cmd_status(sk, index, opcode,
5786 MGMT_STATUS_INVALID_INDEX);
5791 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5792 mgmt_handlers[opcode].func == NULL) {
5793 BT_DBG("Unknown op %u", opcode);
5794 err = cmd_status(sk, index, opcode,
5795 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global opcodes must NOT carry a device index... */
5799 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5800 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5801 err = cmd_status(sk, index, opcode,
5802 MGMT_STATUS_INVALID_INDEX);
/* ...and per-device opcodes MUST carry one. */
5806 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5807 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5808 err = cmd_status(sk, index, opcode,
5809 MGMT_STATUS_INVALID_INDEX);
5813 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed ones
 * need exactly data_len. */
5815 if ((handler->var_len && len < handler->data_len) ||
5816 (!handler->var_len && len != handler->data_len)) {
5817 err = cmd_status(sk, index, opcode,
5818 MGMT_STATUS_INVALID_PARAMS);
5823 mgmt_init_hdev(sk, hdev);
5825 cp = buf + sizeof(*hdr);
5827 err = handler->func(sk, hdev, cp, len);
5841 void mgmt_index_added(struct hci_dev *hdev)
5843 if (hdev->dev_type != HCI_BREDR)
5846 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5849 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5850 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5852 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce a controller index going away: fail all pending mgmt
 * commands for it with INVALID_INDEX, then emit the (UNCONF_)
 * INDEX_REMOVED event. NOTE(review): return statements and the else
 * keyword are elided in this excerpt.
 */
5855 void mgmt_index_removed(struct hci_dev *hdev)
5857 u8 status = MGMT_STATUS_INVALID_INDEX;
5859 if (hdev->dev_type != HCI_BREDR)
5862 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command for this device. */
5865 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5867 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5868 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5870 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5873 /* This function requires the caller holds hdev->lock */
/* Rebuild the pending LE connect/report lists from the stored
 * connection-parameter policies after a power cycle, then kick the
 * background scan. NOTE(review): break statements and closing braces
 * of the switch/loop are elided in this excerpt.
 */
5874 static void restart_le_actions(struct hci_dev *hdev)
5876 struct hci_conn_params *p;
5878 list_for_each_entry(p, &hdev->le_conn_params, list) {
5879 /* Needed for AUTO_OFF case where might not "really"
5880 * have been powered off.
5882 list_del_init(&p->action);
5884 switch (p->auto_connect) {
5885 case HCI_AUTO_CONN_DIRECT:
5886 case HCI_AUTO_CONN_ALWAYS:
5887 list_add(&p->action, &hdev->pend_le_conns);
5889 case HCI_AUTO_CONN_REPORT:
5890 list_add(&p->action, &hdev->pend_le_reports);
5897 hci_update_background_scan(hdev);
/* HCI request completion callback for the power-on request built in
 * powered_update_hci(): restart LE auto-connect actions, answer all
 * pending SET_POWERED commands and broadcast the new settings.
 * NOTE(review): lock acquisition is elided in this excerpt.
 */
5900 static void powered_complete(struct hci_dev *hdev, u8 status)
5902 struct cmd_lookup match = { NULL, hdev };
5904 BT_DBG("status 0x%02x", status);
5908 restart_le_actions(hdev);
5910 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5912 new_settings(hdev, match.sk);
5914 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt-level flags after power-on (SSP, LE host support,
 * advertising data, auth enable, page scan). Returns the result of
 * hci_req_run(), i.e. 0 when commands were queued.
 */
5920 static int powered_update_hci(struct hci_dev *hdev)
5922 struct hci_request req;
5925 hci_req_init(&req, hdev);
/* Enable SSP in the controller if mgmt wants it but the host
 * feature bit is not yet set.
 */
5927 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5928 !lmp_host_ssp_capable(hdev)) {
5931 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5934 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5935 lmp_bredr_capable(hdev)) {
5936 struct hci_cp_write_le_host_supported cp;
5941 /* Check first if we already have the right
5942 * host state (host features set)
5944 if (cp.le != lmp_host_le_capable(hdev) ||
5945 cp.simul != lmp_host_le_br_capable(hdev))
5946 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5950 if (lmp_le_capable(hdev)) {
5951 /* Make sure the controller has a good default for
5952 * advertising data. This also applies to the case
5953 * where BR/EDR was toggled during the AUTO_OFF phase.
5955 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5956 update_adv_data(&req);
5957 update_scan_rsp_data(&req);
5960 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5961 enable_advertising(&req);
/* Sync controller auth enable with the mgmt link-security flag */
5964 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5965 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5966 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5967 sizeof(link_sec), &link_sec);
5969 if (lmp_bredr_capable(hdev)) {
5970 write_fast_connectable(&req, false);
5971 hci_update_page_scan(hdev, &req);
5977 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer of a controller power state change. On power-on
 * the full powered_update_hci() request is issued (completion handled
 * asynchronously); on power-off pending commands are failed with
 * NOT_POWERED, a zero class-of-device is reported if needed, and the
 * new settings are broadcast.
 */
5980 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5982 struct cmd_lookup match = { NULL, hdev };
5983 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5984 u8 zero_cod[] = { 0, 0, 0 };
5987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5991 if (powered_update_hci(hdev) == 0)
5994 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5999 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6000 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
/* Report class of device as cleared if it was non-zero */
6002 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6003 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6004 zero_cod, sizeof(zero_cod), NULL);
6007 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command. -ERFKILL maps to the dedicated
 * RFKILLED status; any other error reports generic FAILED.
 */
6015 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6017 struct pending_cmd *cmd;
6020 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6024 if (err == -ERFKILL)
6025 status = MGMT_STATUS_RFKILLED;
6027 status = MGMT_STATUS_FAILED;
6029 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6031 mgmt_pending_remove(cmd);
/* Discoverable-timeout handler: clears the discoverable flags, turns
 * inquiry scan back off (page scan only) when BR/EDR is enabled,
 * refreshes advertising data and broadcasts the new settings.
 */
6034 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6036 struct hci_request req;
6040 /* When discoverable timeout triggers, then just make sure
6041 * the limited discoverable flag is cleared. Even in the case
6042 * of a timeout triggered from general discoverable, it is
6043 * safe to unconditionally clear the flag.
6045 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6046 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6048 hci_req_init(&req, hdev);
6049 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6050 u8 scan = SCAN_PAGE;
6051 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6052 sizeof(scan), &scan);
/* Advertising data carries the discoverable flags; refresh it */
6055 update_adv_data(&req);
6056 hci_req_run(&req, NULL);
6058 hdev->discov_timeout = 0;
6060 new_settings(hdev, NULL);
6062 hci_dev_unlock(hdev);
/* Emit a NEW_LINK_KEY event for a freshly created BR/EDR link key,
 * with store_hint telling userspace whether to persist it.
 */
6065 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6068 struct mgmt_ev_new_link_key ev;
6070 memset(&ev, 0, sizeof(ev));
6072 ev.store_hint = persistent;
6073 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6074 ev.key.addr.type = BDADDR_BREDR;
6075 ev.key.type = key->type;
6076 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6077 ev.key.pin_len = key->pin_len;
6079 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag onto the
 * mgmt LTK type constants exposed to userspace. Unknown types fall
 * back to MGMT_LTK_UNAUTHENTICATED.
 */
6082 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6084 switch (ltk->type) {
6087 if (ltk->authenticated)
6088 return MGMT_LTK_AUTHENTICATED;
6089 return MGMT_LTK_UNAUTHENTICATED;
6091 if (ltk->authenticated)
6092 return MGMT_LTK_P256_AUTH;
6093 return MGMT_LTK_P256_UNAUTH;
6094 case SMP_LTK_P256_DEBUG:
6095 return MGMT_LTK_P256_DEBUG;
6098 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a NEW_LONG_TERM_KEY event. Keys for non-identity (resolvable or
 * non-resolvable random) addresses are reported with store_hint 0 so
 * userspace does not persist them.
 */
6101 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6103 struct mgmt_ev_new_long_term_key ev;
6105 memset(&ev, 0, sizeof(ev));
6107 /* Devices using resolvable or non-resolvable random addresses
6108 * without providing an identity resolving key don't require
6109 * to store long term keys. Their addresses will change the
6112 * Only when a remote device provides an identity address
6113 * make sure the long term key is stored. If the remote
6114 * identity is known, the long term keys are internally
6115 * mapped to the identity address. So allow static random
6116 * and public addresses here.
/* Top two address bits 11 mark a static random (identity) address */
6118 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6119 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6120 ev.store_hint = 0x00;
6122 ev.store_hint = persistent;
6124 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6125 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6126 ev.key.type = mgmt_ltk_type(key);
6127 ev.key.enc_size = key->enc_size;
6128 ev.key.ediv = key->ediv;
6129 ev.key.rand = key->rand;
6131 if (key->type == SMP_LTK)
6134 memcpy(ev.key.val, key->val, sizeof(key->val));
6136 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_IRK event for an identity resolving key. Only keys that
 * came with an actual RPA get store_hint 1; identity-address-only
 * devices need no stored IRK.
 */
6139 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6141 struct mgmt_ev_new_irk ev;
6143 memset(&ev, 0, sizeof(ev));
6145 /* For identity resolving keys from devices that are already
6146 * using a public address or static random address, do not
6147 * ask for storing this key. The identity resolving key really
6148 * is only mandatory for devices using resolvable random
6151 * Storing all identity resolving keys has the downside that
6152 * they will be also loaded on next boot of the system. More
6153 * identity resolving keys, means more time during scanning is
6154 * needed to actually resolve these addresses.
6156 if (bacmp(&irk->rpa, BDADDR_ANY))
6157 ev.store_hint = 0x01;
6159 ev.store_hint = 0x00;
6161 bacpy(&ev.rpa, &irk->rpa);
6162 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6163 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6164 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6166 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CSRK event for a signature resolving key, applying the
 * same identity-address storage rule as mgmt_new_ltk().
 */
6169 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6172 struct mgmt_ev_new_csrk ev;
6174 memset(&ev, 0, sizeof(ev));
6176 /* Devices using resolvable or non-resolvable random addresses
6177 * without providing an identity resolving key don't require
6178 * to store signature resolving keys. Their addresses will change
6179 * the next time around.
6181 * Only when a remote device provides an identity address
6182 * make sure the signature resolving key is stored. So allow
6183 * static random and public addresses here.
/* Top two address bits 11 mark a static random (identity) address */
6185 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6186 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6187 ev.store_hint = 0x00;
6189 ev.store_hint = persistent;
6191 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6192 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6193 ev.key.master = csrk->master;
6194 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6196 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CONN_PARAM event carrying updated LE connection
 * parameters for a peer. Non-identity addresses are skipped since
 * stored parameters would not survive an address change.
 */
6199 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6200 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6201 u16 max_interval, u16 latency, u16 timeout)
6203 struct mgmt_ev_new_conn_param ev;
6205 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6208 memset(&ev, 0, sizeof(ev));
6209 bacpy(&ev.addr.bdaddr, bdaddr);
6210 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6211 ev.store_hint = store_hint;
6212 ev.min_interval = cpu_to_le16(min_interval);
6213 ev.max_interval = cpu_to_le16(max_interval);
6214 ev.latency = cpu_to_le16(latency);
6215 ev.timeout = cpu_to_le16(timeout);
6217 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length, type, data) at offset eir_len in the
 * eir buffer and return the new length. The caller must ensure the
 * buffer has room for data_len + 2 bytes.
 */
6220 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6223 eir[eir_len++] = sizeof(type) + data_len;
6224 eir[eir_len++] = type;
6225 memcpy(&eir[eir_len], data, data_len);
6226 eir_len += data_len;
/* Emit a DEVICE_CONNECTED event. The EIR payload is either the raw LE
 * advertising data captured on the connection, or (for BR/EDR) the
 * remote name and class of device appended as EIR fields.
 */
6231 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6232 u32 flags, u8 *name, u8 name_len)
6235 struct mgmt_ev_device_connected *ev = (void *) buf;
6238 bacpy(&ev->addr.bdaddr, &conn->dst);
6239 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6241 ev->flags = __cpu_to_le32(flags);
6243 /* We must ensure that the EIR Data fields are ordered and
6244 * unique. Keep it simple for now and avoid the problem by not
6245 * adding any BR/EDR data to the LE adv.
6247 if (conn->le_adv_data_len > 0) {
6248 memcpy(&ev->eir[eir_len],
6249 conn->le_adv_data, conn->le_adv_data_len);
6250 eir_len = conn->le_adv_data_len;
6253 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device if it is non-zero */
6256 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6257 eir_len = eir_append_data(ev->eir, eir_len,
6259 conn->dev_class, 3);
6262 ev->eir_len = cpu_to_le16(eir_len);
6264 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6265 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and hand the command's socket back to the
 * caller through *data so the disconnect event can skip it.
 */
6268 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6270 struct mgmt_cp_disconnect *cp = cmd->param;
6271 struct sock **sk = data;
6272 struct mgmt_rp_disconnect rp;
6274 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6275 rp.addr.type = cp->addr.type;
6277 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6283 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command, emitting the device-unpaired event before completing the
 * command with success.
 */
6286 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6288 struct hci_dev *hdev = data;
6289 struct mgmt_cp_unpair_device *cp = cmd->param;
6290 struct mgmt_rp_unpair_device rp;
6292 memset(&rp, 0, sizeof(rp));
6293 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6294 rp.addr.type = cp->addr.type;
6296 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6298 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6300 mgmt_pending_remove(cmd);
/* Check whether the controller is in the middle of being powered down
 * via a pending SET_POWERED command. NOTE(review): only the lookup of
 * the pending command is visible here — presumably the (not shown)
 * remainder inspects cp->val to decide the return value; confirm
 * against the full source.
 */
6303 bool mgmt_powering_down(struct hci_dev *hdev)
6305 struct pending_cmd *cmd;
6306 struct mgmt_mode *cp;
6308 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a DEVICE_DISCONNECTED event. If a power-down is in progress and
 * this was the last open connection, the deferred power-off work is
 * run immediately. Pending DISCONNECT commands are completed first so
 * the originating socket can be excluded from the broadcast.
 */
6319 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6320 u8 link_type, u8 addr_type, u8 reason,
6321 bool mgmt_connected)
6323 struct mgmt_ev_device_disconnected ev;
6324 struct sock *sk = NULL;
6326 /* The connection is still in hci_conn_hash so test for 1
6327 * instead of 0 to know if this is the last one.
6329 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6330 cancel_delayed_work(&hdev->power_off);
6331 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6334 if (!mgmt_connected)
6337 if (link_type != ACL_LINK && link_type != LE_LINK)
6340 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6342 bacpy(&ev.addr.bdaddr, bdaddr);
6343 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6346 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6351 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with the given HCI failure
 * status, after also flushing pending UNPAIR_DEVICE commands. The
 * pending command's address must match the failed disconnect.
 */
6355 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6356 u8 link_type, u8 addr_type, u8 status)
6358 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6359 struct mgmt_cp_disconnect *cp;
6360 struct mgmt_rp_disconnect rp;
6361 struct pending_cmd *cmd;
6363 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6366 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6372 if (bacmp(bdaddr, &cp->addr.bdaddr))
6375 if (cp->addr.type != bdaddr_type)
6378 bacpy(&rp.addr.bdaddr, bdaddr);
6379 rp.addr.type = bdaddr_type;
6381 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6382 mgmt_status(status), &rp, sizeof(rp));
6384 mgmt_pending_remove(cmd);
/* Emit a CONNECT_FAILED event. Mirrors mgmt_device_disconnected(): if
 * a power-down is pending and this was the last connection, run the
 * deferred power-off immediately.
 */
6387 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6388 u8 addr_type, u8 status)
6390 struct mgmt_ev_connect_failed ev;
6392 /* The connection is still in hci_conn_hash so test for 1
6393 * instead of 0 to know if this is the last one.
6395 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6396 cancel_delayed_work(&hdev->power_off);
6397 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6400 bacpy(&ev.addr.bdaddr, bdaddr);
6401 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6402 ev.status = mgmt_status(status);
6404 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request from the controller to userspace
 * as a PIN_CODE_REQUEST event.
 */
6407 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6409 struct mgmt_ev_pin_code_request ev;
6411 bacpy(&ev.addr.bdaddr, bdaddr);
6412 ev.addr.type = BDADDR_BREDR;
6415 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller has acknowledged the reply.
 */
6418 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6421 struct pending_cmd *cmd;
6422 struct mgmt_rp_pin_code_reply rp;
6424 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6428 bacpy(&rp.addr.bdaddr, bdaddr);
6429 rp.addr.type = BDADDR_BREDR;
6431 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6432 mgmt_status(status), &rp, sizeof(rp));
6434 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status; counterpart of mgmt_pin_code_reply_complete().
 */
6437 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6440 struct pending_cmd *cmd;
6441 struct mgmt_rp_pin_code_reply rp;
6443 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6447 bacpy(&rp.addr.bdaddr, bdaddr);
6448 rp.addr.type = BDADDR_BREDR;
6450 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6451 mgmt_status(status), &rp, sizeof(rp));
6453 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric-comparison pairing value by
 * emitting a USER_CONFIRM_REQUEST event. Returns the mgmt_event()
 * result.
 */
6456 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6457 u8 link_type, u8 addr_type, u32 value,
6460 struct mgmt_ev_user_confirm_request ev;
6462 BT_DBG("%s", hdev->name);
6464 bacpy(&ev.addr.bdaddr, bdaddr);
6465 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6466 ev.confirm_hint = confirm_hint;
6467 ev.value = cpu_to_le32(value);
6469 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey by emitting a
 * USER_PASSKEY_REQUEST event. Returns the mgmt_event() result.
 */
6473 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6474 u8 link_type, u8 addr_type)
6476 struct mgmt_ev_user_passkey_request ev;
6478 BT_DBG("%s", hdev->name);
6480 bacpy(&ev.addr.bdaddr, bdaddr);
6481 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6483 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey
 * (neg-)reply commands: find the pending command for @opcode and
 * complete it with the translated HCI status.
 */
6487 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6488 u8 link_type, u8 addr_type, u8 status,
6491 struct pending_cmd *cmd;
6492 struct mgmt_rp_user_confirm_reply rp;
6495 cmd = mgmt_pending_find(opcode, hdev);
6499 bacpy(&rp.addr.bdaddr, bdaddr);
6500 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6501 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6504 mgmt_pending_remove(cmd);
/* Completion hook for USER_CONFIRM_REPLY; delegates to the shared
 * helper.
 */
6509 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6510 u8 link_type, u8 addr_type, u8 status)
6512 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6513 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion hook for USER_CONFIRM_NEG_REPLY; delegates to the shared
 * helper.
 */
6516 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6517 u8 link_type, u8 addr_type, u8 status)
6519 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6521 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion hook for USER_PASSKEY_REPLY; delegates to the shared
 * helper.
 */
6524 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6525 u8 link_type, u8 addr_type, u8 status)
6527 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6528 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion hook for USER_PASSKEY_NEG_REPLY; delegates to the shared
 * helper.
 */
6531 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6532 u8 link_type, u8 addr_type, u8 status)
6534 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6536 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a PASSKEY_NOTIFY event so userspace can display the passkey
 * (and how many digits were entered so far) during pairing.
 */
6539 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6540 u8 link_type, u8 addr_type, u32 passkey,
6543 struct mgmt_ev_passkey_notify ev;
6545 BT_DBG("%s", hdev->name);
6547 bacpy(&ev.addr.bdaddr, bdaddr);
6548 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6549 ev.passkey = __cpu_to_le32(passkey);
6550 ev.entered = entered;
6552 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure for a connection. If a pairing
 * command is pending for it, the event skips that socket and the
 * pairing is completed with the failure status.
 */
6555 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6557 struct mgmt_ev_auth_failed ev;
6558 struct pending_cmd *cmd;
6559 u8 status = mgmt_status(hci_status);
6561 bacpy(&ev.addr.bdaddr, &conn->dst);
6562 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6565 cmd = find_pairing(conn);
6567 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6568 cmd ? cmd->sk : NULL);
6571 pairing_complete(cmd, status);
/* Completion handler for WRITE_AUTH_ENABLE: on failure, fail pending
 * SET_LINK_SECURITY commands; on success, sync the HCI_LINK_SECURITY
 * flag with the controller state, answer pending commands and
 * broadcast new settings if the flag changed.
 */
6574 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6576 struct cmd_lookup match = { NULL, hdev };
6580 u8 mgmt_err = mgmt_status(status);
6581 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6582 cmd_status_rsp, &mgmt_err);
6586 if (test_bit(HCI_AUTH, &hdev->flags))
6587 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6590 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6593 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6597 new_settings(hdev, match.sk);
/* Queue a WRITE_EIR command that clears the extended inquiry response
 * data (both the cached copy in hdev->eir and the controller's copy).
 * No-op on controllers without extended inquiry support.
 */
6603 static void clear_eir(struct hci_request *req)
6605 struct hci_dev *hdev = req->hdev;
6606 struct hci_cp_write_eir cp;
6608 if (!lmp_ext_inq_capable(hdev))
6611 memset(hdev->eir, 0, sizeof(hdev->eir));
6613 memset(&cp, 0, sizeof(cp));
6615 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for WRITE_SSP_MODE: on failure roll back the
 * SSP/HS flags and fail pending SET_SSP commands; on success sync the
 * flags, answer pending commands, broadcast settings if changed, and
 * follow up with SSP debug-mode or EIR-clearing HCI commands.
 */
6618 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6620 struct cmd_lookup match = { NULL, hdev };
6621 struct hci_request req;
6622 bool changed = false;
6625 u8 mgmt_err = mgmt_status(status);
/* Failed enable: revert the optimistically-set SSP flag */
6627 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6628 &hdev->dev_flags)) {
6629 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6630 new_settings(hdev, NULL);
6633 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6639 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6641 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High speed requires SSP; disable it along with SSP */
6643 changed = test_and_clear_bit(HCI_HS_ENABLED,
6646 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6649 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6652 new_settings(hdev, match.sk);
6657 hci_req_init(&req, hdev);
6659 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6660 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6661 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6662 sizeof(enable), &enable);
6668 hci_req_run(&req, NULL);
/* Completion handler for the Secure Connections enable command: on
 * failure roll back the SC flags and fail pending SET_SECURE_CONN
 * commands; on success sync HCI_SC_ENABLED/HCI_SC_ONLY, answer
 * pending commands and broadcast settings if changed.
 */
6671 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6673 struct cmd_lookup match = { NULL, hdev };
6674 bool changed = false;
6677 u8 mgmt_err = mgmt_status(status);
/* Failed enable: revert the optimistically-set SC flags */
6680 if (test_and_clear_bit(HCI_SC_ENABLED,
6682 new_settings(hdev, NULL);
6683 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6686 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6687 cmd_status_rsp, &mgmt_err);
6692 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6694 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6695 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6698 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6699 settings_rsp, &match);
6702 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference that the caller must
 * release.
 */
6708 static void sk_lookup(struct pending_cmd *cmd, void *data)
6710 struct cmd_lookup *match = data;
6712 if (match->sk == NULL) {
6713 match->sk = cmd->sk;
6714 sock_hold(match->sk);
/* Completion handler for class-of-device updates: find the socket of
 * any pending class/UUID command (so the broadcast can skip it) and
 * emit the CLASS_OF_DEV_CHANGED event.
 */
6718 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6721 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6723 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6724 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6725 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6728 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion handler for a local-name change: update the cached name
 * and emit LOCAL_NAME_CHANGED, unless the change happened as part of
 * the power-on sequence (in which case no event is sent).
 */
6735 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6737 struct mgmt_cp_set_local_name ev;
6738 struct pending_cmd *cmd;
6743 memset(&ev, 0, sizeof(ev));
6744 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6745 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6747 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6749 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6751 /* If this is a HCI command related to powering on the
6752 * HCI dev don't send any mgmt signals.
6754 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6758 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6759 cmd ? cmd->sk : NULL);
/* Completion handler for READ_LOCAL_OOB_DATA: reply to the pending
 * command with either the extended response (192- and 256-bit hash and
 * randomizer, when Secure Connections data is available) or the
 * legacy 192-bit-only response.
 */
6762 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6763 u8 *rand192, u8 *hash256, u8 *rand256,
6766 struct pending_cmd *cmd;
6768 BT_DBG("%s status %u", hdev->name, status);
6770 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6775 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6776 mgmt_status(status));
6778 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
6779 struct mgmt_rp_read_local_oob_ext_data rp;
6781 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6782 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
6784 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6785 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
6787 cmd_complete(cmd->sk, hdev->id,
6788 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6791 struct mgmt_rp_read_local_oob_data rp;
6793 memcpy(rp.hash, hash192, sizeof(rp.hash));
6794 memcpy(rp.rand, rand192, sizeof(rp.rand));
6796 cmd_complete(cmd->sk, hdev->id,
6797 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6802 mgmt_pending_remove(cmd);
/* Emit a DEVICE_FOUND event for a discovered remote device, packing
 * EIR data, an optional class-of-device field and any scan response
 * into the event buffer. Results are filtered when no discovery is
 * active or when below a configured RSSI threshold.
 */
6805 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6806 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6807 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6810 struct mgmt_ev_device_found *ev = (void *) buf;
6813 /* Don't send events for a non-kernel initiated discovery. With
6814 * LE one exception is if we have pend_le_reports > 0 in which
6815 * case we're doing passive scanning and want these events.
6817 if (!hci_discovery_active(hdev)) {
6818 if (link_type == ACL_LINK)
6820 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6824 /* When using service discovery with a RSSI threshold, then check
6825 * if such a RSSI threshold is specified. If a RSSI threshold has
6826 * been specified, then all results with a RSSI smaller than the
6827 * RSSI threshold will be dropped.
6829 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
6830 rssi < hdev->discovery.rssi)
6833 /* Make sure that the buffer is big enough. The 5 extra bytes
6834 * are for the potential CoD field.
6836 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6839 memset(buf, 0, sizeof(buf));
6841 bacpy(&ev->addr.bdaddr, bdaddr);
6842 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6844 ev->flags = cpu_to_le32(flags);
6847 memcpy(ev->eir, eir, eir_len);
/* Append CoD only when the EIR data doesn't already carry one */
6849 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6850 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6853 if (scan_rsp_len > 0)
6854 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6856 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6857 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6859 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a DEVICE_FOUND event carrying only the remote's complete name
 * as an EIR field — used when a name-resolution result arrives during
 * discovery.
 */
6862 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6863 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6865 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type bytes */
6866 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6869 ev = (struct mgmt_ev_device_found *) buf;
6871 memset(buf, 0, sizeof(buf));
6873 bacpy(&ev->addr.bdaddr, bdaddr);
6874 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6877 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6880 ev->eir_len = cpu_to_le16(eir_len);
6882 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a DISCOVERING event reporting the discovery type and
 * whether discovery just started (1) or stopped (0).
 */
6885 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6887 struct mgmt_ev_discovering ev;
6889 BT_DBG("%s discovering %u", hdev->name, discovering);
6891 memset(&ev, 0, sizeof(ev));
6892 ev.type = hdev->discovery.type;
6893 ev.discovering = discovering;
6895 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Debug-only completion callback for re-enabling advertising. */
6898 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6900 BT_DBG("%s status %u", hdev->name, status);
/* Re-issue the advertising-enable HCI request, e.g. after advertising
 * was implicitly stopped. No-op unless the HCI_ADVERTISING flag is
 * set.
 */
6903 void mgmt_reenable_advertising(struct hci_dev *hdev)
6905 struct hci_request req;
6907 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6910 hci_req_init(&req, hdev);
6911 enable_advertising(&req);
6912 hci_req_run(&req, adv_enable_complete);