2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
/* Management interface version/revision reported by Read Version. */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Opcodes of all mgmt commands this kernel supports; reported to user
 * space via MGMT_OP_READ_COMMANDS.  NOTE(review): several entries and
 * the closing brace appear elided in this extract.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
/* Event numbers this kernel can emit; reported alongside mgmt_commands[]
 * by MGMT_OP_READ_COMMANDS.  NOTE(review): some entries and the closing
 * brace are elided in this extract.
 */
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
/* Service-cache expiry: 2 seconds, expressed in jiffies. */
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as "powered" for mgmt purposes only when HCI_UP
 * is set and it is not merely in the transient auto-power-on state.
 */
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): member of a struct whose header is elided in this
 * extract — presumably struct pending_cmd's list linkage; confirm.
 */
136 struct list_head list;
144 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (Core Spec Vol 2, Part D).
 * NOTE(review): entry 0 (Success) is elided in this extract.
 */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Map an HCI status code to its mgmt equivalent; any code outside the
 * table falls back to the generic MGMT_STATUS_FAILED.
 */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
/* Build an mgmt event skb (header + payload) and broadcast it to all
 * control sockets except @skip_sk.  @hdev may be NULL, in which case
 * the index is MGMT_INDEX_NONE.  NOTE(review): the allocation-failure
 * check and return path are elided in this extract.
 */
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
/* Index depends on whether a controller is associated with the event */
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
/* Optional payload follows the fixed header */
236 memcpy(skb_put(skb, data_len), data, data_len);
/* Timestamp for accurate monitor/btmon traces */
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
/* Send an MGMT_EV_CMD_STATUS event for command @cmd with @status to a
 * single socket.  Used for command failures (no response parameters).
 * NOTE(review): the ev->status assignment and return are elided here.
 */
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
/* Queue directly on the requesting socket only */
270 err = sock_queue_rcv_skb(sk, skb);
/* Send an MGMT_EV_CMD_COMPLETE event for @cmd, carrying @rp_len bytes
 * of response parameters @rp, to a single socket.
 */
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
/* Event struct plus trailing response parameters in one put */
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: report the interface version/revision.
 * Works without a controller, hence MGMT_INDEX_NONE.
 */
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: return the supported command and
 * event opcode lists as little-endian u16s appended to the reply.
 */
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array;
 * put_unaligned_le16 is used because rp->opcodes may be unaligned.
 */
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: enumerate configured BR/EDR
 * controller indexes.  The list is counted once to size the reply,
 * then walked again to fill it; both passes hold hci_dev_list_lock,
 * so the allocation must be GFP_ATOMIC.
 */
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
/* First pass: count eligible (configured BR/EDR) controllers */
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indexes, skipping devices that are still being
 * set up / configured or claimed by a user channel.
 */
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
/* Recompute length: the second pass may have skipped devices */
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list() but selects UNconfigured BR/EDR controllers
 * (HCI_UNCONFIGURED set) instead of configured ones.
 */
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured BR/EDR controllers */
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indexes with the same exclusions as the
 * configured-index list.
 */
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller is "configured" once every required external step is
 * done: external config provided (if the quirk demands it) and a valid
 * public address set (if the invalid-bdaddr quirk is present).
 * NOTE(review): the return statements are elided in this extract.
 */
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Bitmask of configuration options the controller still needs before
 * it can be considered configured (mirror of is_configured()).
 */
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask to every control socket except @skip.
 */
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
/* Reply to a configuration command with the missing-options mask. */
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus which
 * config options are supported and which are still missing.
 * NOTE(review): the matching hci_dev_lock() call is elided here.
 */
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address can only be set if the driver provides a hook */
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Build the "supported settings" bitmask for Read Controller Info,
 * derived from controller capabilities (LMP features, HCI version,
 * quirks).  Distinct from get_current_settings(), which reflects the
 * runtime state.
 */
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_PAIRABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires interlaced page scan (>= BT 1.2) */
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
/* Secure Connections may also be force-enabled for testing */
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 settings |= MGMT_SETTING_CONFIGURATION;
/* Build the "current settings" bitmask from the runtime dev_flags;
 * one bit per mgmt setting that is presently active.
 */
590 static u32 get_current_settings(struct hci_dev *hdev)
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
606 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_PAIRABLE;
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
/* SDP service class for Device ID / PnP information */
639 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit UUIDs to @data.
 * Emits EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME when @len runs
 * out.  Returns the advanced write pointer.
 */
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
649 list_for_each_entry(uuid, &hdev->uuids, list) {
652 if (uuid->size != 16)
/* 16-bit alias lives at bytes 12..13 of the 128-bit form */
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* Device ID is advertised via its own EIR field, not here */
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
665 uuids_start[1] = EIR_UUID16_ALL;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
/* Little-endian on the wire */
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit UUIDs to @data;
 * same ALL/SOME truncation handling as create_uuid16_list().
 */
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
698 uuids_start[1] = EIR_UUID32_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias occupies bytes 12..15 of the 128-bit form */
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit UUIDs to @data;
 * same ALL/SOME truncation handling as the 16/32-bit variants.
 */
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
731 uuids_start[1] = EIR_UUID128_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
741 memcpy(ptr, uuid->uuid, 16);
743 uuids_start[0] += 16;
/* Look up a pending mgmt command on @hdev by opcode; NULL-return path
 * is elided in this extract.  Caller must hold the hdev lock.
 * NOTE(review): locking requirement inferred from kernel convention —
 * confirm against the full file.
 */
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 struct pending_cmd *cmd;
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
/* Like mgmt_pending_find() but additionally matches on the command's
 * user_data pointer.
 */
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
765 struct pending_cmd *cmd;
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
770 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data containing the local name,
 * shortened (EIR_NAME_SHORT) if it does not fit.  Returns the total
 * data length written.
 */
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
782 name_len = strlen(hdev->dev_name);
/* Reserve 2 bytes for the field's length and type octets */
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 if (name_len > max_len) {
788 ptr[1] = EIR_NAME_SHORT;
790 ptr[1] = EIR_NAME_COMPLETE;
/* Field length counts the type octet plus the name bytes */
792 ptr[0] = name_len + 1;
794 memcpy(ptr + 2, hdev->dev_name, name_len);
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
/* Queue an HCI Set Scan Response Data command if LE is enabled and the
 * data actually changed since the last update (avoids redundant HCI
 * traffic).
 */
803 static void update_scan_rsp_data(struct hci_request *req)
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
812 memset(&cp, 0, sizeof(cp));
814 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI command when nothing changed */
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
/* Cache the new data so the next comparison works */
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (general/limited/
 * none) for the local device, preferring the value from a pending
 * Set Discoverable command over the current dev_flags.
 */
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 struct pending_cmd *cmd;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 struct mgmt_mode *cp = cmd->param;
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
/* Fill @ptr with LE advertising data: the flags field (discoverability
 * plus BR/EDR-not-supported when BR/EDR is off) and, if known, the
 * advertising TX power.  Returns the total length written.
 * NOTE(review): the flags-field emission lines are elided here.
 */
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 u8 ad_len = 0, flags = 0;
856 flags |= get_adv_discov_flags(hdev);
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
862 BT_DBG("adv flags 0x%02x", flags);
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI Set Advertising Data command if LE is enabled and the
 * generated data differs from what is cached; mirrors
 * update_scan_rsp_data().
 */
884 static void update_adv_data(struct hci_request *req)
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
893 memset(&cp, 0, sizeof(cp));
895 len = create_adv_data(hdev, cp.data);
/* Skip the HCI command when nothing changed */
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Public helper: run update_adv_data() as a standalone HCI request. */
909 int mgmt_update_adv_data(struct hci_dev *hdev)
911 struct hci_request req;
913 hci_req_init(&req, hdev);
914 update_adv_data(&req);
916 return hci_req_run(&req, NULL);
/* Build the Extended Inquiry Response payload in @data: local name,
 * inquiry TX power, Device ID, then 16/32/128-bit UUID lists, each
 * bounded by the remaining HCI_MAX_EIR_LENGTH space.
 */
919 static void create_eir(struct hci_dev *hdev, u8 *data)
924 name_len = strlen(hdev->dev_name);
930 ptr[1] = EIR_NAME_SHORT;
932 ptr[1] = EIR_NAME_COMPLETE;
934 /* EIR Data length */
935 ptr[0] = name_len + 1;
937 memcpy(ptr + 2, hdev->dev_name, name_len);
939 ptr += (name_len + 2);
942 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
944 ptr[1] = EIR_TX_POWER;
945 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID (DI profile) field, only when a source is assigned */
950 if (hdev->devid_source > 0) {
952 ptr[1] = EIR_DEVICE_ID;
954 put_unaligned_le16(hdev->devid_source, ptr + 2);
955 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
956 put_unaligned_le16(hdev->devid_product, ptr + 6);
957 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* UUID lists consume whatever space is left */
962 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
963 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command when the controller is powered,
 * supports extended inquiry, has SSP enabled, the service cache is not
 * active, and the generated EIR actually differs from the cached copy.
 */
967 static void update_eir(struct hci_request *req)
969 struct hci_dev *hdev = req->hdev;
970 struct hci_cp_write_eir cp;
972 if (!hdev_is_powered(hdev))
975 if (!lmp_ext_inq_capable(hdev))
978 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
/* While the service cache is active, EIR updates are deferred */
981 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
984 memset(&cp, 0, sizeof(cp));
986 create_eir(hdev, cp.data);
988 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
991 memcpy(hdev->eir, cp.data, sizeof(cp.data));
993 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the service-class octet of the Class of Device.
 */
996 static u8 get_service_classes(struct hci_dev *hdev)
998 struct bt_uuid *uuid;
1001 list_for_each_entry(uuid, &hdev->uuids, list)
1002 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command built from minor/major
 * class and the UUID service hints, skipping when powered off, BR/EDR
 * disabled, the service cache is active, or nothing changed.
 */
1007 static void update_class(struct hci_request *req)
1009 struct hci_dev *hdev = req->hdev;
1012 BT_DBG("%s", hdev->name);
1014 if (!hdev_is_powered(hdev))
1017 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1020 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1023 cod[0] = hdev->minor_class;
1024 cod[1] = hdev->major_class;
1025 cod[2] = get_service_classes(hdev);
/* Limited-discoverable mode sets an extra bit in the CoD (elided) */
1027 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1030 if (memcmp(cod, hdev->dev_class, 3) == 0)
1033 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Effective connectable state: the value from a pending
 * Set Connectable command takes precedence over HCI_CONNECTABLE.
 */
1036 static bool get_connectable(struct hci_dev *hdev)
1038 struct pending_cmd *cmd;
1040 /* If there's a pending mgmt command the flag will not yet have
1041 * it's final value, so check for this first.
1043 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1045 struct mgmt_mode *cp = cmd->param;
1049 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue an LE Set Advertise Enable (0x00) command to stop advertising.
 * NOTE(review): the `enable = 0x00` declaration is elided here.
 */
1052 static void disable_advertising(struct hci_request *req)
1056 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands that (re)start LE advertising: pick the own
 * address type, set advertising parameters, then enable.  Bails out
 * when an LE connection exists or the random address can't be updated.
 */
1059 static void enable_advertising(struct hci_request *req)
1061 struct hci_dev *hdev = req->hdev;
1062 struct hci_cp_le_set_adv_param cp;
1063 u8 own_addr_type, enable = 0x01;
/* Advertising while an LE link is up is not supported here */
1066 if (hci_conn_num(hdev, LE_LINK) > 0)
1069 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1070 disable_advertising(req);
1072 /* Clear the HCI_LE_ADV bit temporarily so that the
1073 * hci_update_random_address knows that it's safe to go ahead
1074 * and write a new random address. The flag will be set back on
1075 * as soon as the SET_ADV_ENABLE HCI command completes.
1077 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1079 connectable = get_connectable(hdev);
1081 /* Set require_privacy to true only when non-connectable
1082 * advertising is used. In that case it is fine to use a
1083 * non-resolvable private address.
1085 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1088 memset(&cp, 0, sizeof(cp));
1089 cp.min_interval = cpu_to_le16(0x0800);
1090 cp.max_interval = cpu_to_le16(0x0800);
1091 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1092 cp.own_address_type = own_addr_type;
1093 cp.channel_map = hdev->le_adv_channel_map;
1095 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1097 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: expire the service cache and push the deferred class/
 * EIR updates to the controller.  NOTE(review): the hci_dev_lock() and
 * the update_class()/update_eir() calls are elided in this extract.
 */
1100 static void service_cache_off(struct work_struct *work)
1102 struct hci_dev *hdev = container_of(work, struct hci_dev,
1103 service_cache.work);
1104 struct hci_request req;
1106 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1109 hci_req_init(&req, hdev);
1116 hci_dev_unlock(hdev);
1118 hci_req_run(&req, NULL);
/* Delayed work: mark the Resolvable Private Address as expired and, if
 * advertising is on, restart it so a fresh RPA gets programmed.
 */
1121 static void rpa_expired(struct work_struct *work)
1123 struct hci_dev *hdev = container_of(work, struct hci_dev,
1125 struct hci_request req;
1129 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1131 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1134 /* The generation of a new RPA and programming it into the
1135 * controller happens in the enable_advertising() function.
1137 hci_req_init(&req, hdev);
1138 enable_advertising(&req);
1139 hci_req_run(&req, NULL);
/* One-time per-controller mgmt setup, run on first mgmt contact:
 * install the delayed-work handlers and clear the implicit pairable
 * default.  Idempotent thanks to test_and_set_bit().
 */
1142 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1144 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1147 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1148 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1150 /* Non-mgmt controlled devices get this bit set
1151 * implicitly so that pairing works for them, however
1152 * for mgmt we require user-space to explicitly enable
1155 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: report address, HCI version,
 * manufacturer, supported/current settings, class, and names.
 * NOTE(review): the matching hci_dev_lock() call is elided here.
 */
1158 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1159 void *data, u16 data_len)
1161 struct mgmt_rp_read_info rp;
1163 BT_DBG("sock %p %s", sk, hdev->name);
1167 memset(&rp, 0, sizeof(rp));
1169 bacpy(&rp.bdaddr, &hdev->bdaddr);
1171 rp.version = hdev->hci_ver;
1172 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1174 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1175 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1177 memcpy(rp.dev_class, hdev->dev_class, 3);
1179 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1180 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1182 hci_dev_unlock(hdev);
1184 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending_cmd.  NOTE(review): body is elided in this
 * extract — presumably frees cmd->param and cmd itself; confirm.
 */
1188 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for @opcode on @hdev, copy @len bytes of the
 * request parameters into it, and queue it on hdev->mgmt_pending.
 * NOTE(review): allocation-failure cleanup and the socket reference
 * handling are elided in this extract.
 */
1195 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1196 struct hci_dev *hdev, void *data,
1199 struct pending_cmd *cmd;
1201 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1205 cmd->opcode = opcode;
1206 cmd->index = hdev->id;
/* Keep a private copy of the parameters for the completion path */
1208 cmd->param = kmalloc(len, GFP_KERNEL);
1215 memcpy(cmd->param, data, len);
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb for every pending command on @hdev matching @opcode
 * (opcode 0 matches all).  Safe iteration: @cb may remove entries.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and free it. */
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the full current-settings
 * bitmask (the standard mgmt response for mode commands).
 */
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, fast-track the pending power-off work.
 */
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to abort whatever discovery phase is
 * active (inquiry, LE scan, or name resolution) plus any passive LE
 * scan.  Returns whether anything was queued (return paths elided).
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* LE scan case: stop the auto-disable timer and scan itself */
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Power-off preparation: one batched HCI request that disables page/
 * inquiry scan, stops advertising and discovery, and tears down or
 * rejects every connection.  Returns the hci_req_run() result.
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
/* Per-connection teardown, keyed on connection state (case labels
 * for the established/connecting/incoming states are elided).
 */
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject
 * duplicates, short-circuit auto-off cancellation and no-op requests,
 * then queue either the power-on work or a full HCI cleanup followed
 * by delayed power-off.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
/* Only 0x00 (off) and 0x01 (on) are valid mode values */
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Powered may be in flight at a time */
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* If the device was about to auto-off, cancel that and treat this
 * request as taking over the power state directly.
 */
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
/* No state change requested: just echo current settings */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all control sockets except @skip.
 */
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: broadcast new settings to every control socket. */
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
/* NOTE(review): member of a struct whose surrounding definition is
 * elided in this extract — presumably struct cmd_lookup (see
 * settings_rsp() below, which casts its data to it); confirm.
 */
1447 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, remember the first socket (with a reference)
 * in the cmd_lookup match, and free the command.
 */
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
/* Keep exactly one referenced socket for a later skip/notify */
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status pointed to by @data, then remove it.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* Map the controller's BR/EDR state to a mgmt status: NOT_SUPPORTED when
 * the hardware lacks BR/EDR, REJECTED when BR/EDR is disabled, else SUCCESS.
 */
1475 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1477 if (!lmp_bredr_capable(hdev))
1478 return MGMT_STATUS_NOT_SUPPORTED;
1479 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1480 return MGMT_STATUS_REJECTED;
1482 return MGMT_STATUS_SUCCESS;
/* LE counterpart of mgmt_bredr_support(): NOT_SUPPORTED without LE
 * hardware, REJECTED while LE is disabled, otherwise SUCCESS.
 */
1485 static u8 mgmt_le_support(struct hci_dev *hdev)
1487 if (!lmp_le_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1490 return MGMT_STATUS_REJECTED;
1492 return MGMT_STATUS_SUCCESS;
/* HCI request completion for Set Discoverable: on error report the mapped
 * status; on success update HCI_DISCOVERABLE, (re)arm the discoverable
 * timeout, answer the pending command and broadcast new settings.
 * (Excerpt: some lines are elided from this view.)
 */
1495 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1497 struct pending_cmd *cmd;
1498 struct mgmt_mode *cp;
1499 struct hci_request req;
1502 BT_DBG("status 0x%02x", status);
1506 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1511 u8 mgmt_err = mgmt_status(status);
1512 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1513 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1519 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the auto-disable timer when a timeout was requested. */
1522 if (hdev->discov_timeout > 0) {
1523 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1524 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1528 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1532 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1537 /* When the discoverable mode gets changed, make sure
1538 * that class of device has the limited discoverable
1539 * bit correctly set.
1541 hci_req_init(&req, hdev);
1543 hci_req_run(&req, NULL);
1546 mgmt_pending_remove(cmd);
1549 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE (val: 0x00 off, 0x01 general,
 * 0x02 limited; plus a timeout in seconds). Validates parameters,
 * handles the powered-off and timeout-only cases without HCI traffic,
 * and otherwise builds an HCI request (IAC LAP / scan enable for BR/EDR,
 * advertising data for LE-only) completed by set_discoverable_complete().
 * (Excerpt: some lines are elided from this view.)
 */
1552 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1555 struct mgmt_cp_set_discoverable *cp = data;
1556 struct pending_cmd *cmd;
1557 struct hci_request req;
1562 BT_DBG("request for %s", hdev->name);
/* Need at least one of BR/EDR or LE enabled. */
1564 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1565 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1566 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1567 MGMT_STATUS_REJECTED);
1569 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1570 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571 MGMT_STATUS_INVALID_PARAMS);
1573 timeout = __le16_to_cpu(cp->timeout);
1575 /* Disabling discoverable requires that no timeout is set,
1576 * and enabling limited discoverable requires a timeout.
1578 if ((cp->val == 0x00 && timeout > 0) ||
1579 (cp->val == 0x02 && timeout == 0))
1580 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1581 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1585 if (!hdev_is_powered(hdev) && timeout > 0) {
1586 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1591 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1592 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1593 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1598 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1599 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_REJECTED);
1604 if (!hdev_is_powered(hdev)) {
1605 bool changed = false;
1607 /* Setting limited discoverable when powered off is
1608 * not a valid operation since it requires a timeout
1609 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1611 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1612 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1616 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1621 err = new_settings(hdev, sk);
1626 /* If the current mode is the same, then just update the timeout
1627 * value with the new value. And if only the timeout gets updated,
1628 * then no need for any HCI transactions.
1630 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1631 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1632 &hdev->dev_flags)) {
1633 cancel_delayed_work(&hdev->discov_off);
1634 hdev->discov_timeout = timeout;
1636 if (cp->val && hdev->discov_timeout > 0) {
1637 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1638 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1642 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1646 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1652 /* Cancel any potential discoverable timeout that might be
1653 * still active and store new timeout value. The arming of
1654 * the timeout happens in the complete handler.
1656 cancel_delayed_work(&hdev->discov_off);
1657 hdev->discov_timeout = timeout;
1659 /* Limited discoverable mode */
1660 if (cp->val == 0x02)
1661 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1663 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1665 hci_req_init(&req, hdev);
1667 /* The procedure for LE-only controllers is much simpler - just
1668 * update the advertising data.
1670 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1676 struct hci_cp_write_current_iac_lap hci_cp;
1678 if (cp->val == 0x02) {
1679 /* Limited discoverable mode */
1680 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian byte order. */
1681 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1682 hci_cp.iac_lap[1] = 0x8b;
1683 hci_cp.iac_lap[2] = 0x9e;
1684 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1685 hci_cp.iac_lap[4] = 0x8b;
1686 hci_cp.iac_lap[5] = 0x9e;
1688 /* General discoverable mode */
1690 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1691 hci_cp.iac_lap[1] = 0x8b;
1692 hci_cp.iac_lap[2] = 0x9e;
/* Parameter length: 1 byte count plus 3 bytes per IAC LAP. */
1695 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1696 (hci_cp.num_iac * 3) + 1, &hci_cp);
1698 scan |= SCAN_INQUIRY;
1700 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1703 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1706 update_adv_data(&req);
1708 err = hci_req_run(&req, set_discoverable_complete);
1710 mgmt_pending_remove(cmd);
1713 hci_dev_unlock(hdev);
/* Queue HCI page-scan activity/type commands implementing "fast
 * connectable": interlaced scanning with a 160 ms interval when enabled,
 * standard scanning with the 1.28 s default otherwise. No-ops for
 * non-BR/EDR controllers, pre-1.2 controllers, and unchanged parameters.
 * (Excerpt: some lines are elided from this view.)
 */
1717 static void write_fast_connectable(struct hci_request *req, bool enable)
1719 struct hci_dev *hdev = req->hdev;
1720 struct hci_cp_write_page_scan_activity acp;
1723 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Page scan type/activity commands require Bluetooth 1.2 or later. */
1726 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1730 type = PAGE_SCAN_TYPE_INTERLACED;
1732 /* 160 msec page scan interval */
1733 acp.interval = cpu_to_le16(0x0100);
1735 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1737 /* default 1.28 sec page scan */
1738 acp.interval = cpu_to_le16(0x0800);
1741 acp.window = cpu_to_le16(0x0012);
/* Only send commands for values that actually change. */
1743 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1744 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1745 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1748 if (hdev->page_scan_type != type)
1749 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion for Set Connectable: report errors, otherwise
 * update HCI_CONNECTABLE (clearing HCI_DISCOVERABLE on disable), answer
 * the pending command and, when flags changed, broadcast new settings and
 * refresh advertising data / background scan.
 * (Excerpt: some lines are elided from this view.)
 */
1752 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1754 struct pending_cmd *cmd;
1755 struct mgmt_mode *cp;
1756 bool conn_changed, discov_changed;
1758 BT_DBG("status 0x%02x", status);
1762 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1767 u8 mgmt_err = mgmt_status(status);
1768 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1774 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1776 discov_changed = false;
1778 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
/* Turning connectable off also disables discoverable. */
1780 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1784 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1786 if (conn_changed || discov_changed) {
1787 new_settings(hdev, cmd->sk);
1789 mgmt_update_adv_data(hdev);
1790 hci_update_background_scan(hdev);
1794 mgmt_pending_remove(cmd);
1797 hci_dev_unlock(hdev);
/* Apply a connectable change purely in dev_flags (no HCI traffic) —
 * used when powered off or when the request produced no HCI commands.
 * Clearing connectable also clears discoverable. Sends the settings
 * response and, on change, refreshes background scan and broadcasts
 * new settings. (Excerpt: some lines are elided from this view.)
 */
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1803 bool changed = false;
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1821 hci_update_background_scan(hdev);
1822 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE. Validates the request, takes the
 * flags-only path when powered off, otherwise builds an HCI request that
 * updates scan enable (BR/EDR) or advertising data (LE-only), disables
 * fast connectable when appropriate, and completes via
 * set_connectable_complete(). (Excerpt: some lines are elided.)
 */
1828 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1831 struct mgmt_mode *cp = data;
1832 struct pending_cmd *cmd;
1833 struct hci_request req;
1837 BT_DBG("request for %s", hdev->name);
1839 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1840 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1841 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1842 MGMT_STATUS_REJECTED);
1844 if (cp->val != 0x00 && cp->val != 0x01)
1845 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1846 MGMT_STATUS_INVALID_PARAMS);
1850 if (!hdev_is_powered(hdev)) {
1851 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1855 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1856 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1857 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1862 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1868 hci_req_init(&req, hdev);
1870 /* If BR/EDR is not enabled and we disable advertising as a
1871 * by-product of disabling connectable, we need to update the
1872 * advertising flags.
1874 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1876 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1877 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1879 update_adv_data(&req);
1880 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Stop the discoverable timeout before scan state changes. */
1886 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1887 hdev->discov_timeout > 0)
1888 cancel_delayed_work(&hdev->discov_off);
1891 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1894 /* If we're going from non-connectable to connectable or
1895 * vice-versa when fast connectable is enabled ensure that fast
1896 * connectable gets disabled. write_fast_connectable won't do
1897 * anything if the page scan parameters are already what they
1900 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1901 write_fast_connectable(&req, false);
1903 /* Update the advertising parameters if necessary */
1904 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1905 enable_advertising(&req);
1907 err = hci_req_run(&req, set_connectable_complete);
1909 mgmt_pending_remove(cmd);
/* No HCI commands were queued: fall back to the flags-only path. */
1910 if (err == -ENODATA)
1911 err = set_connectable_update_settings(hdev, sk,
1917 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PAIRABLE: toggle HCI_PAIRABLE in dev_flags
 * (a pure host-side setting; no HCI commands needed), reply with the
 * settings, and broadcast new settings when the flag actually changed.
 * (Excerpt: some lines are elided from this view.)
 */
1921 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1924 struct mgmt_mode *cp = data;
1928 BT_DBG("request for %s", hdev->name);
1930 if (cp->val != 0x00 && cp->val != 0x01)
1931 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1932 MGMT_STATUS_INVALID_PARAMS);
1937 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1939 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1941 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1946 err = new_settings(hdev, sk);
1949 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY (BR/EDR authentication). When
 * powered off only HCI_LINK_SECURITY is toggled; when powered, issues
 * HCI Write Authentication Enable unless the controller state already
 * matches. (Excerpt: some lines are elided from this view.)
 */
1953 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1956 struct mgmt_mode *cp = data;
1957 struct pending_cmd *cmd;
1961 BT_DBG("request for %s", hdev->name);
1963 status = mgmt_bredr_support(hdev);
1965 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1968 if (cp->val != 0x00 && cp->val != 0x01)
1969 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1970 MGMT_STATUS_INVALID_PARAMS);
1974 if (!hdev_is_powered(hdev)) {
1975 bool changed = false;
1977 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1978 &hdev->dev_flags)) {
1979 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1983 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1988 err = new_settings(hdev, sk);
1993 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1994 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches the requested value. */
2001 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2002 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2012 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2014 mgmt_pending_remove(cmd);
2019 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). Requires an
 * SSP-capable BR/EDR controller; disabling SSP also clears High Speed.
 * Powered-off requests only toggle flags; powered requests send HCI
 * Write Simple Pairing Mode (and clear SSP debug mode first when debug
 * keys were in use). (Excerpt: some lines are elided from this view.)
 */
2023 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2025 struct mgmt_mode *cp = data;
2026 struct pending_cmd *cmd;
2030 BT_DBG("request for %s", hdev->name);
2032 status = mgmt_bredr_support(hdev);
2034 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2036 if (!lmp_ssp_capable(hdev))
2037 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2038 MGMT_STATUS_NOT_SUPPORTED);
2040 if (cp->val != 0x00 && cp->val != 0x01)
2041 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2042 MGMT_STATUS_INVALID_PARAMS);
2046 if (!hdev_is_powered(hdev)) {
2050 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2053 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* HS depends on SSP; drop it when SSP goes away. */
2056 changed = test_and_clear_bit(HCI_HS_ENABLED,
2059 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2062 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2067 err = new_settings(hdev, sk);
2072 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2073 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2074 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2079 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2080 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2084 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* cp->val is 0x00 here, so this also turns SSP debug mode off. */
2090 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2091 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2092 sizeof(cp->val), &cp->val);
2094 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2096 mgmt_pending_remove(cmd);
2101 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP). Requires BR/EDR, SSP
 * capability and SSP enabled; only toggles HCI_HS_ENABLED in dev_flags.
 * Note the visible code rejects one of the paths while powered — the
 * exact branch structure is partly elided in this excerpt.
 */
2105 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2107 struct mgmt_mode *cp = data;
2112 BT_DBG("request for %s", hdev->name);
2114 status = mgmt_bredr_support(hdev);
2116 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2118 if (!lmp_ssp_capable(hdev))
2119 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2120 MGMT_STATUS_NOT_SUPPORTED);
2122 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2123 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2124 MGMT_STATUS_REJECTED);
2126 if (cp->val != 0x00 && cp->val != 0x01)
2127 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2128 MGMT_STATUS_INVALID_PARAMS);
2133 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2135 if (hdev_is_powered(hdev)) {
2136 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2137 MGMT_STATUS_REJECTED);
2141 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2144 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2149 err = new_settings(hdev, sk);
2152 hci_dev_unlock(hdev);
/* HCI request completion for Set LE: fail or answer all pending SET_LE
 * commands, broadcast new settings, and — if LE ended up enabled —
 * refresh advertising/scan-response data and the background scan.
 * (Excerpt: some lines are elided from this view.)
 */
2156 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2158 struct cmd_lookup match = { NULL, hdev };
2161 u8 mgmt_err = mgmt_status(status);
2163 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2168 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2170 new_settings(hdev, match.sk);
2175 /* Make sure the controller has a good default for
2176 * advertising data. Restrict the update to when LE
2177 * has actually been enabled. During power on, the
2178 * update in powered_update_hci will take care of it.
2180 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2181 struct hci_request req;
2185 hci_req_init(&req, hdev);
2186 update_adv_data(&req);
2187 update_scan_rsp_data(&req);
2188 hci_req_run(&req, NULL);
2190 hci_update_background_scan(hdev);
2192 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE. LE-only controllers may not toggle LE.
 * Flags-only path when powered off or when the host LE state already
 * matches; otherwise sends HCI Write LE Host Supported (disabling
 * advertising first if active), completed by le_enable_complete().
 * (Excerpt: some lines are elided from this view.)
 */
2196 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2198 struct mgmt_mode *cp = data;
2199 struct hci_cp_write_le_host_supported hci_cp;
2200 struct pending_cmd *cmd;
2201 struct hci_request req;
2205 BT_DBG("request for %s", hdev->name);
2207 if (!lmp_le_capable(hdev))
2208 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2209 MGMT_STATUS_NOT_SUPPORTED);
2211 if (cp->val != 0x00 && cp->val != 0x01)
2212 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2213 MGMT_STATUS_INVALID_PARAMS);
2215 /* LE-only devices do not allow toggling LE on/off */
2216 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2217 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2218 MGMT_STATUS_REJECTED);
2223 enabled = lmp_host_le_capable(hdev);
2225 if (!hdev_is_powered(hdev) || val == enabled) {
2226 bool changed = false;
2228 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2229 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE also tears down the advertising setting. */
2233 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2234 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2238 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2243 err = new_settings(hdev, sk);
2248 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2249 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2250 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2255 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2261 hci_req_init(&req, hdev);
2263 memset(&hci_cp, 0, sizeof(hci_cp));
2267 hci_cp.simul = lmp_le_br_capable(hdev);
2269 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2270 disable_advertising(&req);
2273 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2276 err = hci_req_run(&req, le_enable_complete);
2278 mgmt_pending_remove(cmd);
2281 hci_dev_unlock(hdev);
2285 /* This is a helper function to test for pending mgmt commands that can
2286 * cause CoD or EIR HCI commands. We can only allow one such pending
2287 * mgmt command at a time since otherwise we cannot easily track what
2288 * the current values are, will be, and based on that calculate if a new
2289 * HCI command needs to be sent and if yes with what value.
2291 static bool pending_eir_or_class(struct hci_dev *hdev)
2293 struct pending_cmd *cmd;
/* Scan all pending commands for any of the CoD/EIR-affecting opcodes.
 * (Excerpt: the return statements are elided from this view.)
 */
2295 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2296 switch (cmd->opcode) {
2297 case MGMT_OP_ADD_UUID:
2298 case MGMT_OP_REMOVE_UUID:
2299 case MGMT_OP_SET_DEV_CLASS:
2300 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit shortened UUIDs.
 */
2308 static const u8 bluetooth_base_uuid[] = {
2309 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2310 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine a UUID's shortest representation: if its low 12 bytes do not
 * match the Bluetooth Base UUID it is a full 128-bit UUID; otherwise the
 * 32-bit value at offset 12 decides between 16 and 32 bits.
 * (Excerpt: the return statements are elided from this view.)
 */
2313 static u8 get_uuid_size(const u8 *uuid)
2317 if (memcmp(uuid, bluetooth_base_uuid, 12))
2320 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class-of-device affecting commands (Add/Remove
 * UUID, Set Dev Class): answer the pending command identified by
 * @mgmt_op with the current 3-byte device class and remove it.
 */
2327 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2329 struct pending_cmd *cmd;
2333 cmd = mgmt_pending_find(mgmt_op, hdev);
2337 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2338 hdev->dev_class, 3);
2340 mgmt_pending_remove(cmd);
2343 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID: delegate to mgmt_class_complete. */
2346 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2348 BT_DBG("status 0x%02x", status);
2350 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: rejects while another CoD/EIR command is
 * pending, allocates and appends the UUID to hdev->uuids, then runs an
 * HCI request to refresh class/EIR. -ENODATA from hci_req_run means no
 * commands were needed, so complete immediately with the current class.
 * (Excerpt: some lines are elided from this view.)
 */
2353 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2355 struct mgmt_cp_add_uuid *cp = data;
2356 struct pending_cmd *cmd;
2357 struct hci_request req;
2358 struct bt_uuid *uuid;
2361 BT_DBG("request for %s", hdev->name);
2365 if (pending_eir_or_class(hdev)) {
2366 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2371 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2377 memcpy(uuid->uuid, cp->uuid, 16);
2378 uuid->svc_hint = cp->svc_hint;
2379 uuid->size = get_uuid_size(cp->uuid);
2381 list_add_tail(&uuid->list, &hdev->uuids);
2383 hci_req_init(&req, hdev);
2388 err = hci_req_run(&req, add_uuid_complete);
2390 if (err != -ENODATA)
/* Nothing to send: reply right away with the current device class. */
2393 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2394 hdev->dev_class, 3);
2398 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2407 hci_dev_unlock(hdev);
/* Arm the service-cache: when powered and not already caching, set
 * HCI_SERVICE_CACHE and schedule the delayed service_cache work.
 * (Excerpt: the return statements are elided from this view.)
 */
2411 static bool enable_service_cache(struct hci_dev *hdev)
2413 if (!hdev_is_powered(hdev))
2416 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2417 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID: delegate to mgmt_class_complete. */
2425 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2427 BT_DBG("status 0x%02x", status);
2429 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID. The all-zero UUID wildcard clears the
 * whole list (optionally deferring the EIR update via the service cache);
 * otherwise the matching entries are removed, with INVALID_PARAMS if none
 * matched. Ends with an HCI request to refresh class/EIR; -ENODATA means
 * nothing to send, so complete immediately.
 * (Excerpt: some lines are elided from this view.)
 */
2432 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2435 struct mgmt_cp_remove_uuid *cp = data;
2436 struct pending_cmd *cmd;
2437 struct bt_uuid *match, *tmp;
2438 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2439 struct hci_request req;
2442 BT_DBG("request for %s", hdev->name);
2446 if (pending_eir_or_class(hdev)) {
2447 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: drop every stored UUID. */
2452 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2453 hci_uuids_clear(hdev);
2455 if (enable_service_cache(hdev)) {
2456 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2457 0, hdev->dev_class, 3);
2466 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2467 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2470 list_del(&match->list);
2476 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2477 MGMT_STATUS_INVALID_PARAMS);
2482 hci_req_init(&req, hdev);
2487 err = hci_req_run(&req, remove_uuid_complete);
2489 if (err != -ENODATA)
2492 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2493 hdev->dev_class, 3);
2497 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2506 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class: delegate to
 * mgmt_class_complete.
 */
2510 static void set_class_complete(struct hci_dev *hdev, u8 status)
2512 BT_DBG("status 0x%02x", status);
2514 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS. Validates the major/minor class
 * bits, stores them, and when powered runs an HCI request (flushing a
 * pending service-cache first) to push the new class; -ENODATA means no
 * commands were needed, so complete immediately.
 * (Excerpt: some lines are elided from this view.)
 */
2517 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2520 struct mgmt_cp_set_dev_class *cp = data;
2521 struct pending_cmd *cmd;
2522 struct hci_request req;
2525 BT_DBG("request for %s", hdev->name);
2527 if (!lmp_bredr_capable(hdev))
2528 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2529 MGMT_STATUS_NOT_SUPPORTED);
2533 if (pending_eir_or_class(hdev)) {
2534 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2539 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2540 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2541 MGMT_STATUS_INVALID_PARAMS);
2545 hdev->major_class = cp->major;
2546 hdev->minor_class = cp->minor;
2548 if (!hdev_is_powered(hdev)) {
2549 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2550 hdev->dev_class, 3);
2554 hci_req_init(&req, hdev);
/* Drop the lock around the synchronous cancel to avoid deadlocking
 * with the service_cache work, which takes hdev's lock itself.
 */
2556 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2557 hci_dev_unlock(hdev);
2558 cancel_delayed_work_sync(&hdev->service_cache);
2565 err = hci_req_run(&req, set_class_complete);
2567 if (err != -ENODATA)
2570 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2571 hdev->dev_class, 3);
2575 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2584 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: bulk-load BR/EDR link keys.
 * Validates key_count against what fits in a u16 length, the exact
 * payload length, debug_keys and each key's address type, then clears
 * and repopulates the key store (debug combination keys are skipped).
 * (Excerpt: some lines are elided from this view.)
 */
2588 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2591 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2592 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2593 sizeof(struct mgmt_link_key_info));
2594 u16 key_count, expected_len;
2598 BT_DBG("request for %s", hdev->name);
2600 if (!lmp_bredr_capable(hdev))
2601 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2602 MGMT_STATUS_NOT_SUPPORTED);
2604 key_count = __le16_to_cpu(cp->key_count);
2605 if (key_count > max_key_count) {
2606 BT_ERR("load_link_keys: too big key_count value %u",
2608 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2609 MGMT_STATUS_INVALID_PARAMS);
2612 expected_len = sizeof(*cp) + key_count *
2613 sizeof(struct mgmt_link_key_info);
2614 if (expected_len != len) {
2615 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2617 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2618 MGMT_STATUS_INVALID_PARAMS);
2621 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2622 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2623 MGMT_STATUS_INVALID_PARAMS);
2625 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the key store. */
2628 for (i = 0; i < key_count; i++) {
2629 struct mgmt_link_key_info *key = &cp->keys[i];
2631 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2632 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2633 MGMT_STATUS_INVALID_PARAMS);
2638 hci_link_keys_clear(hdev);
2641 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2644 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2648 new_settings(hdev, NULL);
2650 for (i = 0; i < key_count; i++) {
2651 struct mgmt_link_key_info *key = &cp->keys[i];
2653 /* Always ignore debug keys and require a new pairing if
2654 * the user wants to use them.
2656 if (key->type == HCI_LK_DEBUG_COMBINATION)
2659 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2660 key->type, key->pin_len, NULL);
2663 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2665 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except @skip_sk.
 */
2670 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2671 u8 addr_type, struct sock *skip_sk)
2673 struct mgmt_ev_device_unpaired ev;
2675 bacpy(&ev.addr.bdaddr, bdaddr);
2676 ev.addr.type = addr_type;
2678 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the stored keys (link key
 * for BR/EDR; IRK, conn params and LTK for LE) and optionally
 * disconnect the device. If a disconnect is issued the response is
 * deferred until it finishes; otherwise complete at once and emit
 * Device Unpaired. (Excerpt: some lines are elided from this view.)
 */
2682 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2685 struct mgmt_cp_unpair_device *cp = data;
2686 struct mgmt_rp_unpair_device rp;
2687 struct hci_cp_disconnect dc;
2688 struct pending_cmd *cmd;
2689 struct hci_conn *conn;
2692 memset(&rp, 0, sizeof(rp));
2693 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2694 rp.addr.type = cp->addr.type;
2696 if (!bdaddr_type_is_valid(cp->addr.type))
2697 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2698 MGMT_STATUS_INVALID_PARAMS,
2701 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2702 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2703 MGMT_STATUS_INVALID_PARAMS,
2708 if (!hdev_is_powered(hdev)) {
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2714 if (cp->addr.type == BDADDR_BREDR) {
2715 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: map mgmt address type to the HCI LE address type. */
2719 if (cp->addr.type == BDADDR_LE_PUBLIC)
2720 addr_type = ADDR_LE_DEV_PUBLIC;
2722 addr_type = ADDR_LE_DEV_RANDOM;
2724 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2726 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2728 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No key was stored for this address: not paired. */
2732 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2733 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2737 if (cp->disconnect) {
2738 if (cp->addr.type == BDADDR_BREDR)
2739 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2742 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2749 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2751 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2755 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2762 dc.handle = cpu_to_le16(conn->handle);
2763 dc.reason = 0x13; /* Remote User Terminated Connection */
2764 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2766 mgmt_pending_remove(cmd);
2769 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: look up the ACL or LE connection for
 * the given address and issue HCI Disconnect with reason "remote user
 * terminated". Fails with NOT_POWERED, BUSY (another disconnect in
 * flight), or NOT_CONNECTED as appropriate; response is deferred via a
 * pending command. (Excerpt: some lines are elided from this view.)
 */
2773 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2776 struct mgmt_cp_disconnect *cp = data;
2777 struct mgmt_rp_disconnect rp;
2778 struct hci_cp_disconnect dc;
2779 struct pending_cmd *cmd;
2780 struct hci_conn *conn;
2785 memset(&rp, 0, sizeof(rp));
2786 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2787 rp.addr.type = cp->addr.type;
2789 if (!bdaddr_type_is_valid(cp->addr.type))
2790 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2791 MGMT_STATUS_INVALID_PARAMS,
2796 if (!test_bit(HCI_UP, &hdev->flags)) {
2797 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2798 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2802 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2803 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2804 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2808 if (cp->addr.type == BDADDR_BREDR)
2809 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2812 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2814 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2815 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2816 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2820 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2826 dc.handle = cpu_to_le16(conn->handle);
2827 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2829 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2831 mgmt_pending_remove(cmd);
2834 hci_dev_unlock(hdev);
/* Convert an HCI link type + LE address type pair to the mgmt BDADDR_*
 * address type. Non-public LE addresses map to BDADDR_LE_RANDOM and
 * anything else falls back to BDADDR_BREDR.
 * (Excerpt: some case labels are elided from this view.)
 */
2838 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2840 switch (link_type) {
2842 switch (addr_type) {
2843 case ADDR_LE_DEV_PUBLIC:
2844 return BDADDR_LE_PUBLIC;
2847 /* Fallback to LE Random address type */
2848 return BDADDR_LE_RANDOM;
2852 /* Fallback to BR/EDR type */
2853 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: count mgmt-visible connections,
 * allocate a response sized for them, then fill in address/type entries
 * (skipping SCO/eSCO links) and reply. The length is recomputed after
 * the fill since the second pass may collect fewer entries.
 * (Excerpt: some lines are elided from this view.)
 */
2857 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2860 struct mgmt_rp_get_connections *rp;
2870 if (!hdev_is_powered(hdev)) {
2871 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2872 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected. */
2877 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2878 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2882 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2883 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: emit one mgmt_addr_info per eligible connection. */
2890 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2891 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2893 bacpy(&rp->addr[i].bdaddr, &c->dst);
2894 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2895 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2900 rp->conn_count = cpu_to_le16(i);
2902 /* Recalculate length in case of filtered SCO connections, etc */
2903 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2905 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2911 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY and send the HCI PIN Code Negative
 * Reply for the given address; drops the pending entry on send failure.
 */
2915 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2916 struct mgmt_cp_pin_code_neg_reply *cp)
2918 struct pending_cmd *cmd;
2921 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2926 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2927 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2929 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY. Requires power and an existing
 * ACL connection; when high security is pending but the PIN is not the
 * required 16 bytes, converts the reply into a negative reply. Otherwise
 * forwards the PIN via HCI PIN Code Reply with a pending command.
 * (Excerpt: some lines are elided from this view.)
 */
2934 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2937 struct hci_conn *conn;
2938 struct mgmt_cp_pin_code_reply *cp = data;
2939 struct hci_cp_pin_code_reply reply;
2940 struct pending_cmd *cmd;
2947 if (!hdev_is_powered(hdev)) {
2948 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2949 MGMT_STATUS_NOT_POWERED);
2953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2955 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2956 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones by
 * sending a negative reply to the controller instead.
 */
2960 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2961 struct mgmt_cp_pin_code_neg_reply ncp;
2963 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2965 BT_ERR("PIN code is not 16 bytes long");
2967 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2969 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2970 MGMT_STATUS_INVALID_PARAMS);
2975 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2981 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2982 reply.pin_len = cp->pin_len;
2983 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2985 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2987 mgmt_pending_remove(cmd);
2990 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: validate against the maximum
 * SMP IO capability and store it in hdev->io_capability. Purely a
 * host-side setting, so it always completes immediately.
 */
2994 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2997 struct mgmt_cp_set_io_capability *cp = data;
3001 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3002 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3003 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3007 hdev->io_capability = cp->io_capability;
3009 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3010 hdev->io_capability);
3012 hci_dev_unlock(hdev);
3014 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is @conn, or
 * NULL if none. (Excerpt: the return statements are elided from this
 * view.)
 */
3018 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3020 struct hci_dev *hdev = conn->hdev;
3021 struct pending_cmd *cmd;
3023 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3024 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3027 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the completion with the peer
 * address, detach all connection callbacks so no further events fire,
 * drop the connection reference and remove the pending command.
 */
3036 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3038 struct mgmt_rp_pair_device rp;
3039 struct hci_conn *conn = cmd->user_data;
3041 bacpy(&rp.addr.bdaddr, &conn->dst);
3042 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3044 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3047 /* So we don't get further callbacks for this connection */
3048 conn->connect_cfm_cb = NULL;
3049 conn->security_cfm_cb = NULL;
3050 conn->disconn_cfm_cb = NULL;
3052 hci_conn_drop(conn);
3054 mgmt_pending_remove(cmd);
/* Called by the SMP layer when pairing over SMP finishes; maps the boolean
 * outcome to a MGMT status and completes the pending Pair Device command,
 * if one exists for this connection.
 */
3057 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3059 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3060 struct pending_cmd *cmd;
3062 cmd = find_pairing(conn);
3064 pairing_complete(cmd, status);
/* BR/EDR connection/security/disconnect callback: translate the HCI status
 * and complete the pending pairing command for this connection (the debug
 * message covers the not-found case; elided lines likely return early then).
 */
3067 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3069 struct pending_cmd *cmd;
3071 BT_DBG("status %u", status);
3073 cmd = find_pairing(conn);
3075 BT_DBG("Unable to find a pending command");
3077 pairing_complete(cmd, mgmt_status(status));
/* LE variant of pairing_complete_cb. The elided lines between 3084 and 3089
 * presumably filter events (e.g. ignore success, since for LE a completed
 * connection alone does not mean pairing finished) — TODO confirm.
 */
3080 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3082 struct pending_cmd *cmd;
3084 BT_DBG("status %u", status);
3089 cmd = find_pairing(conn);
3091 BT_DBG("Unable to find a pending command");
3093 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: validate address type and IO capability,
 * establish an ACL (BR/EDR) or LE connection, register pairing-completion
 * callbacks and track the operation as a pending command. Errors are
 * reported via cmd_complete with the peer address echoed back in @rp.
 * (Excerpt is elided; braces/gotos between numbered lines are missing.)
 */
3096 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3099 struct mgmt_cp_pair_device *cp = data;
3100 struct mgmt_rp_pair_device rp;
3101 struct pending_cmd *cmd;
3102 u8 sec_level, auth_type;
3103 struct hci_conn *conn;
3108 memset(&rp, 0, sizeof(rp));
3109 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3110 rp.addr.type = cp->addr.type;
3112 if (!bdaddr_type_is_valid(cp->addr.type))
3113 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3114 MGMT_STATUS_INVALID_PARAMS,
3117 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3118 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3119 MGMT_STATUS_INVALID_PARAMS,
3124 if (!hdev_is_powered(hdev)) {
3125 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3126 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pairing always requests medium security with dedicated bonding. */
3130 sec_level = BT_SECURITY_MEDIUM;
3131 auth_type = HCI_AT_DEDICATED_BONDING;
3133 if (cp->addr.type == BDADDR_BREDR) {
3134 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3139 /* Convert from L2CAP channel address type to HCI address type
3141 if (cp->addr.type == BDADDR_LE_PUBLIC)
3142 addr_type = ADDR_LE_DEV_PUBLIC;
3144 addr_type = ADDR_LE_DEV_RANDOM;
3146 /* When pairing a new device, it is expected to remember
3147 * this device for future connections. Adding the connection
3148 * parameter information ahead of time allows tracking
3149 * of the slave preferred values and will speed up any
3150 * further connection establishment.
3152 * If connection parameters already exist, then they
3153 * will be kept and this function does nothing.
3155 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3157 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3158 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: distinguish "busy" from generic failure. */
3165 if (PTR_ERR(conn) == -EBUSY)
3166 status = MGMT_STATUS_BUSY;
3168 status = MGMT_STATUS_CONNECT_FAILED;
3170 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means another pairing already owns this conn. */
3176 if (conn->connect_cfm_cb) {
3177 hci_conn_drop(conn);
3178 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3179 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3183 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3186 hci_conn_drop(conn);
3190 /* For LE, just connecting isn't a proof that the pairing finished */
3191 if (cp->addr.type == BDADDR_BREDR) {
3192 conn->connect_cfm_cb = pairing_complete_cb;
3193 conn->security_cfm_cb = pairing_complete_cb;
3194 conn->disconn_cfm_cb = pairing_complete_cb;
3196 conn->connect_cfm_cb = le_pairing_complete_cb;
3197 conn->security_cfm_cb = le_pairing_complete_cb;
3198 conn->disconn_cfm_cb = le_pairing_complete_cb;
3201 conn->io_capability = cp->io_cap;
3202 cmd->user_data = conn;
/* Already connected and secure enough: complete immediately with success. */
3204 if (conn->state == BT_CONNECTED &&
3205 hci_conn_security(conn, sec_level, auth_type))
3206 pairing_complete(cmd, 0);
3211 hci_dev_unlock(hdev);
3215 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3218 struct mgmt_addr_info *addr = data;
3219 struct pending_cmd *cmd;
3220 struct hci_conn *conn;
3227 if (!hdev_is_powered(hdev)) {
3228 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3229 MGMT_STATUS_NOT_POWERED);
3233 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3235 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3236 MGMT_STATUS_INVALID_PARAMS);
3240 conn = cmd->user_data;
3242 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3243 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3244 MGMT_STATUS_INVALID_PARAMS);
3248 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3250 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3251 addr, sizeof(*addr));
3253 hci_dev_unlock(hdev);
/* Common backend for the user pairing response commands (PIN neg reply,
 * user confirm [neg] reply, passkey [neg] reply). Looks up the connection
 * by address type, routes LE responses through SMP, and BR/EDR responses
 * through the given HCI opcode tracked as a pending command.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3257 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3258 struct mgmt_addr_info *addr, u16 mgmt_op,
3259 u16 hci_op, __le32 passkey)
3261 struct pending_cmd *cmd;
3262 struct hci_conn *conn;
3267 if (!hdev_is_powered(hdev)) {
3268 err = cmd_complete(sk, hdev->id, mgmt_op,
3269 MGMT_STATUS_NOT_POWERED, addr,
3274 if (addr->type == BDADDR_BREDR)
3275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3277 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3280 err = cmd_complete(sk, hdev->id, mgmt_op,
3281 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by the SMP layer. */
3286 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3287 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3289 err = cmd_complete(sk, hdev->id, mgmt_op,
3290 MGMT_STATUS_SUCCESS, addr,
3293 err = cmd_complete(sk, hdev->id, mgmt_op,
3294 MGMT_STATUS_FAILED, addr,
3300 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3306 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey payload; all other ops send bdaddr only. */
3307 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3308 struct hci_cp_user_passkey_reply cp;
3310 bacpy(&cp.bdaddr, &addr->bdaddr);
3311 cp.passkey = passkey;
3312 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3314 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3318 mgmt_pending_remove(cmd);
3321 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper over user_pairing_resp
 * with the PIN-code negative-reply HCI opcode and no passkey.
 */
3325 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3326 void *data, u16 len)
3328 struct mgmt_cp_pin_code_neg_reply *cp = data;
3332 return user_pairing_resp(sk, hdev, &cp->addr,
3333 MGMT_OP_PIN_CODE_NEG_REPLY,
3334 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: strict length check (this is the only
 * wrapper in this group that validates len explicitly), then delegate to
 * user_pairing_resp.
 */
3337 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3340 struct mgmt_cp_user_confirm_reply *cp = data;
3344 if (len != sizeof(*cp))
3345 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3346 MGMT_STATUS_INVALID_PARAMS);
3348 return user_pairing_resp(sk, hdev, &cp->addr,
3349 MGMT_OP_USER_CONFIRM_REPLY,
3350 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — delegates to user_pairing_resp. */
3353 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3354 void *data, u16 len)
3356 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3360 return user_pairing_resp(sk, hdev, &cp->addr,
3361 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3362 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — forwards the user-entered passkey
 * through user_pairing_resp.
 */
3365 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_user_passkey_reply *cp = data;
3372 return user_pairing_resp(sk, hdev, &cp->addr,
3373 MGMT_OP_USER_PASSKEY_REPLY,
3374 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — delegates to user_pairing_resp. */
3377 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3378 void *data, u16 len)
3380 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3384 return user_pairing_resp(sk, hdev, &cp->addr,
3385 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3386 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue HCI_Write_Local_Name with the current hdev->dev_name onto @req. */
3389 static void update_name(struct hci_request *req)
3391 struct hci_dev *hdev = req->hdev;
3392 struct hci_cp_write_local_name cp;
3394 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3396 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion handler for Set Local Name: finds the pending
 * command and answers it with either an error status or a success
 * completion, then frees the command. (Excerpt is elided; the branch
 * condition between lines 3408 and 3415 is among the missing lines.)
 */
3399 static void set_name_complete(struct hci_dev *hdev, u8 status)
3401 struct mgmt_cp_set_local_name *cp;
3402 struct pending_cmd *cmd;
3404 BT_DBG("status 0x%02x", status);
3408 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3415 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3416 mgmt_status(status));
3418 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3421 mgmt_pending_remove(cmd);
3424 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changed,
 * store names directly when powered off (emitting a Local Name Changed
 * event), otherwise run an HCI request to push the name to the controller
 * and, for LE, refresh the scan response data.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3427 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3430 struct mgmt_cp_set_local_name *cp = data;
3431 struct pending_cmd *cmd;
3432 struct hci_request req;
3439 /* If the old values are the same as the new ones just return a
3440 * direct command complete event.
3442 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3443 !memcmp(hdev->short_name, cp->short_name,
3444 sizeof(hdev->short_name))) {
3445 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3450 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just persist the name and notify listeners; no HCI traffic. */
3452 if (!hdev_is_powered(hdev)) {
3453 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3455 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3460 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3466 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3472 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3474 hci_req_init(&req, hdev);
3476 if (lmp_bredr_capable(hdev)) {
3481 /* The name is stored in the scan response data and so
3482 * no need to udpate the advertising data here.
3484 if (lmp_le_capable(hdev))
3485 update_scan_rsp_data(&req);
3487 err = hci_req_run(&req, set_name_complete);
3489 mgmt_pending_remove(cmd);
3492 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate in-flight request; issues the extended OOB
 * read when Secure Connections is enabled, the legacy read otherwise.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3496 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3497 void *data, u16 data_len)
3499 struct pending_cmd *cmd;
3502 BT_DBG("%s", hdev->name);
3506 if (!hdev_is_powered(hdev)) {
3507 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3508 MGMT_STATUS_NOT_POWERED);
3512 if (!lmp_ssp_capable(hdev)) {
3513 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3514 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be outstanding at a time. */
3518 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3519 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3524 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3530 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3531 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3534 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3537 mgmt_pending_remove(cmd);
3540 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: dispatches on the command length to
 * accept either the legacy (hash + randomizer) or the extended (Secure
 * Connections, dual hash/randomizer) payload; any other length is rejected.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3544 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3545 void *data, u16 len)
3549 BT_DBG("%s ", hdev->name);
3553 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3554 struct mgmt_cp_add_remote_oob_data *cp = data;
3557 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3558 cp->hash, cp->randomizer);
3560 status = MGMT_STATUS_FAILED;
3562 status = MGMT_STATUS_SUCCESS;
3564 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3565 status, &cp->addr, sizeof(cp->addr));
3566 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3567 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3570 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3576 status = MGMT_STATUS_FAILED;
3578 status = MGMT_STATUS_SUCCESS;
3580 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3581 status, &cp->addr, sizeof(cp->addr));
3583 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3584 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3585 MGMT_STATUS_INVALID_PARAMS);
3588 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for the
 * given address; a lookup failure maps to INVALID_PARAMS.
 */
3592 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3593 void *data, u16 len)
3595 struct mgmt_cp_remove_remote_oob_data *cp = data;
3599 BT_DBG("%s", hdev->name);
3603 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3605 status = MGMT_STATUS_INVALID_PARAMS;
3607 status = MGMT_STATUS_SUCCESS;
3609 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3610 status, &cp->addr, sizeof(cp->addr));
3612 hci_dev_unlock(hdev);
/* Abort a failed Start Discovery: reset discovery state to STOPPED and,
 * if a pending command exists, answer it with the mapped HCI status plus
 * the discovery type that was requested.
 */
3616 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3618 struct pending_cmd *cmd;
3622 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3624 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3628 type = hdev->discovery.type;
3630 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3631 &type, sizeof(type));
3632 mgmt_pending_remove(cmd);
/* HCI request completion for Start Discovery: on error, unwind via
 * mgmt_start_discovery_failed; on success, enter DISCOVERY_FINDING and,
 * for LE/interleaved discovery, schedule the LE-scan-disable work after
 * the per-type timeout (BR/EDR inquiry needs no timer — the controller
 * terminates it). (Excerpt is elided; break statements are missing.)
 */
3637 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3639 unsigned long timeout = 0;
3641 BT_DBG("status %d", status);
3645 mgmt_start_discovery_failed(hdev, status);
3646 hci_dev_unlock(hdev);
3651 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3652 hci_dev_unlock(hdev);
3654 switch (hdev->discovery.type) {
3655 case DISCOV_TYPE_LE:
3656 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3659 case DISCOV_TYPE_INTERLEAVED:
3660 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3663 case DISCOV_TYPE_BREDR:
3667 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3673 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler: validates power/periodic-inquiry/busy
 * state, records the requested discovery type, and builds an HCI request
 * per type — a GIAC inquiry for BR/EDR, or (for LE/interleaved) disabling
 * advertising and background scanning before programming and enabling an
 * active LE scan with a fresh (possibly private) own address.
 * (Excerpt is elided; braces, gotos and breaks are among the missing lines.)
 */
3676 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3677 void *data, u16 len)
3679 struct mgmt_cp_start_discovery *cp = data;
3680 struct pending_cmd *cmd;
3681 struct hci_cp_le_set_scan_param param_cp;
3682 struct hci_cp_le_set_scan_enable enable_cp;
3683 struct hci_cp_inquiry inq_cp;
3684 struct hci_request req;
3685 /* General inquiry access code (GIAC) */
3686 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3687 u8 status, own_addr_type;
3690 BT_DBG("%s", hdev->name);
3694 if (!hdev_is_powered(hdev)) {
3695 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3696 MGMT_STATUS_NOT_POWERED);
3700 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3701 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3706 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3707 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3712 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3718 hdev->discovery.type = cp->type;
3720 hci_req_init(&req, hdev);
3722 switch (hdev->discovery.type) {
3723 case DISCOV_TYPE_BREDR:
3724 status = mgmt_bredr_support(hdev);
3726 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3728 mgmt_pending_remove(cmd);
3732 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3733 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3735 mgmt_pending_remove(cmd);
3739 hci_inquiry_cache_flush(hdev);
3741 memset(&inq_cp, 0, sizeof(inq_cp));
3742 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3743 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3744 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3747 case DISCOV_TYPE_LE:
3748 case DISCOV_TYPE_INTERLEAVED:
3749 status = mgmt_le_support(hdev);
3751 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3753 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally requires BR/EDR to be enabled. */
3757 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3758 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3759 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3760 MGMT_STATUS_NOT_SUPPORTED);
3761 mgmt_pending_remove(cmd);
3765 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3766 /* Don't let discovery abort an outgoing
3767 * connection attempt that's using directed
3770 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3772 err = cmd_status(sk, hdev->id,
3773 MGMT_OP_START_DISCOVERY,
3774 MGMT_STATUS_REJECTED);
3775 mgmt_pending_remove(cmd);
3779 disable_advertising(&req);
3782 /* If controller is scanning, it means the background scanning
3783 * is running. Thus, we should temporarily stop it in order to
3784 * set the discovery scanning parameters.
3786 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3787 hci_req_add_le_scan_disable(&req);
3789 memset(&param_cp, 0, sizeof(param_cp));
3791 /* All active scans will be done with either a resolvable
3792 * private address (when privacy feature has been enabled)
3793 * or unresolvable private address.
3795 err = hci_update_random_address(&req, true, &own_addr_type);
3797 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3798 MGMT_STATUS_FAILED);
3799 mgmt_pending_remove(cmd);
3803 param_cp.type = LE_SCAN_ACTIVE;
3804 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3805 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3806 param_cp.own_address_type = own_addr_type;
3807 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3810 memset(&enable_cp, 0, sizeof(enable_cp));
3811 enable_cp.enable = LE_SCAN_ENABLE;
3812 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3813 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type falls through to the default error path here. */
3818 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3819 MGMT_STATUS_INVALID_PARAMS);
3820 mgmt_pending_remove(cmd);
3824 err = hci_req_run(&req, start_discovery_complete);
3826 mgmt_pending_remove(cmd);
3828 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3831 hci_dev_unlock(hdev);
/* Answer a pending Stop Discovery command with the mapped HCI failure
 * status (echoing the active discovery type) and free the command.
 */
3835 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3837 struct pending_cmd *cmd;
3840 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3844 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3845 &hdev->discovery.type, sizeof(hdev->discovery.type));
3846 mgmt_pending_remove(cmd);
/* HCI request completion for Stop Discovery: report failure via
 * mgmt_stop_discovery_failed, or mark discovery DISCOVERY_STOPPED.
 * (Elided lines include the status check and hci_dev_lock.)
 */
3851 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3853 BT_DBG("status %d", status);
3858 mgmt_stop_discovery_failed(hdev, status);
3862 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3865 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when discovery is not active or
 * the supplied type does not match the running one; otherwise build and
 * run the generic hci_stop_discovery() request. If that request queued no
 * HCI commands (-ENODATA) the operation completes immediately.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3868 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3871 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3872 struct pending_cmd *cmd;
3873 struct hci_request req;
3876 BT_DBG("%s", hdev->name);
3880 if (!hci_discovery_active(hdev)) {
3881 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3882 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3883 sizeof(mgmt_cp->type));
3887 if (hdev->discovery.type != mgmt_cp->type) {
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3889 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3890 sizeof(mgmt_cp->type));
3894 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3900 hci_req_init(&req, hdev);
3902 hci_stop_discovery(&req);
3904 err = hci_req_run(&req, stop_discovery_complete);
3906 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3910 mgmt_pending_remove(cmd);
3912 /* If no HCI commands were sent we're done */
3913 if (err == -ENODATA) {
3914 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3915 &mgmt_cp->type, sizeof(mgmt_cp->type));
3916 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3920 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an inquiry
 * cache entry's name as known or as still needed (re-queueing it for name
 * resolution in the latter case) and acknowledge with the peer address.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
3924 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3927 struct mgmt_cp_confirm_name *cp = data;
3928 struct inquiry_entry *e;
3931 BT_DBG("%s", hdev->name);
3935 if (!hci_discovery_active(hdev)) {
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3937 MGMT_STATUS_FAILED, &cp->addr,
3942 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3944 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3945 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3950 if (cp->name_known) {
3951 e->name_state = NAME_KNOWN;
3954 e->name_state = NAME_NEEDED;
3955 hci_inquiry_cache_update_resolve(hdev, e);
3958 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3962 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and,
 * on success, broadcast a Device Blocked event to other mgmt sockets.
 */
3966 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3969 struct mgmt_cp_block_device *cp = data;
3973 BT_DBG("%s", hdev->name);
3975 if (!bdaddr_type_is_valid(cp->addr.type))
3976 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3977 MGMT_STATUS_INVALID_PARAMS,
3978 &cp->addr, sizeof(cp->addr));
3982 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3985 status = MGMT_STATUS_FAILED;
3989 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3991 status = MGMT_STATUS_SUCCESS;
3994 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3995 &cp->addr, sizeof(cp->addr));
3997 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device — remove the
 * address from the blacklist (a miss maps to INVALID_PARAMS) and emit a
 * Device Unblocked event on success.
 */
4002 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4005 struct mgmt_cp_unblock_device *cp = data;
4009 BT_DBG("%s", hdev->name);
4011 if (!bdaddr_type_is_valid(cp->addr.type))
4012 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4013 MGMT_STATUS_INVALID_PARAMS,
4014 &cp->addr, sizeof(cp->addr));
4018 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4021 status = MGMT_STATUS_INVALID_PARAMS;
4025 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4027 status = MGMT_STATUS_SUCCESS;
4030 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4031 &cp->addr, sizeof(cp->addr));
4033 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DI source (0x0000-0x0002),
 * store the Device ID fields, ack immediately, then run an HCI request
 * (the elided lines presumably rebuild the EIR/class to include the new
 * DID record — TODO confirm against full file).
 */
4038 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4041 struct mgmt_cp_set_device_id *cp = data;
4042 struct hci_request req;
4046 BT_DBG("%s", hdev->name);
4048 source = __le16_to_cpu(cp->source);
4050 if (source > 0x0002)
4051 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4052 MGMT_STATUS_INVALID_PARAMS);
4056 hdev->devid_source = source;
4057 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4058 hdev->devid_product = __le16_to_cpu(cp->product);
4059 hdev->devid_version = __le16_to_cpu(cp->version);
4061 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4063 hci_req_init(&req, hdev);
4065 hci_req_run(&req, NULL);
4067 hci_dev_unlock(hdev);
/* HCI request completion for Set Advertising: on failure, answer all
 * pending SET_ADVERTISING commands with the error; on success, sync the
 * HCI_ADVERTISING setting flag with the actual HCI_LE_ADV state, answer
 * the pending commands with current settings and broadcast New Settings.
 */
4072 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4074 struct cmd_lookup match = { NULL, hdev };
4077 u8 mgmt_err = mgmt_status(status);
4079 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4080 cmd_status_rsp, &mgmt_err);
4084 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4085 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4087 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4089 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4092 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: requires LE support and a 0x00/0x01
 * value. When no HCI traffic is needed (powered off, no change, an LE
 * link exists, or an active LE scan is running) just toggle the setting
 * flag and respond; otherwise queue enable/disable advertising as an HCI
 * request completed by set_advertising_complete.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
4098 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4101 struct mgmt_mode *cp = data;
4102 struct pending_cmd *cmd;
4103 struct hci_request req;
4104 u8 val, enabled, status;
4107 BT_DBG("request for %s", hdev->name);
4109 status = mgmt_le_support(hdev);
4111 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4114 if (cp->val != 0x00 && cp->val != 0x01)
4115 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4116 MGMT_STATUS_INVALID_PARAMS);
4121 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4123 /* The following conditions are ones which mean that we should
4124 * not do any HCI communication but directly send a mgmt
4125 * response to user space (after toggling the flag if
4128 if (!hdev_is_powered(hdev) || val == enabled ||
4129 hci_conn_num(hdev, LE_LINK) > 0 ||
4130 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4131 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4132 bool changed = false;
4134 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4135 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4139 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4144 err = new_settings(hdev, sk);
/* Serialize against in-flight SET_ADVERTISING / SET_LE operations. */
4149 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4150 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4151 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4156 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4162 hci_req_init(&req, hdev);
4165 enable_advertising(&req);
4167 disable_advertising(&req);
4169 err = hci_req_run(&req, set_advertising_complete);
4171 mgmt_pending_remove(cmd);
4174 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on an LE-capable,
 * powered-off controller. A non-ANY address must not be BDADDR_NONE and
 * must have the two most significant bits set, per the static random
 * address format defined by the Bluetooth Core Specification.
 */
4178 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4179 void *data, u16 len)
4181 struct mgmt_cp_set_static_address *cp = data;
4184 BT_DBG("%s", hdev->name);
4186 if (!lmp_le_capable(hdev))
4187 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4188 MGMT_STATUS_NOT_SUPPORTED);
4190 if (hdev_is_powered(hdev))
4191 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4192 MGMT_STATUS_REJECTED);
4194 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4195 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4196 return cmd_status(sk, hdev->id,
4197 MGMT_OP_SET_STATIC_ADDRESS,
4198 MGMT_STATUS_INVALID_PARAMS);
4200 /* Two most significant bits shall be set */
4201 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4202 return cmd_status(sk, hdev->id,
4203 MGMT_OP_SET_STATIC_ADDRESS,
4204 MGMT_STATUS_INVALID_PARAMS);
4209 bacpy(&hdev->static_addr, &cp->bdaddr);
4211 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4213 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate interval and window against
 * the HCI LE scan parameter range (0x0004-0x4000, window <= interval),
 * store them, and restart the passive background scan (when one is
 * running and no discovery is active) so the new values take effect.
 */
4218 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4219 void *data, u16 len)
4221 struct mgmt_cp_set_scan_params *cp = data;
4222 __u16 interval, window;
4225 BT_DBG("%s", hdev->name);
4227 if (!lmp_le_capable(hdev))
4228 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4229 MGMT_STATUS_NOT_SUPPORTED);
4231 interval = __le16_to_cpu(cp->interval);
4233 if (interval < 0x0004 || interval > 0x4000)
4234 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4235 MGMT_STATUS_INVALID_PARAMS);
4237 window = __le16_to_cpu(cp->window);
4239 if (window < 0x0004 || window > 0x4000)
4240 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4241 MGMT_STATUS_INVALID_PARAMS);
4243 if (window > interval)
4244 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4245 MGMT_STATUS_INVALID_PARAMS);
4249 hdev->le_scan_interval = interval;
4250 hdev->le_scan_window = window;
4252 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4254 /* If background scan is running, restart it so new parameters are
4257 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4258 hdev->discovery.state == DISCOVERY_STOPPED) {
4259 struct hci_request req;
4261 hci_req_init(&req, hdev);
4263 hci_req_add_le_scan_disable(&req);
4264 hci_req_add_le_passive_scan(&req);
4266 hci_req_run(&req, NULL);
4269 hci_dev_unlock(hdev);
/* HCI request completion for Set Fast Connectable: on error, answer the
 * pending command with the mapped status; on success, sync the
 * HCI_FAST_CONNECTABLE flag with the requested value, reply with current
 * settings and broadcast New Settings.
 */
4274 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4276 struct pending_cmd *cmd;
4278 BT_DBG("status 0x%02x", status);
4282 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4287 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4288 mgmt_status(status));
4290 struct mgmt_mode *cp = cmd->param;
4293 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4295 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4297 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4298 new_settings(hdev, cmd->sk);
4301 mgmt_pending_remove(cmd);
4304 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled on a
 * >= 1.2 controller, powered and connectable. A no-op request answers
 * immediately with current settings; otherwise the page-scan parameters
 * are rewritten via write_fast_connectable() in an HCI request.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
4307 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4308 void *data, u16 len)
4310 struct mgmt_mode *cp = data;
4311 struct pending_cmd *cmd;
4312 struct hci_request req;
4315 BT_DBG("%s", hdev->name);
4317 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4318 hdev->hci_ver < BLUETOOTH_VER_1_2)
4319 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4320 MGMT_STATUS_NOT_SUPPORTED);
4322 if (cp->val != 0x00 && cp->val != 0x01)
4323 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4324 MGMT_STATUS_INVALID_PARAMS);
4326 if (!hdev_is_powered(hdev))
4327 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4328 MGMT_STATUS_NOT_POWERED);
4330 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4331 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4332 MGMT_STATUS_REJECTED);
4336 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4337 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested state already matches the flag: no HCI traffic needed. */
4342 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4343 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4348 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4355 hci_req_init(&req, hdev);
4357 write_fast_connectable(&req, cp->val);
4359 err = hci_req_run(&req, fast_connectable_complete);
4361 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4362 MGMT_STATUS_FAILED);
4363 mgmt_pending_remove(cmd);
4367 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable for BR/EDR: disable fast connectable first,
 * then enable page scan when the device is connectable or has whitelist
 * entries, adding inquiry scan when discoverable. (Elided lines build
 * the scan bitmask — presumably "scan = SCAN_PAGE" — TODO confirm.)
 */
4372 static void set_bredr_scan(struct hci_request *req)
4374 struct hci_dev *hdev = req->hdev;
4377 /* Ensure that fast connectable is disabled. This function will
4378 * not do anything if the page scan parameters are already what
4381 write_fast_connectable(req, false);
4383 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4384 !list_empty(&hdev->whitelist))
4386 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4387 scan |= SCAN_INQUIRY;
4390 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion for Set BR/EDR: on failure, roll back the
 * HCI_BREDR_ENABLED flag (it was set optimistically before the request)
 * and report the error; on success, reply with current settings and
 * broadcast New Settings.
 */
4393 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4395 struct pending_cmd *cmd;
4397 BT_DBG("status 0x%02x", status);
4401 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4406 u8 mgmt_err = mgmt_status(status);
4408 /* We need to restore the flag if related HCI commands
4411 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4413 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4415 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4416 new_settings(hdev, cmd->sk);
4419 mgmt_pending_remove(cmd);
4422 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: toggling BR/EDR support on a dual-mode
 * controller. Requires LE to be enabled. Powered off: flip the flag
 * (clearing BR/EDR-dependent settings when disabling) and notify.
 * Powered on: only enabling is allowed; the flag is set before the HCI
 * request so update_adv_data() emits the right AD flags, then scan
 * enable and advertising data are pushed to the controller.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
4425 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4427 struct mgmt_mode *cp = data;
4428 struct pending_cmd *cmd;
4429 struct hci_request req;
4432 BT_DBG("request for %s", hdev->name);
4434 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4435 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4436 MGMT_STATUS_NOT_SUPPORTED);
4438 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4439 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4440 MGMT_STATUS_REJECTED);
4442 if (cp->val != 0x00 && cp->val != 0x01)
4443 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4444 MGMT_STATUS_INVALID_PARAMS);
4448 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4449 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4453 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every setting that depends on it. */
4455 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4456 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4457 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4458 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4459 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4462 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4464 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4468 err = new_settings(hdev, sk);
4472 /* Reject disabling when powered on */
4474 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4475 MGMT_STATUS_REJECTED);
4479 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4480 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4491 /* We need to flip the bit already here so that update_adv_data
4492 * generates the correct flags.
4494 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4496 hci_req_init(&req, hdev);
4498 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4499 !list_empty(&hdev->whitelist))
4500 set_bredr_scan(&req);
4502 /* Since only the advertising data flags will change, there
4503 * is no need to update the scan response data.
4505 update_adv_data(&req);
4507 err = hci_req_run(&req, set_bredr_complete);
4509 mgmt_pending_remove(cmd);
4512 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: value 0x00 = off, 0x01 = SC enabled,
 * 0x02 = SC-only mode. Requires BR/EDR support and either controller SC
 * capability or the force-SC debugfs override. Powered off: just adjust
 * the HCI_SC_ENABLED / HCI_SC_ONLY flags. Powered on: skip if already in
 * the requested state, otherwise send Write Secure Connections Support
 * and record SC-only mode on success.
 * (Excerpt is elided; original line numbers are non-contiguous.)
 */
4516 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4517 void *data, u16 len)
4519 struct mgmt_mode *cp = data;
4520 struct pending_cmd *cmd;
4524 BT_DBG("request for %s", hdev->name);
4526 status = mgmt_bredr_support(hdev);
4528 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4531 if (!lmp_sc_capable(hdev) &&
4532 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4533 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4534 MGMT_STATUS_NOT_SUPPORTED);
4536 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4537 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4538 MGMT_STATUS_INVALID_PARAMS);
4542 if (!hdev_is_powered(hdev)) {
4546 changed = !test_and_set_bit(HCI_SC_ENABLED,
4548 if (cp->val == 0x02)
4549 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4551 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4553 changed = test_and_clear_bit(HCI_SC_ENABLED,
4555 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4558 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4563 err = new_settings(hdev, sk);
4568 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4569 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested enabled/SC-only combination: reply directly. */
4576 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4577 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4578 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4582 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4588 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4590 mgmt_pending_remove(cmd);
4594 if (cp->val == 0x02)
4595 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4597 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4600 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * cp->val: 0x00 = off, 0x01 = keep debug keys, 0x02 = also use SSP debug mode.
 * NOTE(review): some lines (locking, else keywords) are elided in this excerpt.
 */
4604 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4605 void *data, u16 len)
4607 struct mgmt_mode *cp = data;
4608 bool changed, use_changed;
4611 BT_DBG("request for %s", hdev->name);
4613 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4614 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4615 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" flag actually changed. */
4620 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4623 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* Value 0x02 additionally toggles active use of SSP debug keys. */
4626 if (cp->val == 0x02)
4627 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4630 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* If powered and SSP is on, tell the controller about the new debug mode. */
4633 if (hdev_is_powered(hdev) && use_changed &&
4634 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4635 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4636 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4637 sizeof(mode), &mode);
4640 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4645 err = new_settings(hdev, sk);
4648 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enable/disable LE privacy and install
 * the local IRK. Only allowed while the controller is powered off.
 */
4652 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4655 struct mgmt_cp_set_privacy *cp = cp_data;
4659 BT_DBG("request for %s", hdev->name);
4661 if (!lmp_le_capable(hdev))
4662 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4663 MGMT_STATUS_NOT_SUPPORTED);
4665 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4666 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4667 MGMT_STATUS_INVALID_PARAMS);
/* Changing privacy settings while powered is rejected. */
4669 if (hdev_is_powered(hdev))
4670 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4671 MGMT_STATUS_REJECTED);
4675 /* If user space supports this command it is also expected to
4676 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4678 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enable: store the supplied IRK and force RPA regeneration. */
4681 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4682 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4683 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disable: wipe the IRK and clear the expiry marker. */
4685 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4686 memset(hdev->irk, 0, sizeof(hdev->irk));
4687 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4690 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4695 err = new_settings(hdev, sk);
4698 hci_dev_unlock(hdev);
/* Validate the address type of a single IRK entry from user space.
 * LE public addresses are always acceptable; LE random addresses must be
 * static random (two most significant bits set, per the BT core spec).
 */
4702 static bool irk_is_valid(struct mgmt_irk_info *irk)
4704 switch (irk->addr.type) {
4705 case BDADDR_LE_PUBLIC:
4708 case BDADDR_LE_RANDOM:
4709 /* Two most significant bits shall be set */
4710 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: replace the stored set of Identity
 * Resolving Keys with the list supplied by user space.
 * Validates the count against U16_MAX-derived bounds and the exact
 * payload length before touching any state.
 */
4718 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4721 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound so that expected_len below cannot overflow u16. */
4722 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4723 sizeof(struct mgmt_irk_info));
4724 u16 irk_count, expected_len;
4727 BT_DBG("request for %s", hdev->name);
4729 if (!lmp_le_capable(hdev))
4730 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4731 MGMT_STATUS_NOT_SUPPORTED);
4733 irk_count = __le16_to_cpu(cp->irk_count);
4734 if (irk_count > max_irk_count) {
4735 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4736 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4737 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
4740 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4741 if (expected_len != len) {
4742 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4744 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4745 MGMT_STATUS_INVALID_PARAMS);
4748 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before clearing the existing IRK list. */
4750 for (i = 0; i < irk_count; i++) {
4751 struct mgmt_irk_info *key = &cp->irks[i];
4753 if (!irk_is_valid(key))
4754 return cmd_status(sk, hdev->id,
4756 MGMT_STATUS_INVALID_PARAMS);
4761 hci_smp_irks_clear(hdev);
4763 for (i = 0; i < irk_count; i++) {
4764 struct mgmt_irk_info *irk = &cp->irks[i];
4767 if (irk->addr.type == BDADDR_LE_PUBLIC)
4768 addr_type = ADDR_LE_DEV_PUBLIC;
4770 addr_type = ADDR_LE_DEV_RANDOM;
4772 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space handed us IRKs, so it can resolve RPAs. */
4776 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4778 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4780 hci_dev_unlock(hdev);
/* Validate a single Long Term Key entry from user space: master flag must
 * be 0/1, and LE random addresses must be static random (top two bits set).
 */
4785 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4787 if (key->master != 0x00 && key->master != 0x01)
4790 switch (key->addr.type) {
4791 case BDADDR_LE_PUBLIC:
4794 case BDADDR_LE_RANDOM:
4795 /* Two most significant bits shall be set */
4796 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace all stored SMP LTKs
 * with the list from user space. Mirrors load_irks(): bound-check the
 * count, verify the exact payload length, validate every entry, then
 * clear and repopulate the key store.
 */
4804 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4805 void *cp_data, u16 len)
4807 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound so that expected_len below cannot overflow u16. */
4808 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4809 sizeof(struct mgmt_ltk_info));
4810 u16 key_count, expected_len;
4813 BT_DBG("request for %s", hdev->name);
4815 if (!lmp_le_capable(hdev))
4816 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4817 MGMT_STATUS_NOT_SUPPORTED);
4819 key_count = __le16_to_cpu(cp->key_count);
4820 if (key_count > max_key_count) {
4821 BT_ERR("load_ltks: too big key_count value %u", key_count);
4822 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4823 MGMT_STATUS_INVALID_PARAMS);
4826 expected_len = sizeof(*cp) + key_count *
4827 sizeof(struct mgmt_ltk_info);
4828 if (expected_len != len) {
4829 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4831 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4832 MGMT_STATUS_INVALID_PARAMS);
4835 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before destroying the current key list. */
4837 for (i = 0; i < key_count; i++) {
4838 struct mgmt_ltk_info *key = &cp->keys[i];
4840 if (!ltk_is_valid(key))
4841 return cmd_status(sk, hdev->id,
4842 MGMT_OP_LOAD_LONG_TERM_KEYS,
4843 MGMT_STATUS_INVALID_PARAMS);
4848 hci_smp_ltks_clear(hdev);
4850 for (i = 0; i < key_count; i++) {
4851 struct mgmt_ltk_info *key = &cp->keys[i];
4852 u8 type, addr_type, authenticated;
4854 if (key->addr.type == BDADDR_LE_PUBLIC)
4855 addr_type = ADDR_LE_DEV_PUBLIC;
4857 addr_type = ADDR_LE_DEV_RANDOM;
4862 type = SMP_LTK_SLAVE;
/* Map the mgmt key-type value onto the SMP authenticated flag. */
4864 switch (key->type) {
4865 case MGMT_LTK_UNAUTHENTICATED:
4866 authenticated = 0x00;
4868 case MGMT_LTK_AUTHENTICATED:
4869 authenticated = 0x01;
4875 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4876 authenticated, key->val, key->enc_size, key->ediv,
4880 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4883 hci_dev_unlock(hdev);
4888 struct cmd_conn_lookup {
4889 struct hci_conn *conn;
4890 bool valid_tx_power;
/* Per-pending-command callback run from conn_info_refresh_complete():
 * builds and sends the Get Connection Info reply for the matching
 * connection, then drops the connection reference taken when the
 * command was queued.
 */
4894 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4896 struct cmd_conn_lookup *match = data;
4897 struct mgmt_cp_get_conn_info *cp;
4898 struct mgmt_rp_get_conn_info rp;
4899 struct hci_conn *conn = cmd->user_data;
/* Only answer commands queued for the connection we refreshed. */
4901 if (conn != match->conn)
4904 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
/* Echo the address from the original request back in the reply. */
4906 memset(&rp, 0, sizeof(rp));
4907 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4908 rp.addr.type = cp->addr.type;
4910 if (!match->mgmt_status) {
4911 rp.rssi = conn->rssi;
4913 if (match->valid_tx_power) {
4914 rp.tx_power = conn->tx_power;
4915 rp.max_tx_power = conn->max_tx_power;
/* TX power read failed: report the "invalid" sentinel instead. */
4917 rp.tx_power = HCI_TX_POWER_INVALID;
4918 rp.max_tx_power = HCI_TX_POWER_INVALID;
4922 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4923 match->mgmt_status, &rp, sizeof(rp));
/* Balance the hci_conn_hold() done in get_conn_info(). */
4925 hci_conn_drop(conn);
4927 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command and replies to all matching pending mgmt commands.
 */
4930 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4932 struct hci_cp_read_rssi *cp;
4933 struct hci_conn *conn;
4934 struct cmd_conn_lookup match;
4937 BT_DBG("status 0x%02x", status);
4941 /* TX power data is valid in case request completed successfully,
4942 * otherwise we assume it's not valid. At the moment we assume that
4943 * either both or none of current and max values are valid to keep code
4946 match.valid_tx_power = !status;
4948 /* Commands sent in request are either Read RSSI or Read Transmit Power
4949 * Level so we check which one was last sent to retrieve connection
4950 * handle. Both commands have handle as first parameter so it's safe to
4951 * cast data on the same command struct.
4953 * First command sent is always Read RSSI and we fail only if it fails.
4954 * In other case we simply override error to indicate success as we
4955 * already remembered if TX power value is actually valid.
4957 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4959 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4964 BT_ERR("invalid sent_cmd in response");
4968 handle = __le16_to_cpu(cp->handle);
4969 conn = hci_conn_hash_lookup_handle(hdev, handle);
4971 BT_ERR("unknown handle (%d) in response", handle);
4976 match.mgmt_status = mgmt_status(status);
4978 /* Cache refresh is complete, now reply for mgmt request for given
4981 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4982 get_conn_info_complete, &match);
4985 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection. Replies immediately from the cached values if they
 * are fresh enough, otherwise issues an HCI request to refresh them and
 * defers the reply to conn_info_refresh_complete().
 */
4988 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4991 struct mgmt_cp_get_conn_info *cp = data;
4992 struct mgmt_rp_get_conn_info rp;
4993 struct hci_conn *conn;
4994 unsigned long conn_info_age;
4997 BT_DBG("%s", hdev->name);
4999 memset(&rp, 0, sizeof(rp));
5000 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5001 rp.addr.type = cp->addr.type;
5003 if (!bdaddr_type_is_valid(cp->addr.type))
5004 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5005 MGMT_STATUS_INVALID_PARAMS,
5010 if (!hdev_is_powered(hdev)) {
5011 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5012 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pick the hash to search by transport: ACL for BR/EDR, else LE. */
5016 if (cp->addr.type == BDADDR_BREDR)
5017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5020 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5022 if (!conn || conn->state != BT_CONNECTED) {
5023 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5024 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5028 /* To avoid client trying to guess when to poll again for information we
5029 * calculate conn info age as random value between min/max set in hdev.
5031 conn_info_age = hdev->conn_info_min_age +
5032 prandom_u32_max(hdev->conn_info_max_age -
5033 hdev->conn_info_min_age);
5035 /* Query controller to refresh cached values if they are too old or were
5038 if (time_after(jiffies, conn->conn_info_timestamp +
5039 msecs_to_jiffies(conn_info_age)) ||
5040 !conn->conn_info_timestamp) {
5041 struct hci_request req;
5042 struct hci_cp_read_tx_power req_txp_cp;
5043 struct hci_cp_read_rssi req_rssi_cp;
5044 struct pending_cmd *cmd;
/* RSSI is always (re)read; it is the first command in the request. */
5046 hci_req_init(&req, hdev);
5047 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5048 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5051 /* For LE links TX power does not change thus we don't need to
5052 * query for it once value is known.
5054 if (!bdaddr_type_is_le(cp->addr.type) ||
5055 conn->tx_power == HCI_TX_POWER_INVALID) {
5056 req_txp_cp.handle = cpu_to_le16(conn->handle);
5057 req_txp_cp.type = 0x00;
5058 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5059 sizeof(req_txp_cp), &req_txp_cp);
5062 /* Max TX power needs to be read only once per connection */
5063 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5064 req_txp_cp.handle = cpu_to_le16(conn->handle);
5065 req_txp_cp.type = 0x01;
5066 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5067 sizeof(req_txp_cp), &req_txp_cp);
5070 err = hci_req_run(&req, conn_info_refresh_complete);
/* Defer the reply: the completion callback answers this command. */
5074 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the completion callback drops it. */
5081 hci_conn_hold(conn);
5082 cmd->user_data = conn;
5084 conn->conn_info_timestamp = jiffies;
5086 /* Cache is valid, just reply with values cached in hci_conn */
5087 rp.rssi = conn->rssi;
5088 rp.tx_power = conn->tx_power;
5089 rp.max_tx_power = conn->max_tx_power;
5091 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5092 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5096 hci_dev_unlock(hdev);
/* HCI completion callback for the Read Clock request issued by
 * get_clock_info(): finds the matching pending mgmt command and sends
 * local (and, for a connection-specific query, piconet) clock values.
 * NOTE(review): the assignment of cp from cmd->param appears elided here.
 */
5100 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5102 struct mgmt_cp_get_clock_info *cp;
5103 struct mgmt_rp_get_clock_info rp;
5104 struct hci_cp_read_clock *hci_cp;
5105 struct pending_cmd *cmd;
5106 struct hci_conn *conn;
5108 BT_DBG("%s status %u", hdev->name, status);
5112 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock query tied to a connection handle. */
5116 if (hci_cp->which) {
5117 u16 handle = __le16_to_cpu(hci_cp->handle);
5118 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Match the pending command by the connection stored as user_data. */
5123 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5129 memset(&rp, 0, sizeof(rp));
5130 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5135 rp.local_clock = cpu_to_le32(hdev->clock);
5138 rp.piconet_clock = cpu_to_le32(conn->clock);
5139 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5143 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5145 mgmt_pending_remove(cmd);
/* Balance the hci_conn_hold() from get_clock_info(). */
5147 hci_conn_drop(conn);
5150 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only): queues HCI Read
 * Clock for the local clock and, when a peer address is given, for that
 * connection's piconet clock. Reply is sent from
 * get_clock_info_complete().
 */
5153 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5156 struct mgmt_cp_get_clock_info *cp = data;
5157 struct mgmt_rp_get_clock_info rp;
5158 struct hci_cp_read_clock hci_cp;
5159 struct pending_cmd *cmd;
5160 struct hci_request req;
5161 struct hci_conn *conn;
5164 BT_DBG("%s", hdev->name);
5166 memset(&rp, 0, sizeof(rp));
5167 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5168 rp.addr.type = cp->addr.type;
/* Clock information only exists for the BR/EDR transport. */
5170 if (cp->addr.type != BDADDR_BREDR)
5171 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5172 MGMT_STATUS_INVALID_PARAMS,
5177 if (!hdev_is_powered(hdev)) {
5178 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5179 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must be actively connected. */
5183 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5184 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5186 if (!conn || conn->state != BT_CONNECTED) {
5187 err = cmd_complete(sk, hdev->id,
5188 MGMT_OP_GET_CLOCK_INFO,
5189 MGMT_STATUS_NOT_CONNECTED,
5197 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5203 hci_req_init(&req, hdev);
/* First command: local clock (which = 0 via the memset). */
5205 memset(&hci_cp, 0, sizeof(hci_cp));
5206 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the completion callback drops it. */
5209 hci_conn_hold(conn);
5210 cmd->user_data = conn;
5212 hci_cp.handle = cpu_to_le16(conn->handle);
5213 hci_cp.which = 0x01; /* Piconet clock */
5214 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5217 err = hci_req_run(&req, get_clock_info_complete);
5219 mgmt_pending_remove(cmd);
5222 hci_dev_unlock(hdev);
5227 /* Helper for Add/Remove Device commands */
/* Write the given page-scan setting to the controller, but only when
 * BR/EDR is enabled, the device is powered, and user space has not
 * claimed connectable mode for itself (HCI_CONNECTABLE set).
 */
5228 static void update_page_scan(struct hci_dev *hdev, u8 scan)
5229 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5232 if (!hdev_is_powered(hdev))
5235 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5236 * make any changes to page scanning.
5238 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
/* Preserve inquiry scan when the device is discoverable. */
5241 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5242 scan |= SCAN_INQUIRY;
5244 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Emit the MGMT_EV_DEVICE_ADDED event to other mgmt sockets (the
 * originating socket sk is skipped by mgmt_event).
 */
5247 static void device_added(struct sock *sk, struct hci_dev *hdev,
5248 bdaddr_t *bdaddr, u8 type, u8 action)
5250 struct mgmt_ev_device_added ev;
5252 bacpy(&ev.addr.bdaddr, bdaddr);
5253 ev.addr.type = type;
5256 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: whitelist a BR/EDR peer for page
 * scanning, or register an LE peer for background/auto connection.
 * cp->action: 0x00 = background scan/report, 0x01 = auto-connect.
 */
5259 static int add_device(struct sock *sk, struct hci_dev *hdev,
5260 void *data, u16 len)
5262 struct mgmt_cp_add_device *cp = data;
5263 u8 auto_conn, addr_type;
5266 BT_DBG("%s", hdev->name);
5268 if (!bdaddr_type_is_valid(cp->addr.type) ||
5269 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5270 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5271 MGMT_STATUS_INVALID_PARAMS,
5272 &cp->addr, sizeof(cp->addr));
5274 if (cp->action != 0x00 && cp->action != 0x01)
5275 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5276 MGMT_STATUS_INVALID_PARAMS,
5277 &cp->addr, sizeof(cp->addr));
/* BR/EDR branch: add to the whitelist and enable page scan if this is
 * the first entry.
 */
5281 if (cp->addr.type == BDADDR_BREDR) {
5284 /* Only "connect" action supported for now */
5285 if (cp->action != 0x01) {
5286 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5287 MGMT_STATUS_INVALID_PARAMS,
5288 &cp->addr, sizeof(cp->addr));
/* Page scan needs to be turned on only for the first entry. */
5292 update_scan = list_empty(&hdev->whitelist);
5294 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5300 update_page_scan(hdev, SCAN_PAGE);
/* LE branch: map the mgmt address type and the requested action onto
 * the connection-parameter auto_connect policy.
 */
5305 if (cp->addr.type == BDADDR_LE_PUBLIC)
5306 addr_type = ADDR_LE_DEV_PUBLIC;
5308 addr_type = ADDR_LE_DEV_RANDOM;
5311 auto_conn = HCI_AUTO_CONN_ALWAYS;
5313 auto_conn = HCI_AUTO_CONN_REPORT;
5315 /* If the connection parameters don't exist for this device,
5316 * they will be created and configured with defaults.
5318 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5320 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5322 &cp->addr, sizeof(cp->addr));
5327 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5329 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5330 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5333 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to other mgmt sockets (the
 * originating socket sk is skipped by mgmt_event).
 */
5337 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5338 bdaddr_t *bdaddr, u8 type)
5340 struct mgmt_ev_device_removed ev;
5342 bacpy(&ev.addr.bdaddr, bdaddr);
5343 ev.addr.type = type;
5345 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one device (specific
 * address) or all devices (BDADDR_ANY) from the whitelist / LE
 * connection-parameter lists, updating page scan and background scan
 * accordingly.
 */
5348 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5349 void *data, u16 len)
5351 struct mgmt_cp_remove_device *cp = data;
5354 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove a single device. */
5358 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5359 struct hci_conn_params *params;
5362 if (!bdaddr_type_is_valid(cp->addr.type)) {
5363 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5364 MGMT_STATUS_INVALID_PARAMS,
5365 &cp->addr, sizeof(cp->addr));
5369 if (cp->addr.type == BDADDR_BREDR) {
5370 err = hci_bdaddr_list_del(&hdev->whitelist,
5374 err = cmd_complete(sk, hdev->id,
5375 MGMT_OP_REMOVE_DEVICE,
5376 MGMT_STATUS_INVALID_PARAMS,
5377 &cp->addr, sizeof(cp->addr));
/* Last whitelist entry gone: page scan no longer needed. */
5381 if (list_empty(&hdev->whitelist))
5382 update_page_scan(hdev, SCAN_DISABLED);
5384 device_removed(sk, hdev, &cp->addr.bdaddr,
5389 if (cp->addr.type == BDADDR_LE_PUBLIC)
5390 addr_type = ADDR_LE_DEV_PUBLIC;
5392 addr_type = ADDR_LE_DEV_RANDOM;
5394 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5397 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5398 MGMT_STATUS_INVALID_PARAMS,
5399 &cp->addr, sizeof(cp->addr));
/* Disabled entries were not added via Add Device; refuse to
 * remove them through this command.
 */
5403 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5404 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5405 MGMT_STATUS_INVALID_PARAMS,
5406 &cp->addr, sizeof(cp->addr));
5410 list_del(&params->action);
5411 list_del(&params->list);
5413 hci_update_background_scan(hdev);
5415 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: clear everything; addr.type must be 0 here. */
5417 struct hci_conn_params *p, *tmp;
5418 struct bdaddr_list *b, *btmp;
5420 if (cp->addr.type) {
5421 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5422 MGMT_STATUS_INVALID_PARAMS,
5423 &cp->addr, sizeof(cp->addr));
5427 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5428 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5433 update_page_scan(hdev, SCAN_DISABLED);
/* Remove every LE entry except disabled (not user-added) ones. */
5435 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5436 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5438 device_removed(sk, hdev, &p->addr, p->addr_type);
5439 list_del(&p->action);
5444 BT_DBG("All LE connection parameters were removed");
5446 hci_update_background_scan(hdev);
5450 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5451 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5454 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection
 * parameters. Invalid individual entries are skipped with a log message
 * rather than failing the whole command.
 */
5458 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5461 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound so that expected_len below cannot overflow u16. */
5462 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5463 sizeof(struct mgmt_conn_param));
5464 u16 param_count, expected_len;
5467 if (!lmp_le_capable(hdev))
5468 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5469 MGMT_STATUS_NOT_SUPPORTED);
5471 param_count = __le16_to_cpu(cp->param_count);
5472 if (param_count > max_param_count) {
5473 BT_ERR("load_conn_param: too big param_count value %u",
5475 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5476 MGMT_STATUS_INVALID_PARAMS);
5479 expected_len = sizeof(*cp) + param_count *
5480 sizeof(struct mgmt_conn_param);
5481 if (expected_len != len) {
5482 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5484 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5485 MGMT_STATUS_INVALID_PARAMS);
5488 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale auto-disabled entries before loading the new set. */
5492 hci_conn_params_clear_disabled(hdev);
5494 for (i = 0; i < param_count; i++) {
5495 struct mgmt_conn_param *param = &cp->params[i];
5496 struct hci_conn_params *hci_param;
5497 u16 min, max, latency, timeout;
5500 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5503 if (param->addr.type == BDADDR_LE_PUBLIC) {
5504 addr_type = ADDR_LE_DEV_PUBLIC;
5505 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5506 addr_type = ADDR_LE_DEV_RANDOM;
/* Bad address type: skip this entry, keep processing the rest. */
5508 BT_ERR("Ignoring invalid connection parameters");
5512 min = le16_to_cpu(param->min_interval);
5513 max = le16_to_cpu(param->max_interval);
5514 latency = le16_to_cpu(param->latency);
5515 timeout = le16_to_cpu(param->timeout);
5517 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5518 min, max, latency, timeout);
5520 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5521 BT_ERR("Ignoring invalid connection parameters");
5525 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5528 BT_ERR("Failed to add connection parameters");
5532 hci_param->conn_min_interval = min;
5533 hci_param->conn_max_interval = max;
5534 hci_param->conn_latency = latency;
5535 hci_param->supervision_timeout = timeout;
5538 hci_dev_unlock(hdev);
5540 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller
 * has been configured by external means. Only valid while powered off
 * and when the driver advertises HCI_QUIRK_EXTERNAL_CONFIG. Moving in
 * or out of the configured state migrates the index between the
 * configured and unconfigured index lists.
 */
5543 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5544 void *data, u16 len)
5546 struct mgmt_cp_set_external_config *cp = data;
5550 BT_DBG("%s", hdev->name);
5552 if (hdev_is_powered(hdev))
5553 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5554 MGMT_STATUS_REJECTED);
5556 if (cp->config != 0x00 && cp->config != 0x01)
5557 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5558 MGMT_STATUS_INVALID_PARAMS);
5560 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5561 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5562 MGMT_STATUS_NOT_SUPPORTED);
5567 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5570 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5573 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5580 err = new_options(hdev, sk);
/* If the configured state now differs from the UNCONFIGURED flag, the
 * index must move lists: remove it, flip the flag, re-add it.
 */
5582 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5583 mgmt_index_removed(hdev);
5585 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5586 set_bit(HCI_CONFIG, &hdev->dev_flags);
5587 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5589 queue_work(hdev->req_workqueue, &hdev->power_on);
5591 set_bit(HCI_RAW, &hdev->flags);
5592 mgmt_index_added(hdev);
5597 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public BD_ADDR for an
 * unconfigured controller. Requires the driver's set_bdaddr callback,
 * a powered-off device, and a non-ANY address. If setting the address
 * completes configuration, the index migrates to the configured list
 * and power-on is scheduled.
 */
5601 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5602 void *data, u16 len)
5604 struct mgmt_cp_set_public_address *cp = data;
5608 BT_DBG("%s", hdev->name);
5610 if (hdev_is_powered(hdev))
5611 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5612 MGMT_STATUS_REJECTED);
5614 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5615 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5616 MGMT_STATUS_INVALID_PARAMS);
5618 if (!hdev->set_bdaddr)
5619 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5620 MGMT_STATUS_NOT_SUPPORTED);
5624 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5625 bacpy(&hdev->public_addr, &cp->bdaddr);
5627 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5634 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5635 err = new_options(hdev, sk);
/* Address made the device fully configured: move the index over and
 * kick off the power-on sequence.
 */
5637 if (is_configured(hdev)) {
5638 mgmt_index_removed(hdev);
5640 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5642 set_bit(HCI_CONFIG, &hdev->dev_flags);
5643 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5645 queue_work(hdev->req_workqueue, &hdev->power_on);
5649 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. The boolean
 * marks variable-length commands (payload may exceed the minimum size);
 * the last field is the minimum/exact expected parameter size. Entry
 * order must match the MGMT_OP_* opcode numbering.
 */
5653 static const struct mgmt_handler {
5654 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5658 } mgmt_handlers[] = {
5659 { NULL }, /* 0x0000 (no command) */
5660 { read_version, false, MGMT_READ_VERSION_SIZE },
5661 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5662 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5663 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5664 { set_powered, false, MGMT_SETTING_SIZE },
5665 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5666 { set_connectable, false, MGMT_SETTING_SIZE },
5667 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5668 { set_pairable, false, MGMT_SETTING_SIZE },
5669 { set_link_security, false, MGMT_SETTING_SIZE },
5670 { set_ssp, false, MGMT_SETTING_SIZE },
5671 { set_hs, false, MGMT_SETTING_SIZE },
5672 { set_le, false, MGMT_SETTING_SIZE },
5673 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5674 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5675 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5676 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5677 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5678 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5679 { disconnect, false, MGMT_DISCONNECT_SIZE },
5680 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5681 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5682 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5683 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5684 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5685 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5686 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5687 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5688 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5689 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5690 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5691 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5692 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5693 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5694 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5695 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5696 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5697 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5698 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5699 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5700 { set_advertising, false, MGMT_SETTING_SIZE },
5701 { set_bredr, false, MGMT_SETTING_SIZE },
5702 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5703 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5704 { set_secure_conn, false, MGMT_SETTING_SIZE },
5705 { set_debug_keys, false, MGMT_SETTING_SIZE },
5706 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5707 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5708 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5709 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5710 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5711 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5712 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5713 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5714 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5715 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5716 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
/* Entry point for mgmt messages arriving on an HCI control socket:
 * copies the message from user space, validates header/opcode/index,
 * enforces per-device state restrictions, checks the parameter length
 * against mgmt_handlers[], and dispatches to the handler.
 */
5719 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5723 struct mgmt_hdr *hdr;
5724 u16 opcode, index, len;
5725 struct hci_dev *hdev = NULL;
5726 const struct mgmt_handler *handler;
5729 BT_DBG("got %zu bytes", msglen);
5731 if (msglen < sizeof(*hdr))
5734 buf = kmalloc(msglen, GFP_KERNEL);
5738 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5744 opcode = __le16_to_cpu(hdr->opcode);
5745 index = __le16_to_cpu(hdr->index);
5746 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match the actual payload size. */
5748 if (len != msglen - sizeof(*hdr)) {
5753 if (index != MGMT_INDEX_NONE) {
5754 hdev = hci_dev_get(index);
5756 err = cmd_status(sk, index, opcode,
5757 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup/config or claimed via a user channel are
 * not visible to the mgmt interface.
 */
5761 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5762 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5763 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5764 err = cmd_status(sk, index, opcode,
5765 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured devices accept only the configuration commands. */
5769 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5770 opcode != MGMT_OP_READ_CONFIG_INFO &&
5771 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5772 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5773 err = cmd_status(sk, index, opcode,
5774 MGMT_STATUS_INVALID_INDEX);
5779 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5780 mgmt_handlers[opcode].func == NULL) {
5781 BT_DBG("Unknown op %u", opcode);
5782 err = cmd_status(sk, index, opcode,
5783 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry a device index... */
5787 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5788 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5789 err = cmd_status(sk, index, opcode,
5790 MGMT_STATUS_INVALID_INDEX);
/* ...and per-device commands must carry one. */
5794 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5795 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5796 err = cmd_status(sk, index, opcode,
5797 MGMT_STATUS_INVALID_INDEX);
5801 handler = &mgmt_handlers[opcode];
/* var_len handlers take at least data_len bytes; others exactly. */
5803 if ((handler->var_len && len < handler->data_len) ||
5804 (!handler->var_len && len != handler->data_len)) {
5805 err = cmd_status(sk, index, opcode,
5806 MGMT_STATUS_INVALID_PARAMS);
5811 mgmt_init_hdev(sk, hdev);
5813 cp = buf + sizeof(*hdr);
5815 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt listeners. Only BR/EDR
 * device types are reported; raw-only devices are skipped. Unconfigured
 * devices go to the unconfigured-index event instead.
 */
5829 void mgmt_index_added(struct hci_dev *hdev)
5831 if (hdev->dev_type != HCI_BREDR)
5834 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5837 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5838 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5840 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal to mgmt listeners. All pending commands
 * for the device are failed with INVALID_INDEX first (opcode 0 matches
 * every pending command in mgmt_pending_foreach).
 */
5843 void mgmt_index_removed(struct hci_dev *hdev)
5845 u8 status = MGMT_STATUS_INVALID_INDEX;
5847 if (hdev->dev_type != HCI_BREDR)
5850 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5853 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5855 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5856 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5858 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5861 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection-parameter entry into the pend_le_conns /
 * pend_le_reports action lists according to its auto_connect policy,
 * then restart background scanning. Called after power-on.
 */
5862 static void restart_le_actions(struct hci_dev *hdev)
5864 struct hci_conn_params *p;
5866 list_for_each_entry(p, &hdev->le_conn_params, list) {
5867 /* Needed for AUTO_OFF case where might not "really"
5868 * have been powered off.
5870 list_del_init(&p->action);
5872 switch (p->auto_connect) {
5873 case HCI_AUTO_CONN_ALWAYS:
5874 list_add(&p->action, &hdev->pend_le_conns);
5876 case HCI_AUTO_CONN_REPORT:
5877 list_add(&p->action, &hdev->pend_le_reports);
5884 hci_update_background_scan(hdev);
/* HCI request completion callback for powered_update_hci(): restarts LE
 * actions, answers all pending Set Powered commands and broadcasts the
 * resulting settings.
 */
5887 static void powered_complete(struct hci_dev *hdev, u8 status)
5889 struct cmd_lookup match = { NULL, hdev };
5891 BT_DBG("status 0x%02x", status);
5895 restart_le_actions(hdev);
5897 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5899 new_settings(hdev, match.sk);
5901 hci_dev_unlock(hdev);
/* Synchronize controller state with mgmt settings after power-on: SSP
 * mode, LE host support, advertising data, link security and BR/EDR
 * scanning. Builds a single hci_request completed by powered_complete().
 */
5907 static int powered_update_hci(struct hci_dev *hdev)
5909 struct hci_request req;
5912 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set.
 */
5914 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5915 !lmp_host_ssp_capable(hdev)) {
5918 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5921 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5922 lmp_bredr_capable(hdev)) {
5923 struct hci_cp_write_le_host_supported cp;
5926 cp.simul = lmp_le_br_capable(hdev);
5928 /* Check first if we already have the right
5929 * host state (host features set)
5931 if (cp.le != lmp_host_le_capable(hdev) ||
5932 cp.simul != lmp_host_le_br_capable(hdev))
5933 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5937 if (lmp_le_capable(hdev)) {
5938 /* Make sure the controller has a good default for
5939 * advertising data. This also applies to the case
5940 * where BR/EDR was toggled during the AUTO_OFF phase.
5942 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5943 update_adv_data(&req);
5944 update_scan_rsp_data(&req);
5947 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5948 enable_advertising(&req);
/* Sync the authentication-enable setting with the mgmt flag. */
5951 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5952 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5953 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5954 sizeof(link_sec), &link_sec);
5956 if (lmp_bredr_capable(hdev)) {
5957 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5958 set_bredr_scan(&req);
5964 return hci_req_run(&req, powered_complete);
5967 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5969 struct cmd_lookup match = { NULL, hdev };
5970 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5971 u8 zero_cod[] = { 0, 0, 0 };
5974 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5978 if (powered_update_hci(hdev) == 0)
5981 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5986 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5987 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5989 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5990 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5991 zero_cod, sizeof(zero_cod), NULL);
5994 err = new_settings(hdev, match.sk);
6002 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6004 struct pending_cmd *cmd;
6007 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6011 if (err == -ERFKILL)
6012 status = MGMT_STATUS_RFKILLED;
6014 status = MGMT_STATUS_FAILED;
6016 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6018 mgmt_pending_remove(cmd);
6021 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6023 struct hci_request req;
6027 /* When discoverable timeout triggers, then just make sure
6028 * the limited discoverable flag is cleared. Even in the case
6029 * of a timeout triggered from general discoverable, it is
6030 * safe to unconditionally clear the flag.
6032 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6033 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6035 hci_req_init(&req, hdev);
6036 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6037 u8 scan = SCAN_PAGE;
6038 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6039 sizeof(scan), &scan);
6042 update_adv_data(&req);
6043 hci_req_run(&req, NULL);
6045 hdev->discov_timeout = 0;
6047 new_settings(hdev, NULL);
6049 hci_dev_unlock(hdev);
6052 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6055 struct mgmt_ev_new_link_key ev;
6057 memset(&ev, 0, sizeof(ev));
6059 ev.store_hint = persistent;
6060 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6061 ev.key.addr.type = BDADDR_BREDR;
6062 ev.key.type = key->type;
6063 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6064 ev.key.pin_len = key->pin_len;
6066 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6069 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6071 if (ltk->authenticated)
6072 return MGMT_LTK_AUTHENTICATED;
6074 return MGMT_LTK_UNAUTHENTICATED;
6077 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6079 struct mgmt_ev_new_long_term_key ev;
6081 memset(&ev, 0, sizeof(ev));
6083 /* Devices using resolvable or non-resolvable random addresses
6084 * without providing an indentity resolving key don't require
6085 * to store long term keys. Their addresses will change the
6088 * Only when a remote device provides an identity address
6089 * make sure the long term key is stored. If the remote
6090 * identity is known, the long term keys are internally
6091 * mapped to the identity address. So allow static random
6092 * and public addresses here.
6094 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6095 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6096 ev.store_hint = 0x00;
6098 ev.store_hint = persistent;
6100 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6101 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6102 ev.key.type = mgmt_ltk_type(key);
6103 ev.key.enc_size = key->enc_size;
6104 ev.key.ediv = key->ediv;
6105 ev.key.rand = key->rand;
6107 if (key->type == SMP_LTK)
6110 memcpy(ev.key.val, key->val, sizeof(key->val));
6112 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6115 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6117 struct mgmt_ev_new_irk ev;
6119 memset(&ev, 0, sizeof(ev));
6121 /* For identity resolving keys from devices that are already
6122 * using a public address or static random address, do not
6123 * ask for storing this key. The identity resolving key really
6124 * is only mandatory for devices using resovlable random
6127 * Storing all identity resolving keys has the downside that
6128 * they will be also loaded on next boot of they system. More
6129 * identity resolving keys, means more time during scanning is
6130 * needed to actually resolve these addresses.
6132 if (bacmp(&irk->rpa, BDADDR_ANY))
6133 ev.store_hint = 0x01;
6135 ev.store_hint = 0x00;
6137 bacpy(&ev.rpa, &irk->rpa);
6138 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6139 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6140 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6142 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6145 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6148 struct mgmt_ev_new_csrk ev;
6150 memset(&ev, 0, sizeof(ev));
6152 /* Devices using resolvable or non-resolvable random addresses
6153 * without providing an indentity resolving key don't require
6154 * to store signature resolving keys. Their addresses will change
6155 * the next time around.
6157 * Only when a remote device provides an identity address
6158 * make sure the signature resolving key is stored. So allow
6159 * static random and public addresses here.
6161 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6162 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6163 ev.store_hint = 0x00;
6165 ev.store_hint = persistent;
6167 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6168 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6169 ev.key.master = csrk->master;
6170 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6172 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6175 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6176 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6177 u16 max_interval, u16 latency, u16 timeout)
6179 struct mgmt_ev_new_conn_param ev;
6181 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6184 memset(&ev, 0, sizeof(ev));
6185 bacpy(&ev.addr.bdaddr, bdaddr);
6186 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6187 ev.store_hint = store_hint;
6188 ev.min_interval = cpu_to_le16(min_interval);
6189 ev.max_interval = cpu_to_le16(max_interval);
6190 ev.latency = cpu_to_le16(latency);
6191 ev.timeout = cpu_to_le16(timeout);
6193 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) to the
 * buffer at offset eir_len and return the new total length. The
 * caller must guarantee the buffer has room for data_len + 2 bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
6207 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6208 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6212 struct mgmt_ev_device_connected *ev = (void *) buf;
6215 bacpy(&ev->addr.bdaddr, bdaddr);
6216 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6218 ev->flags = __cpu_to_le32(flags);
6221 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6224 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6225 eir_len = eir_append_data(ev->eir, eir_len,
6226 EIR_CLASS_OF_DEV, dev_class, 3);
6228 ev->eir_len = cpu_to_le16(eir_len);
6230 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6231 sizeof(*ev) + eir_len, NULL);
6234 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6236 struct mgmt_cp_disconnect *cp = cmd->param;
6237 struct sock **sk = data;
6238 struct mgmt_rp_disconnect rp;
6240 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6241 rp.addr.type = cp->addr.type;
6243 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6249 mgmt_pending_remove(cmd);
6252 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6254 struct hci_dev *hdev = data;
6255 struct mgmt_cp_unpair_device *cp = cmd->param;
6256 struct mgmt_rp_unpair_device rp;
6258 memset(&rp, 0, sizeof(rp));
6259 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6260 rp.addr.type = cp->addr.type;
6262 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6264 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6266 mgmt_pending_remove(cmd);
6269 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6270 u8 link_type, u8 addr_type, u8 reason,
6271 bool mgmt_connected)
6273 struct mgmt_ev_device_disconnected ev;
6274 struct pending_cmd *power_off;
6275 struct sock *sk = NULL;
6277 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6279 struct mgmt_mode *cp = power_off->param;
6281 /* The connection is still in hci_conn_hash so test for 1
6282 * instead of 0 to know if this is the last one.
6284 if (!cp->val && hci_conn_count(hdev) == 1) {
6285 cancel_delayed_work(&hdev->power_off);
6286 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6290 if (!mgmt_connected)
6293 if (link_type != ACL_LINK && link_type != LE_LINK)
6296 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6298 bacpy(&ev.addr.bdaddr, bdaddr);
6299 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6302 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6307 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6311 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6312 u8 link_type, u8 addr_type, u8 status)
6314 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6315 struct mgmt_cp_disconnect *cp;
6316 struct mgmt_rp_disconnect rp;
6317 struct pending_cmd *cmd;
6319 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6322 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6328 if (bacmp(bdaddr, &cp->addr.bdaddr))
6331 if (cp->addr.type != bdaddr_type)
6334 bacpy(&rp.addr.bdaddr, bdaddr);
6335 rp.addr.type = bdaddr_type;
6337 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6338 mgmt_status(status), &rp, sizeof(rp));
6340 mgmt_pending_remove(cmd);
6343 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6344 u8 addr_type, u8 status)
6346 struct mgmt_ev_connect_failed ev;
6347 struct pending_cmd *power_off;
6349 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6351 struct mgmt_mode *cp = power_off->param;
6353 /* The connection is still in hci_conn_hash so test for 1
6354 * instead of 0 to know if this is the last one.
6356 if (!cp->val && hci_conn_count(hdev) == 1) {
6357 cancel_delayed_work(&hdev->power_off);
6358 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6362 bacpy(&ev.addr.bdaddr, bdaddr);
6363 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6364 ev.status = mgmt_status(status);
6366 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6369 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6371 struct mgmt_ev_pin_code_request ev;
6373 bacpy(&ev.addr.bdaddr, bdaddr);
6374 ev.addr.type = BDADDR_BREDR;
6377 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6380 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6383 struct pending_cmd *cmd;
6384 struct mgmt_rp_pin_code_reply rp;
6386 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6390 bacpy(&rp.addr.bdaddr, bdaddr);
6391 rp.addr.type = BDADDR_BREDR;
6393 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6394 mgmt_status(status), &rp, sizeof(rp));
6396 mgmt_pending_remove(cmd);
6399 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6402 struct pending_cmd *cmd;
6403 struct mgmt_rp_pin_code_reply rp;
6405 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6409 bacpy(&rp.addr.bdaddr, bdaddr);
6410 rp.addr.type = BDADDR_BREDR;
6412 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6413 mgmt_status(status), &rp, sizeof(rp));
6415 mgmt_pending_remove(cmd);
6418 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6419 u8 link_type, u8 addr_type, u32 value,
6422 struct mgmt_ev_user_confirm_request ev;
6424 BT_DBG("%s", hdev->name);
6426 bacpy(&ev.addr.bdaddr, bdaddr);
6427 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6428 ev.confirm_hint = confirm_hint;
6429 ev.value = cpu_to_le32(value);
6431 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6435 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6436 u8 link_type, u8 addr_type)
6438 struct mgmt_ev_user_passkey_request ev;
6440 BT_DBG("%s", hdev->name);
6442 bacpy(&ev.addr.bdaddr, bdaddr);
6443 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6445 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6449 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6450 u8 link_type, u8 addr_type, u8 status,
6453 struct pending_cmd *cmd;
6454 struct mgmt_rp_user_confirm_reply rp;
6457 cmd = mgmt_pending_find(opcode, hdev);
6461 bacpy(&rp.addr.bdaddr, bdaddr);
6462 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6463 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6466 mgmt_pending_remove(cmd);
6471 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6472 u8 link_type, u8 addr_type, u8 status)
6474 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6475 status, MGMT_OP_USER_CONFIRM_REPLY);
6478 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6479 u8 link_type, u8 addr_type, u8 status)
6481 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6483 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6486 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6487 u8 link_type, u8 addr_type, u8 status)
6489 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6490 status, MGMT_OP_USER_PASSKEY_REPLY);
6493 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6494 u8 link_type, u8 addr_type, u8 status)
6496 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6498 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6501 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6502 u8 link_type, u8 addr_type, u32 passkey,
6505 struct mgmt_ev_passkey_notify ev;
6507 BT_DBG("%s", hdev->name);
6509 bacpy(&ev.addr.bdaddr, bdaddr);
6510 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6511 ev.passkey = __cpu_to_le32(passkey);
6512 ev.entered = entered;
6514 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6517 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6518 u8 addr_type, u8 status)
6520 struct mgmt_ev_auth_failed ev;
6522 bacpy(&ev.addr.bdaddr, bdaddr);
6523 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6524 ev.status = mgmt_status(status);
6526 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6529 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6531 struct cmd_lookup match = { NULL, hdev };
6535 u8 mgmt_err = mgmt_status(status);
6536 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6537 cmd_status_rsp, &mgmt_err);
6541 if (test_bit(HCI_AUTH, &hdev->flags))
6542 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6545 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6548 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6552 new_settings(hdev, match.sk);
6558 static void clear_eir(struct hci_request *req)
6560 struct hci_dev *hdev = req->hdev;
6561 struct hci_cp_write_eir cp;
6563 if (!lmp_ext_inq_capable(hdev))
6566 memset(hdev->eir, 0, sizeof(hdev->eir));
6568 memset(&cp, 0, sizeof(cp));
6570 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6573 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6575 struct cmd_lookup match = { NULL, hdev };
6576 struct hci_request req;
6577 bool changed = false;
6580 u8 mgmt_err = mgmt_status(status);
6582 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6583 &hdev->dev_flags)) {
6584 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6585 new_settings(hdev, NULL);
6588 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6594 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6596 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6598 changed = test_and_clear_bit(HCI_HS_ENABLED,
6601 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6604 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6607 new_settings(hdev, match.sk);
6612 hci_req_init(&req, hdev);
6614 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6615 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6616 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6617 sizeof(enable), &enable);
6623 hci_req_run(&req, NULL);
6626 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6628 struct cmd_lookup match = { NULL, hdev };
6629 bool changed = false;
6632 u8 mgmt_err = mgmt_status(status);
6635 if (test_and_clear_bit(HCI_SC_ENABLED,
6637 new_settings(hdev, NULL);
6638 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6641 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6642 cmd_status_rsp, &mgmt_err);
6647 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6649 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6650 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6653 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6654 settings_rsp, &match);
6657 new_settings(hdev, match.sk);
6663 static void sk_lookup(struct pending_cmd *cmd, void *data)
6665 struct cmd_lookup *match = data;
6667 if (match->sk == NULL) {
6668 match->sk = cmd->sk;
6669 sock_hold(match->sk);
6673 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6676 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6678 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6679 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6680 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6683 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6690 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6692 struct mgmt_cp_set_local_name ev;
6693 struct pending_cmd *cmd;
6698 memset(&ev, 0, sizeof(ev));
6699 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6700 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6702 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6704 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6706 /* If this is a HCI command related to powering on the
6707 * HCI dev don't send any mgmt signals.
6709 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6713 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6714 cmd ? cmd->sk : NULL);
6717 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6718 u8 *randomizer192, u8 *hash256,
6719 u8 *randomizer256, u8 status)
6721 struct pending_cmd *cmd;
6723 BT_DBG("%s status %u", hdev->name, status);
6725 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6730 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6731 mgmt_status(status));
6733 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6734 hash256 && randomizer256) {
6735 struct mgmt_rp_read_local_oob_ext_data rp;
6737 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6738 memcpy(rp.randomizer192, randomizer192,
6739 sizeof(rp.randomizer192));
6741 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6742 memcpy(rp.randomizer256, randomizer256,
6743 sizeof(rp.randomizer256));
6745 cmd_complete(cmd->sk, hdev->id,
6746 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6749 struct mgmt_rp_read_local_oob_data rp;
6751 memcpy(rp.hash, hash192, sizeof(rp.hash));
6752 memcpy(rp.randomizer, randomizer192,
6753 sizeof(rp.randomizer));
6755 cmd_complete(cmd->sk, hdev->id,
6756 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6761 mgmt_pending_remove(cmd);
6764 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6765 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6766 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6769 struct mgmt_ev_device_found *ev = (void *) buf;
6772 /* Don't send events for a non-kernel initiated discovery. With
6773 * LE one exception is if we have pend_le_reports > 0 in which
6774 * case we're doing passive scanning and want these events.
6776 if (!hci_discovery_active(hdev)) {
6777 if (link_type == ACL_LINK)
6779 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6783 /* Make sure that the buffer is big enough. The 5 extra bytes
6784 * are for the potential CoD field.
6786 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6789 memset(buf, 0, sizeof(buf));
6791 bacpy(&ev->addr.bdaddr, bdaddr);
6792 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6794 ev->flags = cpu_to_le32(flags);
6797 memcpy(ev->eir, eir, eir_len);
6799 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6800 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6803 if (scan_rsp_len > 0)
6804 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6806 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6807 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6809 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6812 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6813 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6815 struct mgmt_ev_device_found *ev;
6816 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6819 ev = (struct mgmt_ev_device_found *) buf;
6821 memset(buf, 0, sizeof(buf));
6823 bacpy(&ev->addr.bdaddr, bdaddr);
6824 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6827 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6830 ev->eir_len = cpu_to_le16(eir_len);
6832 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6835 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6837 struct mgmt_ev_discovering ev;
6838 struct pending_cmd *cmd;
6840 BT_DBG("%s discovering %u", hdev->name, discovering);
6843 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6845 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6848 u8 type = hdev->discovery.type;
6850 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6852 mgmt_pending_remove(cmd);
6855 memset(&ev, 0, sizeof(ev));
6856 ev.type = hdev->discovery.type;
6857 ev.discovering = discovering;
6859 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6862 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6864 BT_DBG("%s status %u", hdev->name, status);
6867 void mgmt_reenable_advertising(struct hci_dev *hdev)
6869 struct hci_request req;
6871 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6874 hci_req_init(&req, hdev);
6875 enable_advertising(&req);
6876 hci_req_run(&req, adv_enable_complete);