/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
/* Version/revision reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	13
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
106 MGMT_OP_START_LIMITED_DISCOVERY,
109 static const u16 mgmt_events[] = {
110 MGMT_EV_CONTROLLER_ERROR,
112 MGMT_EV_INDEX_REMOVED,
113 MGMT_EV_NEW_SETTINGS,
114 MGMT_EV_CLASS_OF_DEV_CHANGED,
115 MGMT_EV_LOCAL_NAME_CHANGED,
116 MGMT_EV_NEW_LINK_KEY,
117 MGMT_EV_NEW_LONG_TERM_KEY,
118 MGMT_EV_DEVICE_CONNECTED,
119 MGMT_EV_DEVICE_DISCONNECTED,
120 MGMT_EV_CONNECT_FAILED,
121 MGMT_EV_PIN_CODE_REQUEST,
122 MGMT_EV_USER_CONFIRM_REQUEST,
123 MGMT_EV_USER_PASSKEY_REQUEST,
125 MGMT_EV_DEVICE_FOUND,
127 MGMT_EV_DEVICE_BLOCKED,
128 MGMT_EV_DEVICE_UNBLOCKED,
129 MGMT_EV_DEVICE_UNPAIRED,
130 MGMT_EV_PASSKEY_NOTIFY,
133 MGMT_EV_DEVICE_ADDED,
134 MGMT_EV_DEVICE_REMOVED,
135 MGMT_EV_NEW_CONN_PARAM,
136 MGMT_EV_UNCONF_INDEX_ADDED,
137 MGMT_EV_UNCONF_INDEX_REMOVED,
138 MGMT_EV_NEW_CONFIG_OPTIONS,
139 MGMT_EV_EXT_INDEX_ADDED,
140 MGMT_EV_EXT_INDEX_REMOVED,
141 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
142 MGMT_EV_ADVERTISING_ADDED,
143 MGMT_EV_ADVERTISING_REMOVED,
146 static const u16 mgmt_untrusted_commands[] = {
147 MGMT_OP_READ_INDEX_LIST,
149 MGMT_OP_READ_UNCONF_INDEX_LIST,
150 MGMT_OP_READ_CONFIG_INFO,
151 MGMT_OP_READ_EXT_INDEX_LIST,
154 static const u16 mgmt_untrusted_events[] = {
156 MGMT_EV_INDEX_REMOVED,
157 MGMT_EV_NEW_SETTINGS,
158 MGMT_EV_CLASS_OF_DEV_CHANGED,
159 MGMT_EV_LOCAL_NAME_CHANGED,
160 MGMT_EV_UNCONF_INDEX_ADDED,
161 MGMT_EV_UNCONF_INDEX_REMOVED,
162 MGMT_EV_NEW_CONFIG_OPTIONS,
163 MGMT_EV_EXT_INDEX_ADDED,
164 MGMT_EV_EXT_INDEX_REMOVED,
/* Service/EIR cache validity window (2 seconds, in jiffies) */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key used to detect blank/unset keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
172 /* HCI to MGMT error code conversion table */
173 static u8 mgmt_status_table[] = {
175 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
176 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
177 MGMT_STATUS_FAILED, /* Hardware Failure */
178 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
180 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
181 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
182 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
183 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
184 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
185 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
186 MGMT_STATUS_BUSY, /* Command Disallowed */
187 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
188 MGMT_STATUS_REJECTED, /* Rejected Security */
189 MGMT_STATUS_REJECTED, /* Rejected Personal */
190 MGMT_STATUS_TIMEOUT, /* Host Timeout */
191 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
192 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
193 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
194 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
195 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
196 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
197 MGMT_STATUS_BUSY, /* Repeated Attempts */
198 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
199 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
200 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
201 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
202 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
203 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
204 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
205 MGMT_STATUS_FAILED, /* Unspecified Error */
206 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
207 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
208 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
209 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
210 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
211 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
212 MGMT_STATUS_FAILED, /* Unit Link Key Used */
213 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
214 MGMT_STATUS_TIMEOUT, /* Instant Passed */
215 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
216 MGMT_STATUS_FAILED, /* Transaction Collision */
217 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
218 MGMT_STATUS_REJECTED, /* QoS Rejected */
219 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
220 MGMT_STATUS_REJECTED, /* Insufficient Security */
221 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
222 MGMT_STATUS_BUSY, /* Role Switch Pending */
223 MGMT_STATUS_FAILED, /* Slot Violation */
224 MGMT_STATUS_FAILED, /* Role Switch Failed */
225 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
226 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
227 MGMT_STATUS_BUSY, /* Host Busy Pairing */
228 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
229 MGMT_STATUS_BUSY, /* Controller Busy */
230 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
231 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
232 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
233 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
234 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
237 static u8 mgmt_status(u8 hci_status)
239 if (hci_status < ARRAY_SIZE(mgmt_status_table))
240 return mgmt_status_table[hci_status];
242 return MGMT_STATUS_FAILED;
245 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
248 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
252 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
253 u16 len, int flag, struct sock *skip_sk)
255 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
259 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
260 u16 len, struct sock *skip_sk)
262 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
263 HCI_MGMT_GENERIC_EVENTS, skip_sk);
266 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
267 struct sock *skip_sk)
269 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
270 HCI_SOCK_TRUSTED, skip_sk);
273 static u8 le_addr_type(u8 mgmt_addr_type)
275 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
276 return ADDR_LE_DEV_PUBLIC;
278 return ADDR_LE_DEV_RANDOM;
281 void mgmt_fill_version_info(void *ver)
283 struct mgmt_rp_read_version *rp = ver;
285 rp->version = MGMT_VERSION;
286 rp->revision = cpu_to_le16(MGMT_REVISION);
289 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
292 struct mgmt_rp_read_version rp;
294 BT_DBG("sock %p", sk);
296 mgmt_fill_version_info(&rp);
298 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
302 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
305 struct mgmt_rp_read_commands *rp;
306 u16 num_commands, num_events;
310 BT_DBG("sock %p", sk);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 num_commands = ARRAY_SIZE(mgmt_commands);
314 num_events = ARRAY_SIZE(mgmt_events);
316 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
317 num_events = ARRAY_SIZE(mgmt_untrusted_events);
320 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
322 rp = kmalloc(rp_size, GFP_KERNEL);
326 rp->num_commands = cpu_to_le16(num_commands);
327 rp->num_events = cpu_to_le16(num_events);
329 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
330 __le16 *opcode = rp->opcodes;
332 for (i = 0; i < num_commands; i++, opcode++)
333 put_unaligned_le16(mgmt_commands[i], opcode);
335 for (i = 0; i < num_events; i++, opcode++)
336 put_unaligned_le16(mgmt_events[i], opcode);
338 __le16 *opcode = rp->opcodes;
340 for (i = 0; i < num_commands; i++, opcode++)
341 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
343 for (i = 0; i < num_events; i++, opcode++)
344 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
347 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
354 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
357 struct mgmt_rp_read_index_list *rp;
363 BT_DBG("sock %p", sk);
365 read_lock(&hci_dev_list_lock);
368 list_for_each_entry(d, &hci_dev_list, list) {
369 if (d->dev_type == HCI_PRIMARY &&
370 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
374 rp_len = sizeof(*rp) + (2 * count);
375 rp = kmalloc(rp_len, GFP_ATOMIC);
377 read_unlock(&hci_dev_list_lock);
382 list_for_each_entry(d, &hci_dev_list, list) {
383 if (hci_dev_test_flag(d, HCI_SETUP) ||
384 hci_dev_test_flag(d, HCI_CONFIG) ||
385 hci_dev_test_flag(d, HCI_USER_CHANNEL))
388 /* Devices marked as raw-only are neither configured
389 * nor unconfigured controllers.
391 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
394 if (d->dev_type == HCI_PRIMARY &&
395 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
396 rp->index[count++] = cpu_to_le16(d->id);
397 BT_DBG("Added hci%u", d->id);
401 rp->num_controllers = cpu_to_le16(count);
402 rp_len = sizeof(*rp) + (2 * count);
404 read_unlock(&hci_dev_list_lock);
406 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
414 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
415 void *data, u16 data_len)
417 struct mgmt_rp_read_unconf_index_list *rp;
423 BT_DBG("sock %p", sk);
425 read_lock(&hci_dev_list_lock);
428 list_for_each_entry(d, &hci_dev_list, list) {
429 if (d->dev_type == HCI_PRIMARY &&
430 hci_dev_test_flag(d, HCI_UNCONFIGURED))
434 rp_len = sizeof(*rp) + (2 * count);
435 rp = kmalloc(rp_len, GFP_ATOMIC);
437 read_unlock(&hci_dev_list_lock);
442 list_for_each_entry(d, &hci_dev_list, list) {
443 if (hci_dev_test_flag(d, HCI_SETUP) ||
444 hci_dev_test_flag(d, HCI_CONFIG) ||
445 hci_dev_test_flag(d, HCI_USER_CHANNEL))
448 /* Devices marked as raw-only are neither configured
449 * nor unconfigured controllers.
451 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
454 if (d->dev_type == HCI_PRIMARY &&
455 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
456 rp->index[count++] = cpu_to_le16(d->id);
457 BT_DBG("Added hci%u", d->id);
461 rp->num_controllers = cpu_to_le16(count);
462 rp_len = sizeof(*rp) + (2 * count);
464 read_unlock(&hci_dev_list_lock);
466 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
467 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
474 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
475 void *data, u16 data_len)
477 struct mgmt_rp_read_ext_index_list *rp;
483 BT_DBG("sock %p", sk);
485 read_lock(&hci_dev_list_lock);
488 list_for_each_entry(d, &hci_dev_list, list) {
489 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
493 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
494 rp = kmalloc(rp_len, GFP_ATOMIC);
496 read_unlock(&hci_dev_list_lock);
501 list_for_each_entry(d, &hci_dev_list, list) {
502 if (hci_dev_test_flag(d, HCI_SETUP) ||
503 hci_dev_test_flag(d, HCI_CONFIG) ||
504 hci_dev_test_flag(d, HCI_USER_CHANNEL))
507 /* Devices marked as raw-only are neither configured
508 * nor unconfigured controllers.
510 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
513 if (d->dev_type == HCI_PRIMARY) {
514 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp->entry[count].type = 0x01;
517 rp->entry[count].type = 0x00;
518 } else if (d->dev_type == HCI_AMP) {
519 rp->entry[count].type = 0x02;
524 rp->entry[count].bus = d->bus;
525 rp->entry[count++].index = cpu_to_le16(d->id);
526 BT_DBG("Added hci%u", d->id);
529 rp->num_controllers = cpu_to_le16(count);
530 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
532 read_unlock(&hci_dev_list_lock);
534 /* If this command is called at least once, then all the
535 * default index and unconfigured index events are disabled
536 * and from now on only extended index events are used.
538 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
539 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
540 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
542 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
543 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
550 static bool is_configured(struct hci_dev *hdev)
552 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
553 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
556 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
557 !bacmp(&hdev->public_addr, BDADDR_ANY))
563 static __le32 get_missing_options(struct hci_dev *hdev)
567 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
568 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
569 options |= MGMT_OPTION_EXTERNAL_CONFIG;
571 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
572 !bacmp(&hdev->public_addr, BDADDR_ANY))
573 options |= MGMT_OPTION_PUBLIC_ADDRESS;
575 return cpu_to_le32(options);
578 static int new_options(struct hci_dev *hdev, struct sock *skip)
580 __le32 options = get_missing_options(hdev);
582 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
583 sizeof(options), skip);
586 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
588 __le32 options = get_missing_options(hdev);
590 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
594 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
595 void *data, u16 data_len)
597 struct mgmt_rp_read_config_info rp;
600 BT_DBG("sock %p %s", sk, hdev->name);
604 memset(&rp, 0, sizeof(rp));
605 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
607 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
608 options |= MGMT_OPTION_EXTERNAL_CONFIG;
610 if (hdev->set_bdaddr)
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 rp.supported_options = cpu_to_le32(options);
614 rp.missing_options = get_missing_options(hdev);
616 hci_dev_unlock(hdev);
618 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
622 static u32 get_supported_settings(struct hci_dev *hdev)
626 settings |= MGMT_SETTING_POWERED;
627 settings |= MGMT_SETTING_BONDABLE;
628 settings |= MGMT_SETTING_DEBUG_KEYS;
629 settings |= MGMT_SETTING_CONNECTABLE;
630 settings |= MGMT_SETTING_DISCOVERABLE;
632 if (lmp_bredr_capable(hdev)) {
633 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
634 settings |= MGMT_SETTING_FAST_CONNECTABLE;
635 settings |= MGMT_SETTING_BREDR;
636 settings |= MGMT_SETTING_LINK_SECURITY;
638 if (lmp_ssp_capable(hdev)) {
639 settings |= MGMT_SETTING_SSP;
640 settings |= MGMT_SETTING_HS;
643 if (lmp_sc_capable(hdev))
644 settings |= MGMT_SETTING_SECURE_CONN;
647 if (lmp_le_capable(hdev)) {
648 settings |= MGMT_SETTING_LE;
649 settings |= MGMT_SETTING_ADVERTISING;
650 settings |= MGMT_SETTING_SECURE_CONN;
651 settings |= MGMT_SETTING_PRIVACY;
652 settings |= MGMT_SETTING_STATIC_ADDRESS;
655 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
657 settings |= MGMT_SETTING_CONFIGURATION;
662 static u32 get_current_settings(struct hci_dev *hdev)
666 if (hdev_is_powered(hdev))
667 settings |= MGMT_SETTING_POWERED;
669 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
670 settings |= MGMT_SETTING_CONNECTABLE;
672 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
673 settings |= MGMT_SETTING_FAST_CONNECTABLE;
675 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
676 settings |= MGMT_SETTING_DISCOVERABLE;
678 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
679 settings |= MGMT_SETTING_BONDABLE;
681 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
682 settings |= MGMT_SETTING_BREDR;
684 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
685 settings |= MGMT_SETTING_LE;
687 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
688 settings |= MGMT_SETTING_LINK_SECURITY;
690 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
691 settings |= MGMT_SETTING_SSP;
693 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
694 settings |= MGMT_SETTING_HS;
696 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
697 settings |= MGMT_SETTING_ADVERTISING;
699 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
700 settings |= MGMT_SETTING_SECURE_CONN;
702 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
703 settings |= MGMT_SETTING_DEBUG_KEYS;
705 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
706 settings |= MGMT_SETTING_PRIVACY;
708 /* The current setting for static address has two purposes. The
709 * first is to indicate if the static address will be used and
710 * the second is to indicate if it is actually set.
712 * This means if the static address is not configured, this flag
713 * will never be set. If the address is configured, then if the
714 * address is actually used decides if the flag is set or not.
716 * For single mode LE only controllers and dual-mode controllers
717 * with BR/EDR disabled, the existence of the static address will
720 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
721 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
722 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
723 if (bacmp(&hdev->static_addr, BDADDR_ANY))
724 settings |= MGMT_SETTING_STATIC_ADDRESS;
730 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
732 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
735 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
736 struct hci_dev *hdev,
739 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
742 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
744 struct mgmt_pending_cmd *cmd;
746 /* If there's a pending mgmt command the flags will not yet have
747 * their final values, so check for this first.
749 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
751 struct mgmt_mode *cp = cmd->param;
753 return LE_AD_GENERAL;
754 else if (cp->val == 0x02)
755 return LE_AD_LIMITED;
757 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
758 return LE_AD_LIMITED;
759 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
760 return LE_AD_GENERAL;
766 bool mgmt_get_connectable(struct hci_dev *hdev)
768 struct mgmt_pending_cmd *cmd;
770 /* If there's a pending mgmt command the flag will not yet have
771 * it's final value, so check for this first.
773 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
775 struct mgmt_mode *cp = cmd->param;
780 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
783 static void service_cache_off(struct work_struct *work)
785 struct hci_dev *hdev = container_of(work, struct hci_dev,
787 struct hci_request req;
789 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
792 hci_req_init(&req, hdev);
796 __hci_req_update_eir(&req);
797 __hci_req_update_class(&req);
799 hci_dev_unlock(hdev);
801 hci_req_run(&req, NULL);
804 static void rpa_expired(struct work_struct *work)
806 struct hci_dev *hdev = container_of(work, struct hci_dev,
808 struct hci_request req;
812 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
814 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
817 /* The generation of a new RPA and programming it into the
818 * controller happens in the hci_req_enable_advertising()
821 hci_req_init(&req, hdev);
822 __hci_req_enable_advertising(&req);
823 hci_req_run(&req, NULL);
826 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
828 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
831 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
832 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
834 /* Non-mgmt controlled devices get this bit set
835 * implicitly so that pairing works for them, however
836 * for mgmt we require user-space to explicitly enable
839 hci_dev_clear_flag(hdev, HCI_BONDABLE);
842 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
843 void *data, u16 data_len)
845 struct mgmt_rp_read_info rp;
847 BT_DBG("sock %p %s", sk, hdev->name);
851 memset(&rp, 0, sizeof(rp));
853 bacpy(&rp.bdaddr, &hdev->bdaddr);
855 rp.version = hdev->hci_ver;
856 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
858 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
859 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
861 memcpy(rp.dev_class, hdev->dev_class, 3);
863 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
864 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
866 hci_dev_unlock(hdev);
868 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
872 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
874 __le32 settings = cpu_to_le32(get_current_settings(hdev));
876 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
880 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
882 BT_DBG("%s status 0x%02x", hdev->name, status);
884 if (hci_conn_count(hdev) == 0) {
885 cancel_delayed_work(&hdev->power_off);
886 queue_work(hdev->req_workqueue, &hdev->power_off.work);
890 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
892 struct mgmt_ev_advertising_added ev;
894 ev.instance = instance;
896 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
899 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
902 struct mgmt_ev_advertising_removed ev;
904 ev.instance = instance;
906 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
909 static void cancel_adv_timeout(struct hci_dev *hdev)
911 if (hdev->adv_instance_timeout) {
912 hdev->adv_instance_timeout = 0;
913 cancel_delayed_work(&hdev->adv_instance_expire);
917 static int clean_up_hci_state(struct hci_dev *hdev)
919 struct hci_request req;
920 struct hci_conn *conn;
924 hci_req_init(&req, hdev);
926 if (test_bit(HCI_ISCAN, &hdev->flags) ||
927 test_bit(HCI_PSCAN, &hdev->flags)) {
929 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
932 hci_req_clear_adv_instance(hdev, NULL, 0x00, false);
934 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
935 __hci_req_disable_advertising(&req);
937 discov_stopped = hci_req_stop_discovery(&req);
939 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
940 /* 0x15 == Terminated due to Power Off */
941 __hci_abort_conn(&req, conn, 0x15);
944 err = hci_req_run(&req, clean_up_hci_complete);
945 if (!err && discov_stopped)
946 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
951 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
954 struct mgmt_mode *cp = data;
955 struct mgmt_pending_cmd *cmd;
958 BT_DBG("request for %s", hdev->name);
960 if (cp->val != 0x00 && cp->val != 0x01)
961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
962 MGMT_STATUS_INVALID_PARAMS);
966 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
967 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
972 if (!!cp->val == hdev_is_powered(hdev)) {
973 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
984 queue_work(hdev->req_workqueue, &hdev->power_on);
987 /* Disconnect connections, stop scans, etc */
988 err = clean_up_hci_state(hdev);
990 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
991 HCI_POWER_OFF_TIMEOUT);
993 /* ENODATA means there were no HCI commands queued */
994 if (err == -ENODATA) {
995 cancel_delayed_work(&hdev->power_off);
996 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1002 hci_dev_unlock(hdev);
1006 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1008 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1010 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1014 int mgmt_new_settings(struct hci_dev *hdev)
1016 return new_settings(hdev, NULL);
1021 struct hci_dev *hdev;
1025 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1027 struct cmd_lookup *match = data;
1029 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1031 list_del(&cmd->list);
1033 if (match->sk == NULL) {
1034 match->sk = cmd->sk;
1035 sock_hold(match->sk);
1038 mgmt_pending_free(cmd);
1041 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1045 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1046 mgmt_pending_remove(cmd);
1049 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1051 if (cmd->cmd_complete) {
1054 cmd->cmd_complete(cmd, *status);
1055 mgmt_pending_remove(cmd);
1060 cmd_status_rsp(cmd, data);
1063 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1065 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1066 cmd->param, cmd->param_len);
1069 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1071 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1072 cmd->param, sizeof(struct mgmt_addr_info));
1075 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1077 if (!lmp_bredr_capable(hdev))
1078 return MGMT_STATUS_NOT_SUPPORTED;
1079 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1080 return MGMT_STATUS_REJECTED;
1082 return MGMT_STATUS_SUCCESS;
1085 static u8 mgmt_le_support(struct hci_dev *hdev)
1087 if (!lmp_le_capable(hdev))
1088 return MGMT_STATUS_NOT_SUPPORTED;
1089 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1090 return MGMT_STATUS_REJECTED;
1092 return MGMT_STATUS_SUCCESS;
1095 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1097 struct mgmt_pending_cmd *cmd;
1099 BT_DBG("status 0x%02x", status);
1103 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1108 u8 mgmt_err = mgmt_status(status);
1109 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1110 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1114 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1115 hdev->discov_timeout > 0) {
1116 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1117 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1120 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1121 new_settings(hdev, cmd->sk);
1124 mgmt_pending_remove(cmd);
1127 hci_dev_unlock(hdev);
1130 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1133 struct mgmt_cp_set_discoverable *cp = data;
1134 struct mgmt_pending_cmd *cmd;
1138 BT_DBG("request for %s", hdev->name);
1140 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1141 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1142 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1143 MGMT_STATUS_REJECTED);
1145 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1146 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1147 MGMT_STATUS_INVALID_PARAMS);
1149 timeout = __le16_to_cpu(cp->timeout);
1151 /* Disabling discoverable requires that no timeout is set,
1152 * and enabling limited discoverable requires a timeout.
1154 if ((cp->val == 0x00 && timeout > 0) ||
1155 (cp->val == 0x02 && timeout == 0))
1156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1157 MGMT_STATUS_INVALID_PARAMS);
1161 if (!hdev_is_powered(hdev) && timeout > 0) {
1162 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1163 MGMT_STATUS_NOT_POWERED);
1167 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1168 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1169 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1174 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1176 MGMT_STATUS_REJECTED);
1180 if (!hdev_is_powered(hdev)) {
1181 bool changed = false;
1183 /* Setting limited discoverable when powered off is
1184 * not a valid operation since it requires a timeout
1185 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1187 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1188 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1192 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1197 err = new_settings(hdev, sk);
1202 /* If the current mode is the same, then just update the timeout
1203 * value with the new value. And if only the timeout gets updated,
1204 * then no need for any HCI transactions.
1206 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1207 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1208 HCI_LIMITED_DISCOVERABLE)) {
1209 cancel_delayed_work(&hdev->discov_off);
1210 hdev->discov_timeout = timeout;
1212 if (cp->val && hdev->discov_timeout > 0) {
1213 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1214 queue_delayed_work(hdev->req_workqueue,
1215 &hdev->discov_off, to);
1218 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1222 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1228 /* Cancel any potential discoverable timeout that might be
1229 * still active and store new timeout value. The arming of
1230 * the timeout happens in the complete handler.
1232 cancel_delayed_work(&hdev->discov_off);
1233 hdev->discov_timeout = timeout;
1236 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1238 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1240 /* Limited discoverable mode */
1241 if (cp->val == 0x02)
1242 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1244 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1246 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1250 hci_dev_unlock(hdev);
1254 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1256 struct mgmt_pending_cmd *cmd;
1258 BT_DBG("status 0x%02x", status);
1262 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1267 u8 mgmt_err = mgmt_status(status);
1268 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1272 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1273 new_settings(hdev, cmd->sk);
1276 mgmt_pending_remove(cmd);
1279 hci_dev_unlock(hdev);
/* Update the HCI_CONNECTABLE flag without issuing HCI commands (used when
 * the controller is not powered, per the caller in set_connectable).
 * Clearing connectable also clears discoverable, since discoverable
 * requires connectable.  Returns the result of the settings response or
 * new_settings().  NOTE(review): elided listing — the 'changed' update,
 * error check after send_settings_rsp() and branch braces are missing.
 */
1282 static int set_connectable_update_settings(struct hci_dev *hdev,
1283 struct sock *sk, u8 val)
1285 bool changed = false;
1288 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1292 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1294 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Discoverable mode cannot survive without connectable mode. */
1295 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1298 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Re-evaluate page/inquiry scan and background LE scan for the new mode. */
1303 hci_req_update_scan(hdev);
1304 hci_update_background_scan(hdev);
1305 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates that at least one transport
 * is enabled and that val is 0/1, handles the unpowered case via
 * set_connectable_update_settings(), rejects if a conflicting command is
 * pending, and otherwise queues the connectable update work with a pending
 * command.  NOTE(review): elided listing — locking, error labels and the
 * if/else structure between the flag updates are not visible.
 */
1311 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1314 struct mgmt_mode *cp = data;
1315 struct mgmt_pending_cmd *cmd;
1318 BT_DBG("request for %s", hdev->name);
/* Connectable requires BR/EDR or LE to be enabled. */
1320 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1321 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1322 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1323 MGMT_STATUS_REJECTED);
1325 if (cp->val != 0x00 && cp->val != 0x01)
1326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1327 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: just toggle flags, no HCI traffic needed. */
1331 if (!hdev_is_powered(hdev)) {
1332 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one discoverable/connectable transaction may be in flight. */
1336 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1337 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1338 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1343 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1350 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off: stop the discoverable timeout and clear all
 * discoverable-related flags as well.
 */
1352 if (hdev->discov_timeout > 0)
1353 cancel_delayed_work(&hdev->discov_off);
1355 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1356 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1357 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1360 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1364 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler.  Toggles the HCI_BONDABLE flag; no HCI
 * command is needed, but in limited-privacy mode a bondable change can
 * alter the advertising address, so the discoverable update work is queued.
 * NOTE(review): elided listing — lock/unlock pairing, 'changed' guard and
 * error handling are not fully visible.
 */
1368 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1371 struct mgmt_mode *cp = data;
1375 BT_DBG("request for %s", hdev->name);
1377 if (cp->val != 0x00 && cp->val != 0x01)
1378 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1379 MGMT_STATUS_INVALID_PARAMS);
/* test_and_set/clear return the previous state, so 'changed' is true only
 * when the flag actually flipped.
 */
1384 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1386 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1388 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1393 /* In limited privacy mode the change of bondable mode
1394 * may affect the local advertising address.
1396 if (hdev_is_powered(hdev) &&
1397 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1398 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1399 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1400 queue_work(hdev->req_workqueue,
1401 &hdev->discoverable_update);
1403 err = new_settings(hdev, sk);
1407 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR support.  When
 * powered off only the HCI_LINK_SECURITY flag is toggled; when powered the
 * setting is applied with HCI_OP_WRITE_AUTH_ENABLE and tracked via a
 * pending command.  NOTE(review): elided listing — 'val' derivation from
 * cp->val, goto labels and braces are missing from this view.
 */
1411 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1414 struct mgmt_mode *cp = data;
1415 struct mgmt_pending_cmd *cmd;
1419 BT_DBG("request for %s", hdev->name);
1421 status = mgmt_bredr_support(hdev);
1423 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1426 if (cp->val != 0x00 && cp->val != 0x01)
1427 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1428 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: record the desired mode in the dev flags only. */
1432 if (!hdev_is_powered(hdev)) {
1433 bool changed = false;
1435 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1436 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1440 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1445 err = new_settings(hdev, sk);
1450 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1451 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply without HCI I/O. */
1458 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1459 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1463 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1469 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending command so it does not leak. */
1471 mgmt_pending_remove(cmd);
1476 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and an SSP-capable controller.
 * Powered off: toggle the HCI_SSP_ENABLED flag (disabling SSP also clears
 * HCI_HS_ENABLED).  Powered on: send HCI_OP_WRITE_SSP_MODE, first turning
 * off SSP debug mode if it was active.  NOTE(review): elided listing —
 * several flag names after 'hci_dev_test_and_*_flag(hdev,' continue on
 * missing lines.
 */
1480 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1482 struct mgmt_mode *cp = data;
1483 struct mgmt_pending_cmd *cmd;
1487 BT_DBG("request for %s", hdev->name);
1489 status = mgmt_bredr_support(hdev);
1491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1493 if (!lmp_ssp_capable(hdev))
1494 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1495 MGMT_STATUS_NOT_SUPPORTED);
1497 if (cp->val != 0x00 && cp->val != 0x01)
1498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1499 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: flag bookkeeping only, no HCI traffic. */
1503 if (!hdev_is_powered(hdev)) {
1507 changed = !hci_dev_test_and_set_flag(hdev,
1510 changed = hci_dev_test_and_clear_flag(hdev,
1513 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so disabling SSP disables HS too. */
1516 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1519 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1524 err = new_settings(hdev, sk);
1529 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1530 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Requested mode already active: reply immediately. */
1535 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1536 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1540 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys are in use: turn debug mode off first. */
1546 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1547 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1548 sizeof(cp->val), &cp->val);
1550 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1552 mgmt_pending_remove(cmd);
1557 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler.  HS needs BR/EDR, SSP capability
 * and SSP enabled.  Only the HCI_HS_ENABLED flag is toggled — there is no
 * HCI command for HS; disabling while powered is rejected.
 * NOTE(review): elided listing — lock/unlock pairing and branch braces
 * are not visible here.
 */
1561 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1563 struct mgmt_mode *cp = data;
1568 BT_DBG("request for %s", hdev->name);
1570 status = mgmt_bredr_support(hdev);
1572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1574 if (!lmp_ssp_capable(hdev))
1575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1576 MGMT_STATUS_NOT_SUPPORTED);
1578 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1580 MGMT_STATUS_REJECTED);
1582 if (cp->val != 0x00 && cp->val != 0x01)
1583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1584 MGMT_STATUS_INVALID_PARAMS);
/* A pending SET_SSP could change the HS precondition underneath us. */
1588 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1589 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1595 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS on a powered controller is not allowed. */
1597 if (hdev_is_powered(hdev)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1599 MGMT_STATUS_REJECTED);
1603 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1606 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1611 err = new_settings(hdev, sk);
1614 hci_dev_unlock(hdev);
/* Completion callback for the SET_LE HCI request.  On error, fail every
 * pending SET_LE command with the mapped status; on success, answer them
 * with the new settings and, if LE ended up enabled, refresh advertising
 * data, scan response data and the background scan.
 * NOTE(review): elided listing — locking and the success/error branch
 * structure are not fully visible.
 */
1618 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1620 struct cmd_lookup match = { NULL, hdev };
1625 u8 mgmt_err = mgmt_status(status);
1627 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1632 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1634 new_settings(hdev, match.sk);
1639 /* Make sure the controller has a good default for
1640 * advertising data. Restrict the update to when LE
1641 * has actually been enabled. During power on, the
1642 * update in powered_update_hci will take care of it.
1644 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1645 struct hci_request req;
1647 hci_req_init(&req, hdev);
1648 __hci_req_update_adv_data(&req, 0x00);
1649 __hci_req_update_scan_rsp_data(&req, 0x00);
1650 hci_req_run(&req, NULL);
1651 hci_update_background_scan(hdev);
1655 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  Validates capability and parameters, refuses to
 * switch LE off on LE-only configurations, handles the powered-off /
 * no-change case via flags only, and otherwise builds an HCI request that
 * (optionally) disables advertising and writes LE Host Supported, completed
 * by le_enable_complete().  NOTE(review): elided listing — 'val'/'enabled'
 * setup, hci_cp.le assignment and several braces are on missing lines.
 */
1658 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1660 struct mgmt_mode *cp = data;
1661 struct hci_cp_write_le_host_supported hci_cp;
1662 struct mgmt_pending_cmd *cmd;
1663 struct hci_request req;
1667 BT_DBG("request for %s", hdev->name);
1669 if (!lmp_le_capable(hdev))
1670 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1671 MGMT_STATUS_NOT_SUPPORTED);
1673 if (cp->val != 0x00 && cp->val != 0x01)
1674 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1675 MGMT_STATUS_INVALID_PARAMS);
1677 /* Bluetooth single mode LE only controllers or dual-mode
1678 * controllers configured as LE only devices, do not allow
1679 * switching LE off. These have either LE enabled explicitly
1680 * or BR/EDR has been previously switched off.
1682 * When trying to enable an already enabled LE, then gracefully
1683 * send a positive response. Trying to disable it however will
1684 * result into rejection.
1686 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1687 if (cp->val == 0x01)
1688 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1691 MGMT_STATUS_REJECTED);
1697 enabled = lmp_host_le_capable(hdev);
/* Disabling LE: drop all advertising instances first. */
1700 hci_req_clear_adv_instance(hdev, NULL, 0x00, true);
/* Powered off or no actual change: flag bookkeeping only. */
1702 if (!hdev_is_powered(hdev) || val == enabled) {
1703 bool changed = false;
1705 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1706 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1710 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1711 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1715 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1720 err = new_settings(hdev, sk);
/* Serialize with any in-flight SET_LE/SET_ADVERTISING transaction. */
1725 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1726 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1727 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1732 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1738 hci_req_init(&req, hdev);
1740 memset(&hci_cp, 0, sizeof(hci_cp));
1744 hci_cp.simul = 0x00;
1746 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1747 __hci_req_disable_advertising(&req);
1750 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1753 err = hci_req_run(&req, le_enable_complete);
1755 mgmt_pending_remove(cmd);
1758 hci_dev_unlock(hdev);
1762 /* This is a helper function to test for pending mgmt commands that can
1763 * cause CoD or EIR HCI commands. We can only allow one such pending
1764 * mgmt command at a time since otherwise we cannot easily track what
1765 * the current values are, will be, and based on that calculate if a new
1766 * HCI command needs to be sent and if yes with what value.
/* Returns true when any pending mgmt command could issue CoD or EIR HCI
 * commands (see the block comment above).  NOTE(review): elided listing —
 * the 'return true' / 'return false' lines of the switch are missing here.
 */
1768 static bool pending_eir_or_class(struct hci_dev *hdev)
1770 struct mgmt_pending_cmd *cmd;
1772 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1773 switch (cmd->opcode) {
1774 case MGMT_OP_ADD_UUID:
1775 case MGMT_OP_REMOVE_UUID:
1776 case MGMT_OP_SET_DEV_CLASS:
1777 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * SIG-assigned UUIDs.
 */
1785 static const u8 bluetooth_base_uuid[] = {
1786 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1787 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes differ from the Bluetooth
 * base UUID it is a full 128-bit UUID; otherwise the 32-bit value at
 * offset 12 decides between 16- and 32-bit forms.  NOTE(review): the
 * return statements are on elided lines.
 */
1790 static u8 get_uuid_size(const u8 *uuid)
1794 if (memcmp(uuid, bluetooth_base_uuid, 12))
1797 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the ADD_UUID / REMOVE_UUID / SET_DEV_CLASS
 * family: complete the matching pending command with the current device
 * class (3 bytes) and remove it.
 */
1804 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1806 struct mgmt_pending_cmd *cmd;
1810 cmd = pending_find(mgmt_op, hdev);
1814 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
1815 mgmt_status(status), hdev->dev_class, 3);
1817 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(): forward to mgmt_class_complete(). */
1823 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1825 BT_DBG("status 0x%02x", status);
1827 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler.  Appends the UUID to hdev->uuids, then updates
 * the device class and EIR via an HCI request; -ENODATA from hci_req_run
 * means no HCI commands were queued, in which case the command completes
 * immediately.  NOTE(review): elided listing — kmalloc failure handling,
 * goto labels and unlock pairing are on missing lines.
 */
1830 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1832 struct mgmt_cp_add_uuid *cp = data;
1833 struct mgmt_pending_cmd *cmd;
1834 struct hci_request req;
1835 struct bt_uuid *uuid;
1838 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may run at a time. */
1842 if (pending_eir_or_class(hdev)) {
1843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1848 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1854 memcpy(uuid->uuid, cp->uuid, 16);
1855 uuid->svc_hint = cp->svc_hint;
1856 uuid->size = get_uuid_size(cp->uuid);
1858 list_add_tail(&uuid->list, &hdev->uuids);
1860 hci_req_init(&req, hdev);
1862 __hci_req_update_class(&req);
1863 __hci_req_update_eir(&req);
1865 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: nothing to send, so answer the caller right away. */
1867 if (err != -ENODATA)
1870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1871 hdev->dev_class, 3);
1875 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1884 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work if the controller is powered and the
 * HCI_SERVICE_CACHE flag was not already set.  NOTE(review): the return
 * statements and the delay constant are on elided lines.
 */
1888 static bool enable_service_cache(struct hci_dev *hdev)
1890 if (!hdev_is_powered(hdev))
1893 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1894 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): forward to mgmt_class_complete(). */
1902 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1904 BT_DBG("status 0x%02x", status);
1906 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  An all-zero UUID clears the whole list
 * (possibly deferring the class/EIR refresh via the service cache);
 * otherwise the matching entries are removed.  An unmatched UUID yields
 * INVALID_PARAMS.  Class and EIR are refreshed through an HCI request,
 * with the same -ENODATA shortcut as add_uuid().  NOTE(review): elided
 * listing — 'found' counter, kfree of removed entries, goto labels and
 * unlock pairing are on missing lines.
 */
1909 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1912 struct mgmt_cp_remove_uuid *cp = data;
1913 struct mgmt_pending_cmd *cmd;
1914 struct bt_uuid *match, *tmp;
1915 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1916 struct hci_request req;
1919 BT_DBG("request for %s", hdev->name);
1923 if (pending_eir_or_class(hdev)) {
1924 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: wipe the entire UUID list. */
1929 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1930 hci_uuids_clear(hdev);
1932 if (enable_service_cache(hdev)) {
1933 err = mgmt_cmd_complete(sk, hdev->id,
1934 MGMT_OP_REMOVE_UUID,
1935 0, hdev->dev_class, 3);
1944 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1945 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1948 list_del(&match->list);
/* No entry matched the given UUID. */
1954 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1955 MGMT_STATUS_INVALID_PARAMS);
1960 hci_req_init(&req, hdev);
1962 __hci_req_update_class(&req);
1963 __hci_req_update_eir(&req);
1965 err = hci_req_run(&req, remove_uuid_complete);
1967 if (err != -ENODATA)
1970 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1971 hdev->dev_class, 3);
1975 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1984 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): forward to mgmt_class_complete(). */
1988 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1990 BT_DBG("status 0x%02x", status);
1992 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.  Validates the reserved bits of the
 * minor/major class, stores the values, and — when powered — flushes any
 * pending service-cache work before updating EIR and class on the
 * controller.  NOTE(review): elided listing — the re-lock after
 * cancel_delayed_work_sync(), goto labels and unlock pairing are on
 * missing lines.
 */
1995 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1998 struct mgmt_cp_set_dev_class *cp = data;
1999 struct mgmt_pending_cmd *cmd;
2000 struct hci_request req;
2003 BT_DBG("request for %s", hdev->name);
2005 if (!lmp_bredr_capable(hdev))
2006 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2007 MGMT_STATUS_NOT_SUPPORTED);
2011 if (pending_eir_or_class(hdev)) {
2012 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved and must be 0. */
2017 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2019 MGMT_STATUS_INVALID_PARAMS);
2023 hdev->major_class = cp->major;
2024 hdev->minor_class = cp->minor;
/* Powered-off: values take effect at power on; reply immediately. */
2026 if (!hdev_is_powered(hdev)) {
2027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2028 hdev->dev_class, 3);
2032 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the service-cache work to
 * avoid deadlocking against the work item itself.
 */
2034 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2035 hci_dev_unlock(hdev);
2036 cancel_delayed_work_sync(&hdev->service_cache);
2038 __hci_req_update_eir(&req);
2041 __hci_req_update_class(&req);
2043 err = hci_req_run(&req, set_class_complete);
2045 if (err != -ENODATA)
2048 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2049 hdev->dev_class, 3);
2053 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2062 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler.  Validates key_count against the maximum
 * that fits in a u16 payload and against the actual message length, checks
 * every key's address type and key type, then replaces the stored link
 * keys, updates the keep-debug-keys flag and re-adds all non-debug keys.
 * NOTE(review): elided listing — lock acquisition and several braces are
 * on missing lines.
 */
2066 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2069 struct mgmt_cp_load_link_keys *cp = data;
2070 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2071 sizeof(struct mgmt_link_key_info));
2072 u16 key_count, expected_len;
2076 BT_DBG("request for %s", hdev->name);
2078 if (!lmp_bredr_capable(hdev))
2079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2080 MGMT_STATUS_NOT_SUPPORTED);
2082 key_count = __le16_to_cpu(cp->key_count);
2083 if (key_count > max_key_count) {
2084 BT_ERR("load_link_keys: too big key_count value %u",
2086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2087 MGMT_STATUS_INVALID_PARAMS);
/* The declared key count must match the actual payload size exactly. */
2090 expected_len = sizeof(*cp) + key_count *
2091 sizeof(struct mgmt_link_key_info);
2092 if (expected_len != len) {
2093 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2095 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2096 MGMT_STATUS_INVALID_PARAMS);
2099 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2101 MGMT_STATUS_INVALID_PARAMS);
2103 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all entries before touching the stored key list. */
2106 for (i = 0; i < key_count; i++) {
2107 struct mgmt_link_key_info *key = &cp->keys[i];
2109 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2110 return mgmt_cmd_status(sk, hdev->id,
2111 MGMT_OP_LOAD_LINK_KEYS,
2112 MGMT_STATUS_INVALID_PARAMS);
2117 hci_link_keys_clear(hdev);
2120 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2122 changed = hci_dev_test_and_clear_flag(hdev,
2123 HCI_KEEP_DEBUG_KEYS);
2126 new_settings(hdev, NULL);
2128 for (i = 0; i < key_count; i++) {
2129 struct mgmt_link_key_info *key = &cp->keys[i];
2131 /* Always ignore debug keys and require a new pairing if
2132 * the user wants to use them.
2134 if (key->type == HCI_LK_DEBUG_COMBINATION)
2137 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2138 key->type, key->pin_len, NULL);
2141 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2143 hci_dev_unlock(hdev);
/* Emit a Device Unpaired mgmt event for the given address, skipping the
 * socket that initiated the unpair (it gets a command response instead).
 */
2148 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2149 u8 addr_type, struct sock *skip_sk)
2151 struct mgmt_ev_device_unpaired ev;
2153 bacpy(&ev.addr.bdaddr, bdaddr);
2154 ev.addr.type = addr_type;
2156 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler.  Removes stored keys (link key for
 * BR/EDR; IRK and LTK for LE), optionally terminates an existing
 * connection, cancels any ongoing SMP pairing and disables
 * auto-connection parameters.  Completion is immediate unless a link
 * termination was requested, in which case it is deferred via a pending
 * command and hci_abort_conn().  NOTE(review): elided listing — NULL
 * checks on conn/params, goto labels and several braces are missing from
 * this view.
 */
2160 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2163 struct mgmt_cp_unpair_device *cp = data;
2164 struct mgmt_rp_unpair_device rp;
2165 struct hci_conn_params *params;
2166 struct mgmt_pending_cmd *cmd;
2167 struct hci_conn *conn;
2171 memset(&rp, 0, sizeof(rp));
2172 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2173 rp.addr.type = cp->addr.type;
2175 if (!bdaddr_type_is_valid(cp->addr.type))
2176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2177 MGMT_STATUS_INVALID_PARAMS,
2180 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2181 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2182 MGMT_STATUS_INVALID_PARAMS,
2187 if (!hdev_is_powered(hdev)) {
2188 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2189 MGMT_STATUS_NOT_POWERED, &rp,
2194 if (cp->addr.type == BDADDR_BREDR) {
2195 /* If disconnection is requested, then look up the
2196 * connection. If the remote device is connected, it
2197 * will be later used to terminate the link.
2199 * Setting it to NULL explicitly will cause no
2200 * termination of the link.
2203 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2208 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_UNPAIR_DEVICE,
2212 MGMT_STATUS_NOT_PAIRED, &rp,
2220 /* LE address type */
2221 addr_type = le_addr_type(cp->addr.type);
2223 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2225 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2228 MGMT_STATUS_NOT_PAIRED, &rp,
2233 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2235 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2239 /* Abort any ongoing SMP pairing */
2240 smp_cancel_pairing(conn);
2242 /* Defer clearing up the connection parameters until closing to
2243 * give a chance of keeping them if a repairing happens.
2245 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2247 /* Disable auto-connection parameters if present */
2248 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2250 if (params->explicit_connect)
2251 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2253 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2256 /* If disconnection is not requested, then clear the connection
2257 * variable so that the link is not terminated.
2259 if (!cp->disconnect)
2263 /* If the connection variable is set, then termination of the
2264 * link is requested.
2267 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2269 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2273 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2280 cmd->cmd_complete = addr_cmd_complete;
2282 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2284 mgmt_pending_remove(cmd);
2287 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler.  Looks up the ACL or LE connection for the
 * given address and terminates it with hci_disconnect(); the response is
 * deferred via a pending command completed by generic_cmd_complete.
 * NOTE(review): elided listing — lock acquisition, goto labels and braces
 * are missing from this view.
 */
2291 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2294 struct mgmt_cp_disconnect *cp = data;
2295 struct mgmt_rp_disconnect rp;
2296 struct mgmt_pending_cmd *cmd;
2297 struct hci_conn *conn;
2302 memset(&rp, 0, sizeof(rp));
2303 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2304 rp.addr.type = cp->addr.type;
2306 if (!bdaddr_type_is_valid(cp->addr.type))
2307 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2308 MGMT_STATUS_INVALID_PARAMS,
2313 if (!test_bit(HCI_UP, &hdev->flags)) {
2314 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2315 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be in flight at a time. */
2320 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2321 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2322 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2326 if (cp->addr.type == BDADDR_BREDR)
2327 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2330 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2331 le_addr_type(cp->addr.type));
2333 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2334 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2335 MGMT_STATUS_NOT_CONNECTED, &rp,
2340 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2346 cmd->cmd_complete = generic_cmd_complete;
2348 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2350 mgmt_pending_remove(cmd);
2353 hci_dev_unlock(hdev);
/* Map an HCI link type + address type pair to the mgmt BDADDR_* constant.
 * NOTE(review): elided listing — the LE_LINK case label and default labels
 * of the switches are on missing lines.
 */
2357 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2359 switch (link_type) {
2361 switch (addr_type) {
2362 case ADDR_LE_DEV_PUBLIC:
2363 return BDADDR_LE_PUBLIC;
2366 /* Fallback to LE Random address type */
2367 return BDADDR_LE_RANDOM;
2371 /* Fallback to BR/EDR type */
2372 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler.  Counts mgmt-visible connections,
 * allocates a response sized for that count, then fills it while skipping
 * SCO/eSCO links; the length is recalculated afterwards because entries
 * may have been filtered out between the two passes.  NOTE(review):
 * elided listing — declarations of i/err/rp_len, allocation-failure
 * handling and goto labels are on missing lines.
 */
2376 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2379 struct mgmt_rp_get_connections *rp;
2389 if (!hdev_is_powered(hdev)) {
2390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2391 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections visible to mgmt. */
2396 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2397 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2401 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2402 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2409 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2410 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2412 bacpy(&rp->addr[i].bdaddr, &c->dst);
2413 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2414 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2419 rp->conn_count = cpu_to_le16(i);
2421 /* Recalculate length in case of filtered SCO connections, etc */
2422 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2424 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2430 hci_dev_unlock(hdev);
/* Helper for pin_code_reply(): queue a pending PIN_CODE_NEG_REPLY command
 * and send the HCI negative reply carrying only the peer address.
 */
2434 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2435 struct mgmt_cp_pin_code_neg_reply *cp)
2437 struct mgmt_pending_cmd *cmd;
2440 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2445 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2446 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2448 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler.  Requires a powered controller and an
 * existing ACL connection.  If high security is pending but the PIN is
 * shorter than 16 bytes, a negative reply is sent instead; otherwise the
 * PIN is forwarded with HCI_OP_PIN_CODE_REPLY via a pending command.
 * NOTE(review): elided listing — locking, goto labels and braces are on
 * missing lines.
 */
2453 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2456 struct hci_conn *conn;
2457 struct mgmt_cp_pin_code_reply *cp = data;
2458 struct hci_cp_pin_code_reply reply;
2459 struct mgmt_pending_cmd *cmd;
2466 if (!hdev_is_powered(hdev)) {
2467 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2468 MGMT_STATUS_NOT_POWERED);
2472 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2474 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2475 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject the
 * pairing with a negative reply.
 */
2479 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2480 struct mgmt_cp_pin_code_neg_reply ncp;
2482 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2484 BT_ERR("PIN code is not 16 bytes long");
2486 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2488 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2489 MGMT_STATUS_INVALID_PARAMS);
2494 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2500 cmd->cmd_complete = addr_cmd_complete;
2502 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2503 reply.pin_len = cp->pin_len;
2504 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2506 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2508 mgmt_pending_remove(cmd);
2511 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler.  Stores the new IO capability in
 * hdev after rejecting values above SMP_IO_KEYBOARD_DISPLAY; always
 * completes synchronously.
 */
2515 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2518 struct mgmt_cp_set_io_capability *cp = data;
2522 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2523 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2524 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2528 hdev->io_capability = cp->io_capability;
2530 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2531 hdev->io_capability);
2533 hci_dev_unlock(hdev);
2535 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command whose user_data is this connection.
 * NOTE(review): elided listing — the 'continue'/'return' statements of the
 * loop and the final return are on missing lines.
 */
2539 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2541 struct hci_dev *hdev = conn->hdev;
2542 struct mgmt_pending_cmd *cmd;
2544 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2545 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2548 if (cmd->user_data != conn)
/* cmd_complete handler for PAIR_DEVICE: report the result to userspace,
 * detach all pairing callbacks from the connection, drop the reference
 * taken at pair_device() time and keep the connection parameters (the
 * device is now paired).
 */
2557 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2559 struct mgmt_rp_pair_device rp;
2560 struct hci_conn *conn = cmd->user_data;
2563 bacpy(&rp.addr.bdaddr, &conn->dst);
2564 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2566 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2567 status, &rp, sizeof(rp));
2569 /* So we don't get further callbacks for this connection */
2570 conn->connect_cfm_cb = NULL;
2571 conn->security_cfm_cb = NULL;
2572 conn->disconn_cfm_cb = NULL;
2574 hci_conn_drop(conn);
2576 /* The device is paired so there is no need to remove
2577 * its connection parameters anymore.
2579 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes: complete and remove the
 * matching pending PAIR_DEVICE command with SUCCESS or FAILED.
 * NOTE(review): the NULL check on cmd is on an elided line.
 */
2586 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2588 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2589 struct mgmt_pending_cmd *cmd;
2591 cmd = find_pairing(conn);
2593 cmd->cmd_complete(cmd, status);
2594 mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconnect): complete the
 * pending pairing command with the mapped HCI status.
 */
2598 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2600 struct mgmt_pending_cmd *cmd;
2602 BT_DBG("status %u", status);
2604 cmd = find_pairing(conn);
2606 BT_DBG("Unable to find a pending command");
2610 cmd->cmd_complete(cmd, mgmt_status(status));
2611 mgmt_pending_remove(cmd);
/* LE connection callback: like pairing_complete_cb, but for LE connecting
 * alone does not prove pairing finished.  NOTE(review): the early-return
 * condition between the BT_DBG and find_pairing() is on an elided line.
 */
2614 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2616 struct mgmt_pending_cmd *cmd;
2618 BT_DBG("status %u", status);
2623 cmd = find_pairing(conn);
2625 BT_DBG("Unable to find a pending command");
2629 cmd->cmd_complete(cmd, mgmt_status(status));
2630 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler.  Validates address and IO capability,
 * rejects if already paired, then initiates an ACL connection (BR/EDR) or
 * a connect-by-scan (LE, after recording connection parameters).  Failure
 * of the connect attempt is mapped to a mgmt status; otherwise pairing
 * callbacks are installed, a pending command tracks completion, and
 * pairing proceeds via hci_conn_security() when the link is already up.
 * NOTE(review): elided listing — conn error checks (IS_ERR), locking,
 * goto labels and braces are on missing lines.
 */
2633 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2636 struct mgmt_cp_pair_device *cp = data;
2637 struct mgmt_rp_pair_device rp;
2638 struct mgmt_pending_cmd *cmd;
2639 u8 sec_level, auth_type;
2640 struct hci_conn *conn;
2645 memset(&rp, 0, sizeof(rp));
2646 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2647 rp.addr.type = cp->addr.type;
2649 if (!bdaddr_type_is_valid(cp->addr.type))
2650 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2651 MGMT_STATUS_INVALID_PARAMS,
2654 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2656 MGMT_STATUS_INVALID_PARAMS,
2661 if (!hdev_is_powered(hdev)) {
2662 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2663 MGMT_STATUS_NOT_POWERED, &rp,
2668 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2669 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2670 MGMT_STATUS_ALREADY_PAIRED, &rp,
2675 sec_level = BT_SECURITY_MEDIUM;
2676 auth_type = HCI_AT_DEDICATED_BONDING;
2678 if (cp->addr.type == BDADDR_BREDR) {
2679 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2682 u8 addr_type = le_addr_type(cp->addr.type);
2683 struct hci_conn_params *p;
2685 /* When pairing a new device, it is expected to remember
2686 * this device for future connections. Adding the connection
2687 * parameter information ahead of time allows tracking
2688 * of the slave preferred values and will speed up any
2689 * further connection establishment.
2691 * If connection parameters already exist, then they
2692 * will be kept and this function does nothing.
2694 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2696 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2697 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2699 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2700 addr_type, sec_level,
2701 HCI_LE_CONN_TIMEOUT);
/* Connect attempt failed: translate the errno into a mgmt status. */
2707 if (PTR_ERR(conn) == -EBUSY)
2708 status = MGMT_STATUS_BUSY;
2709 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2710 status = MGMT_STATUS_NOT_SUPPORTED;
2711 else if (PTR_ERR(conn) == -ECONNREFUSED)
2712 status = MGMT_STATUS_REJECTED;
2714 status = MGMT_STATUS_CONNECT_FAILED;
2716 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2717 status, &rp, sizeof(rp));
/* Another pairing is already in progress on this connection. */
2721 if (conn->connect_cfm_cb) {
2722 hci_conn_drop(conn);
2723 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2724 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2728 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2731 hci_conn_drop(conn);
2735 cmd->cmd_complete = pairing_complete;
2737 /* For LE, just connecting isn't a proof that the pairing finished */
2738 if (cp->addr.type == BDADDR_BREDR) {
2739 conn->connect_cfm_cb = pairing_complete_cb;
2740 conn->security_cfm_cb = pairing_complete_cb;
2741 conn->disconn_cfm_cb = pairing_complete_cb;
2743 conn->connect_cfm_cb = le_pairing_complete_cb;
2744 conn->security_cfm_cb = le_pairing_complete_cb;
2745 conn->disconn_cfm_cb = le_pairing_complete_cb;
2748 conn->io_capability = cp->io_cap;
/* Hold a reference for the duration of the pairing; released by
 * pairing_complete() via hci_conn_drop().
 */
2749 cmd->user_data = hci_conn_get(conn);
2751 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2752 hci_conn_security(conn, sec_level, auth_type, true)) {
2753 cmd->cmd_complete(cmd, 0);
2754 mgmt_pending_remove(cmd);
2760 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler.  Finds the pending PAIR_DEVICE
 * command, verifies the address matches its connection, completes the
 * pairing with CANCELLED and acknowledges the cancel request.
 * NOTE(review): elided listing — lock acquisition, NULL check on cmd and
 * goto labels are on missing lines.
 */
2764 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2767 struct mgmt_addr_info *addr = data;
2768 struct mgmt_pending_cmd *cmd;
2769 struct hci_conn *conn;
2776 if (!hdev_is_powered(hdev)) {
2777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778 MGMT_STATUS_NOT_POWERED);
2782 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2785 MGMT_STATUS_INVALID_PARAMS);
2789 conn = cmd->user_data;
/* The cancel must target the same device the pairing was started for. */
2791 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2793 MGMT_STATUS_INVALID_PARAMS);
2797 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
2798 mgmt_pending_remove(cmd);
2800 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2801 addr, sizeof(*addr));
2803 hci_dev_unlock(hdev);
/* Common implementation behind the user confirm / passkey (neg) reply
 * handlers.  LE replies are routed through SMP directly; BR/EDR replies
 * are forwarded as the given HCI command via a pending mgmt command.
 * For passkey replies the full {bdaddr, passkey} payload is sent; all
 * other ops carry just the bdaddr.  NOTE(review): elided listing —
 * lock/unlock pairing, goto labels and braces are on missing lines.
 */
2807 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2808 struct mgmt_addr_info *addr, u16 mgmt_op,
2809 u16 hci_op, __le32 passkey)
2811 struct mgmt_pending_cmd *cmd;
2812 struct hci_conn *conn;
2817 if (!hdev_is_powered(hdev)) {
2818 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2819 MGMT_STATUS_NOT_POWERED, addr,
2824 if (addr->type == BDADDR_BREDR)
2825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2827 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
2828 le_addr_type(addr->type));
2831 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2832 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: hand the response straight to the SMP layer. */
2837 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2838 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2840 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2841 MGMT_STATUS_SUCCESS, addr,
2844 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2845 MGMT_STATUS_FAILED, addr,
2851 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2857 cmd->cmd_complete = addr_cmd_complete;
2859 /* Continue with pairing via HCI */
2860 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2861 struct hci_cp_user_passkey_reply cp;
2863 bacpy(&cp.bdaddr, &addr->bdaddr);
2864 cp.passkey = passkey;
2865 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2867 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2871 mgmt_pending_remove(cmd);
2874 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the HCI PIN-code negative reply opcode.
 */
2878 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2879 void *data, u16 len)
2881 struct mgmt_cp_pin_code_neg_reply *cp = data;
2885 return user_pairing_resp(sk, hdev, &cp->addr,
2886 MGMT_OP_PIN_CODE_NEG_REPLY,
2887 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size payload, then
 * delegate to user_pairing_resp().
 */
2890 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2893 struct mgmt_cp_user_confirm_reply *cp = data;
2897 if (len != sizeof(*cp))
2898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2899 MGMT_STATUS_INVALID_PARAMS);
2901 return user_pairing_resp(sk, hdev, &cp->addr,
2902 MGMT_OP_USER_CONFIRM_REPLY,
2903 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the negative confirm HCI opcode.
 */
2906 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2907 void *data, u16 len)
2909 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2913 return user_pairing_resp(sk, hdev, &cp->addr,
2914 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2915 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey to
 * user_pairing_resp().
 */
2918 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2921 struct mgmt_cp_user_passkey_reply *cp = data;
2925 return user_pairing_resp(sk, hdev, &cp->addr,
2926 MGMT_OP_USER_PASSKEY_REPLY,
2927 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the negative passkey HCI opcode.
 */
2930 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2931 void *data, u16 len)
2933 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2937 return user_pairing_resp(sk, hdev, &cp->addr,
2938 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2939 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* HCI request completion callback for Set Local Name: resolve the
 * pending mgmt command with status or success.
 * NOTE(review): elided listing — the hci_dev_lock() and NULL-check
 * lines between the visible lines are missing here.
 */
2942 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2944 struct mgmt_cp_set_local_name *cp;
2945 struct mgmt_pending_cmd *cmd;
2947 BT_DBG("status 0x%02x", status);
2951 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2958 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2959 mgmt_status(status));
2961 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2964 mgmt_pending_remove(cmd);
2967 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name (and short
 * name), then push it to the controller via EIR / scan response data
 * when powered.
 */
2970 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2973 struct mgmt_cp_set_local_name *cp = data;
2974 struct mgmt_pending_cmd *cmd;
2975 struct hci_request req;
2982 /* If the old values are the same as the new ones just return a
2983 * direct command complete event.
2985 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2986 !memcmp(hdev->short_name, cp->short_name,
2987 sizeof(hdev->short_name))) {
2988 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2993 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store the name and notify; no HCI traffic needed. */
2995 if (!hdev_is_powered(hdev)) {
2996 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2998 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3003 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3009 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3015 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3017 hci_req_init(&req, hdev);
/* BR/EDR: write the name and refresh EIR data. */
3019 if (lmp_bredr_capable(hdev)) {
3020 __hci_req_update_name(&req);
3021 __hci_req_update_eir(&req);
3024 /* The name is stored in the scan response data and so
3025 * no need to update the advertising data here.
3027 if (lmp_le_capable(hdev))
3028 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3030 err = hci_req_run(&req, set_name_complete);
3032 mgmt_pending_remove(cmd);
3035 hci_dev_unlock(hdev);
/* Completion callback for Read Local OOB Data: translate the HCI
 * response skb (legacy or extended/secure-connections variant) into a
 * mgmt_rp_read_local_oob_data reply for the pending command.
 */
3039 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3040 u16 opcode, struct sk_buff *skb)
3042 struct mgmt_rp_read_local_oob_data mgmt_rp;
3043 size_t rp_size = sizeof(mgmt_rp);
3044 struct mgmt_pending_cmd *cmd;
3046 BT_DBG("%s status %u", hdev->name, status);
3048 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3052 if (status || !skb) {
3053 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3054 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3058 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy OOB: only the P-192 hash/rand are available, so shrink the
 * reply to exclude the (zeroed) P-256 fields.
 */
3060 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3061 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short/malformed controller response. */
3063 if (skb->len < sizeof(*rp)) {
3064 mgmt_cmd_status(cmd->sk, hdev->id,
3065 MGMT_OP_READ_LOCAL_OOB_DATA,
3066 MGMT_STATUS_FAILED);
3070 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3071 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
3073 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended OOB: copy both P-192 and P-256 hash/rand pairs. */
3075 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3077 if (skb->len < sizeof(*rp)) {
3078 mgmt_cmd_status(cmd->sk, hdev->id,
3079 MGMT_OP_READ_LOCAL_OOB_DATA,
3080 MGMT_STATUS_FAILED);
3084 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3085 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3087 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3088 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3091 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3092 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3095 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request local out-of-band
 * pairing data from the controller. Uses the extended HCI command
 * when BR/EDR Secure Connections is enabled.
 */
3098 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3099 void *data, u16 data_len)
3101 struct mgmt_pending_cmd *cmd;
3102 struct hci_request req;
3105 BT_DBG("%s", hdev->name);
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3111 MGMT_STATUS_NOT_POWERED);
/* OOB data is only meaningful with Secure Simple Pairing support. */
3115 if (!lmp_ssp_capable(hdev)) {
3116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3117 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data may be in flight at a time. */
3121 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3122 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3127 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3133 hci_req_init(&req, hdev);
3135 if (bredr_sc_enabled(hdev))
3136 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3138 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3140 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3142 mgmt_pending_remove(cmd);
3145 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote out-of-band
 * pairing data for a peer. Accepts either the legacy payload (P-192
 * hash/rand only, BR/EDR) or the extended payload (P-192 + P-256);
 * the command length selects which one.
 */
3149 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3150 void *data, u16 len)
3152 struct mgmt_addr_info *addr = data;
3155 BT_DBG("%s ", hdev->name);
3157 if (!bdaddr_type_is_valid(addr->type))
3158 return mgmt_cmd_complete(sk, hdev->id,
3159 MGMT_OP_ADD_REMOTE_OOB_DATA,
3160 MGMT_STATUS_INVALID_PARAMS,
3161 addr, sizeof(*addr));
/* Legacy payload: P-192 only, and only for BR/EDR addresses. */
3165 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3166 struct mgmt_cp_add_remote_oob_data *cp = data;
3169 if (cp->addr.type != BDADDR_BREDR) {
3170 err = mgmt_cmd_complete(sk, hdev->id,
3171 MGMT_OP_ADD_REMOTE_OOB_DATA,
3172 MGMT_STATUS_INVALID_PARAMS,
3173 &cp->addr, sizeof(cp->addr));
3177 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3178 cp->addr.type, cp->hash,
3179 cp->rand, NULL, NULL);
3181 status = MGMT_STATUS_FAILED;
3183 status = MGMT_STATUS_SUCCESS;
3185 err = mgmt_cmd_complete(sk, hdev->id,
3186 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3187 &cp->addr, sizeof(cp->addr));
3188 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3189 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3190 u8 *rand192, *hash192, *rand256, *hash256;
3193 if (bdaddr_type_is_le(cp->addr.type)) {
3194 /* Enforce zero-valued 192-bit parameters as
3195 * long as legacy SMP OOB isn't implemented.
3197 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3198 memcmp(cp->hash192, ZERO_KEY, 16)) {
3199 err = mgmt_cmd_complete(sk, hdev->id,
3200 MGMT_OP_ADD_REMOTE_OOB_DATA,
3201 MGMT_STATUS_INVALID_PARAMS,
3202 addr, sizeof(*addr));
3209 /* In case one of the P-192 values is set to zero,
3210 * then just disable OOB data for P-192.
3212 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3213 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3217 rand192 = cp->rand192;
3218 hash192 = cp->hash192;
3222 /* In case one of the P-256 values is set to zero, then just
3223 * disable OOB data for P-256.
3225 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3226 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3230 rand256 = cp->rand256;
3231 hash256 = cp->hash256;
3234 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3235 cp->addr.type, hash192, rand192,
3238 status = MGMT_STATUS_FAILED;
3240 status = MGMT_STATUS_SUCCESS;
3242 err = mgmt_cmd_complete(sk, hdev->id,
3243 MGMT_OP_ADD_REMOTE_OOB_DATA,
3244 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size: reject the command. */
3246 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3247 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3248 MGMT_STATUS_INVALID_PARAMS);
3252 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored remote OOB data
 * for one BR/EDR peer, or for all peers when BDADDR_ANY is given.
 */
3256 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3257 void *data, u16 len)
3259 struct mgmt_cp_remove_remote_oob_data *cp = data;
3263 BT_DBG("%s", hdev->name);
3265 if (cp->addr.type != BDADDR_BREDR)
3266 return mgmt_cmd_complete(sk, hdev->id,
3267 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3268 MGMT_STATUS_INVALID_PARAMS,
3269 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY clears the whole remote OOB data store. */
3273 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3274 hci_remote_oob_data_clear(hdev);
3275 status = MGMT_STATUS_SUCCESS;
3279 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3281 status = MGMT_STATUS_INVALID_PARAMS;
3283 status = MGMT_STATUS_SUCCESS;
3286 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3287 status, &cp->addr, sizeof(cp->addr));
3289 hci_dev_unlock(hdev);
/* Called when the discovery start request finishes: find whichever
 * start-discovery variant is pending (regular, service, or limited)
 * and complete it with the translated status.
 */
3293 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3295 struct mgmt_pending_cmd *cmd;
3297 BT_DBG("status %d", status);
3301 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3303 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3306 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3309 cmd->cmd_complete(cmd, mgmt_status(status));
3310 mgmt_pending_remove(cmd);
3313 hci_dev_unlock(hdev);
/* Validate a requested discovery type against the controller's
 * capabilities. On failure, *mgmt_status holds the error to report.
 * INTERLEAVED requires both LE and BR/EDR support (hence the
 * intentional fall-through to the BR/EDR check).
 */
3316 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3317 uint8_t *mgmt_status)
3320 case DISCOV_TYPE_LE:
3321 *mgmt_status = mgmt_le_support(hdev);
3325 case DISCOV_TYPE_INTERLEAVED:
3326 *mgmt_status = mgmt_le_support(hdev);
3329 /* Intentional fall-through */
3330 case DISCOV_TYPE_BREDR:
3331 *mgmt_status = mgmt_bredr_support(hdev);
3336 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for Start Discovery and Start Limited
 * Discovery (@op selects which): validates state and type, resets
 * the discovery filter, and queues the discovery update work.
 */
3343 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3344 u16 op, void *data, u16 len)
3346 struct mgmt_cp_start_discovery *cp = data;
3347 struct mgmt_pending_cmd *cmd;
3351 BT_DBG("%s", hdev->name);
3355 if (!hdev_is_powered(hdev)) {
3356 err = mgmt_cmd_complete(sk, hdev->id, op,
3357 MGMT_STATUS_NOT_POWERED,
3358 &cp->type, sizeof(cp->type));
/* Busy if discovery is already in progress or periodic inquiry runs. */
3362 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3363 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3364 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3365 &cp->type, sizeof(cp->type));
3369 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3370 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3371 &cp->type, sizeof(cp->type));
3375 /* Clear the discovery filter first to free any previously
3376 * allocated memory for the UUID list.
3378 hci_discovery_filter_clear(hdev);
3380 hdev->discovery.type = cp->type;
3381 hdev->discovery.report_invalid_rssi = false;
3382 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3383 hdev->discovery.limited = true;
3385 hdev->discovery.limited = false;
3387 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3393 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is kicked off asynchronously via discov_update. */
3395 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3396 queue_work(hdev->req_workqueue, &hdev->discov_update);
3400 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: delegate to the shared implementation. */
3404 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3405 void *data, u16 len)
3407 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: delegate to the shared
 * implementation with the limited-discovery opcode.
 */
3411 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3412 void *data, u16 len)
3414 return start_discovery_internal(sk, hdev,
3415 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: reply with the
 * stored command parameters as the response payload.
 */
3419 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3422 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI and an optional UUID list appended to
 * the command payload (validated against the declared uuid_count).
 */
3426 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3427 void *data, u16 len)
3429 struct mgmt_cp_start_service_discovery *cp = data;
3430 struct mgmt_pending_cmd *cmd;
/* Cap uuid_count so expected_len below cannot overflow u16. */
3431 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3432 u16 uuid_count, expected_len;
3436 BT_DBG("%s", hdev->name);
3440 if (!hdev_is_powered(hdev)) {
3441 err = mgmt_cmd_complete(sk, hdev->id,
3442 MGMT_OP_START_SERVICE_DISCOVERY,
3443 MGMT_STATUS_NOT_POWERED,
3444 &cp->type, sizeof(cp->type));
3448 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3449 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3450 err = mgmt_cmd_complete(sk, hdev->id,
3451 MGMT_OP_START_SERVICE_DISCOVERY,
3452 MGMT_STATUS_BUSY, &cp->type,
3457 uuid_count = __le16_to_cpu(cp->uuid_count);
3458 if (uuid_count > max_uuid_count) {
3459 BT_ERR("service_discovery: too big uuid_count value %u",
3461 err = mgmt_cmd_complete(sk, hdev->id,
3462 MGMT_OP_START_SERVICE_DISCOVERY,
3463 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The payload must be exactly the header plus 16 bytes per UUID. */
3468 expected_len = sizeof(*cp) + uuid_count * 16;
3469 if (expected_len != len) {
3470 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3472 err = mgmt_cmd_complete(sk, hdev->id,
3473 MGMT_OP_START_SERVICE_DISCOVERY,
3474 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3479 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3480 err = mgmt_cmd_complete(sk, hdev->id,
3481 MGMT_OP_START_SERVICE_DISCOVERY,
3482 status, &cp->type, sizeof(cp->type));
3486 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3493 cmd->cmd_complete = service_discovery_cmd_complete;
3495 /* Clear the discovery filter first to free any previously
3496 * allocated memory for the UUID list.
3498 hci_discovery_filter_clear(hdev);
3500 hdev->discovery.result_filtering = true;
3501 hdev->discovery.type = cp->type;
3502 hdev->discovery.rssi = cp->rssi;
3503 hdev->discovery.uuid_count = uuid_count;
/* Copy the UUID list; freed later by hci_discovery_filter_clear(). */
3505 if (uuid_count > 0) {
3506 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
3508 if (!hdev->discovery.uuids) {
3509 err = mgmt_cmd_complete(sk, hdev->id,
3510 MGMT_OP_START_SERVICE_DISCOVERY,
3512 &cp->type, sizeof(cp->type));
3513 mgmt_pending_remove(cmd);
3518 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3519 queue_work(hdev->req_workqueue, &hdev->discov_update);
3523 hci_dev_unlock(hdev);
/* Called when the discovery stop request finishes: complete any
 * pending Stop Discovery command with the translated status.
 */
3527 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
3529 struct mgmt_pending_cmd *cmd;
3531 BT_DBG("status %d", status);
3535 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3537 cmd->cmd_complete(cmd, mgmt_status(status));
3538 mgmt_pending_remove(cmd);
3541 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: rejects if no discovery is active
 * or the type doesn't match the one in progress, otherwise queues the
 * stop via the discov_update work item.
 */
3544 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3547 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3548 struct mgmt_pending_cmd *cmd;
3551 BT_DBG("%s", hdev->name);
3555 if (!hci_discovery_active(hdev)) {
3556 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3557 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3558 sizeof(mgmt_cp->type));
/* The requested type must match the ongoing discovery's type. */
3562 if (hdev->discovery.type != mgmt_cp->type) {
3563 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3564 MGMT_STATUS_INVALID_PARAMS,
3565 &mgmt_cp->type, sizeof(mgmt_cp->type));
3569 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
3575 cmd->cmd_complete = generic_cmd_complete;
3577 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3578 queue_work(hdev->req_workqueue, &hdev->discov_update);
3582 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, mark an inquiry
 * cache entry's name as known or as still needing resolution.
 */
3586 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3589 struct mgmt_cp_confirm_name *cp = data;
3590 struct inquiry_entry *e;
3593 BT_DBG("%s", hdev->name);
3597 if (!hci_discovery_active(hdev)) {
3598 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3599 MGMT_STATUS_FAILED, &cp->addr,
3604 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3607 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
/* name_known skips remote name resolution; otherwise schedule it. */
3612 if (cp->name_known) {
3613 e->name_state = NAME_KNOWN;
3616 e->name_state = NAME_NEEDED;
3617 hci_inquiry_cache_update_resolve(hdev, e);
3620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
3621 &cp->addr, sizeof(cp->addr));
3624 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the blacklist and
 * emit MGMT_EV_DEVICE_BLOCKED on success.
 */
3628 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3631 struct mgmt_cp_block_device *cp = data;
3635 BT_DBG("%s", hdev->name);
3637 if (!bdaddr_type_is_valid(cp->addr.type))
3638 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3639 MGMT_STATUS_INVALID_PARAMS,
3640 &cp->addr, sizeof(cp->addr));
3644 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3647 status = MGMT_STATUS_FAILED;
/* Event is broadcast; the originating socket is excluded (skip_sk). */
3651 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3653 status = MGMT_STATUS_SUCCESS;
3656 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3657 &cp->addr, sizeof(cp->addr));
3659 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the
 * blacklist and emit MGMT_EV_DEVICE_UNBLOCKED on success.
 */
3664 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3667 struct mgmt_cp_unblock_device *cp = data;
3671 BT_DBG("%s", hdev->name);
3673 if (!bdaddr_type_is_valid(cp->addr.type))
3674 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3675 MGMT_STATUS_INVALID_PARAMS,
3676 &cp->addr, sizeof(cp->addr));
/* Deletion failure means the entry was never blocked. */
3680 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3683 status = MGMT_STATUS_INVALID_PARAMS;
3687 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3689 status = MGMT_STATUS_SUCCESS;
3692 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3693 &cp->addr, sizeof(cp->addr));
3695 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) and refresh EIR so the DID is
 * advertised.
 */
3700 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3703 struct mgmt_cp_set_device_id *cp = data;
3704 struct hci_request req;
3708 BT_DBG("%s", hdev->name);
3710 source = __le16_to_cpu(cp->source);
/* Valid sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB-IF. */
3712 if (source > 0x0002)
3713 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3714 MGMT_STATUS_INVALID_PARAMS);
3718 hdev->devid_source = source;
3719 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3720 hdev->devid_product = __le16_to_cpu(cp->product);
3721 hdev->devid_version = __le16_to_cpu(cp->version);
3723 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
3726 hci_req_init(&req, hdev);
3727 __hci_req_update_eir(&req);
3728 hci_req_run(&req, NULL);
3730 hci_dev_unlock(hdev);
/* Completion callback for re-enabling instance advertising: the
 * status is only logged, no further action is taken.
 */
3735 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
3738 BT_DBG("status %d", status);
/* Completion callback for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending Set Advertising
 * commands, and — if advertising was just turned off — re-schedule
 * any configured multi-instance advertising.
 */
3741 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
3744 struct cmd_lookup match = { NULL, hdev };
3745 struct hci_request req;
3747 struct adv_info *adv_instance;
/* Failure: reply with the error to every pending command. */
3753 u8 mgmt_err = mgmt_status(status);
3755 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3756 cmd_status_rsp, &mgmt_err);
3760 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3761 hci_dev_set_flag(hdev, HCI_ADVERTISING);
3763 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3765 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3768 new_settings(hdev, match.sk);
3773 /* If "Set Advertising" was just disabled and instance advertising was
3774 * set up earlier, then re-enable multi-instance advertising.
3776 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3777 list_empty(&hdev->adv_instances))
3780 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
3782 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
3783 struct adv_info, list);
3787 instance = adv_instance->instance;
3790 hci_req_init(&req, hdev);
3792 err = __hci_req_schedule_adv_instance(&req, instance, true);
3795 err = hci_req_run(&req, enable_advertising_instance);
3798 BT_ERR("Failed to re-configure advertising");
3801 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable. Toggles only the flags (no HCI traffic)
 * when the controller is off, nothing would change, or LE links /
 * active scanning would conflict with the advertising change.
 */
3804 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3807 struct mgmt_mode *cp = data;
3808 struct mgmt_pending_cmd *cmd;
3809 struct hci_request req;
3813 BT_DBG("request for %s", hdev->name);
3815 status = mgmt_le_support(hdev);
3817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3820 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
3821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3822 MGMT_STATUS_INVALID_PARAMS);
3828 /* The following conditions are ones which mean that we should
3829 * not do any HCI communication but directly send a mgmt
3830 * response to user space (after toggling the flag if
3833 if (!hdev_is_powered(hdev) ||
3834 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
3835 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
3836 hci_conn_num(hdev, LE_LINK) > 0 ||
3837 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
3838 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
3842 hdev->cur_adv_instance = 0x00;
3843 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
3844 if (cp->val == 0x02)
3845 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3847 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3849 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
3850 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3853 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3858 err = new_settings(hdev, sk);
/* Only one advertising/LE toggle may be in flight at a time. */
3863 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3864 pending_find(MGMT_OP_SET_LE, hdev)) {
3865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3870 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3876 hci_req_init(&req, hdev);
3878 if (cp->val == 0x02)
3879 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3881 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3883 cancel_adv_timeout(hdev);
3886 /* Switch to instance "0" for the Set Advertising setting.
3887 * We cannot use update_[adv|scan_rsp]_data() here as the
3888 * HCI_ADVERTISING flag is not yet set.
3890 hdev->cur_adv_instance = 0x00;
3891 __hci_req_update_adv_data(&req, 0x00);
3892 __hci_req_update_scan_rsp_data(&req, 0x00);
3893 __hci_req_enable_advertising(&req);
3895 __hci_req_disable_advertising(&req);
3898 err = hci_req_run(&req, set_advertising_complete);
3900 mgmt_pending_remove(cmd);
3903 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while powered off; the address must either be
 * BDADDR_ANY (disable) or a valid static address (top two bits set,
 * and not BDADDR_NONE).
 */
3907 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3908 void *data, u16 len)
3910 struct mgmt_cp_set_static_address *cp = data;
3913 BT_DBG("%s", hdev->name);
3915 if (!lmp_le_capable(hdev))
3916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3917 MGMT_STATUS_NOT_SUPPORTED);
3919 if (hdev_is_powered(hdev))
3920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3921 MGMT_STATUS_REJECTED);
3923 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3924 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3925 return mgmt_cmd_status(sk, hdev->id,
3926 MGMT_OP_SET_STATIC_ADDRESS,
3927 MGMT_STATUS_INVALID_PARAMS);
3929 /* Two most significant bits shall be set */
3930 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3931 return mgmt_cmd_status(sk, hdev->id,
3932 MGMT_OP_SET_STATIC_ADDRESS,
3933 MGMT_STATUS_INVALID_PARAMS);
3938 bacpy(&hdev->static_addr, &cp->bdaddr);
3940 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
3944 err = new_settings(hdev, sk);
3947 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval and window
 * (each 0x0004-0x4000, window <= interval) and restart background
 * passive scanning so the new values take effect.
 */
3951 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3952 void *data, u16 len)
3954 struct mgmt_cp_set_scan_params *cp = data;
3955 __u16 interval, window;
3958 BT_DBG("%s", hdev->name);
3960 if (!lmp_le_capable(hdev))
3961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3962 MGMT_STATUS_NOT_SUPPORTED);
3964 interval = __le16_to_cpu(cp->interval);
3966 if (interval < 0x0004 || interval > 0x4000)
3967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3968 MGMT_STATUS_INVALID_PARAMS);
3970 window = __le16_to_cpu(cp->window);
3972 if (window < 0x0004 || window > 0x4000)
3973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3974 MGMT_STATUS_INVALID_PARAMS);
3976 if (window > interval)
3977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3978 MGMT_STATUS_INVALID_PARAMS);
3982 hdev->le_scan_interval = interval;
3983 hdev->le_scan_window = window;
3985 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
3988 /* If background scan is running, restart it so new parameters are
3991 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
3992 hdev->discovery.state == DISCOVERY_STOPPED) {
3993 struct hci_request req;
3995 hci_req_init(&req, hdev);
3997 hci_req_add_le_scan_disable(&req);
3998 hci_req_add_le_passive_scan(&req);
4000 hci_req_run(&req, NULL);
4003 hci_dev_unlock(hdev);
/* Completion callback for Set Fast Connectable: on success commit the
 * HCI_FAST_CONNECTABLE flag from the saved command parameter and
 * notify with new settings; on failure report the error.
 */
4008 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4011 struct mgmt_pending_cmd *cmd;
4013 BT_DBG("status 0x%02x", status);
4017 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4022 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4023 mgmt_status(status));
4025 struct mgmt_mode *cp = cmd->param;
4028 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4030 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4032 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4033 new_settings(hdev, cmd->sk);
4036 mgmt_pending_remove(cmd);
4039 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled and a controller of at
 * least Bluetooth 1.2.
 */
4042 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4043 void *data, u16 len)
4045 struct mgmt_mode *cp = data;
4046 struct mgmt_pending_cmd *cmd;
4047 struct hci_request req;
4050 BT_DBG("%s", hdev->name);
4052 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4053 hdev->hci_ver < BLUETOOTH_VER_1_2)
4054 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4055 MGMT_STATUS_NOT_SUPPORTED);
4057 if (cp->val != 0x00 && cp->val != 0x01)
4058 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4059 MGMT_STATUS_INVALID_PARAMS);
4063 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4064 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: reply without HCI traffic. */
4069 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4070 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4075 if (!hdev_is_powered(hdev)) {
4076 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4077 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4079 new_settings(hdev, sk);
4083 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4090 hci_req_init(&req, hdev);
4092 __hci_req_write_fast_connectable(&req, cp->val);
4094 err = hci_req_run(&req, fast_connectable_complete);
4096 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4097 MGMT_STATUS_FAILED);
4098 mgmt_pending_remove(cmd);
4102 hci_dev_unlock(hdev);
/* Completion callback for Set BR/EDR: on failure undo the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success send the settings response and new_settings event.
 */
4107 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4109 struct mgmt_pending_cmd *cmd;
4111 BT_DBG("status 0x%02x", status);
4115 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4120 u8 mgmt_err = mgmt_status(status);
4122 /* We need to restore the flag if related HCI commands
4125 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4127 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4129 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4130 new_settings(hdev, cmd->sk);
4133 mgmt_pending_remove(cmd);
4136 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Disabling while powered on is rejected, as is
 * re-enabling when a static address or Secure Connections makes a
 * dual-mode configuration invalid (see comment block below).
 */
4139 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4141 struct mgmt_mode *cp = data;
4142 struct mgmt_pending_cmd *cmd;
4143 struct hci_request req;
4146 BT_DBG("request for %s", hdev->name);
4148 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4149 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4150 MGMT_STATUS_NOT_SUPPORTED);
4152 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4153 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4154 MGMT_STATUS_REJECTED);
4156 if (cp->val != 0x00 && cp->val != 0x01)
4157 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4158 MGMT_STATUS_INVALID_PARAMS);
4162 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4163 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: flip the flags only; clear the BR/EDR-dependent
 * settings when disabling.
 */
4167 if (!hdev_is_powered(hdev)) {
4169 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4170 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4171 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4172 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4173 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4176 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4178 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4182 err = new_settings(hdev, sk);
4186 /* Reject disabling when powered on */
4188 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4189 MGMT_STATUS_REJECTED);
4192 /* When configuring a dual-mode controller to operate
4193 * with LE only and using a static address, then switching
4194 * BR/EDR back on is not allowed.
4196 * Dual-mode controllers shall operate with the public
4197 * address as its identity address for BR/EDR and LE. So
4198 * reject the attempt to create an invalid configuration.
4200 * The same restrictions applies when secure connections
4201 * has been enabled. For BR/EDR this is a controller feature
4202 * while for LE it is a host stack feature. This means that
4203 * switching BR/EDR back on when secure connections has been
4204 * enabled is not a supported transaction.
4206 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4207 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4208 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4210 MGMT_STATUS_REJECTED);
4215 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4216 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4221 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4227 /* We need to flip the bit already here so that
4228 * hci_req_update_adv_data generates the correct flags.
4230 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4232 hci_req_init(&req, hdev);
4234 __hci_req_write_fast_connectable(&req, false);
4235 __hci_req_update_scan(&req);
4237 /* Since only the advertising data flags will change, there
4238 * is no need to update the scan response data.
4240 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4242 err = hci_req_run(&req, set_bredr_complete);
4244 mgmt_pending_remove(cmd);
4247 hci_dev_unlock(hdev);
/* Completion callback for Set Secure Connections: update the
 * HCI_SC_ENABLED / HCI_SC_ONLY flags per the requested mode
 * (0x00 off, 0x01 enabled, 0x02 SC-only) and answer the pending
 * command.
 * NOTE(review): elided listing — the switch on cp->val between the
 * flag-update lines is not visible here.
 */
4251 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4253 struct mgmt_pending_cmd *cmd;
4254 struct mgmt_mode *cp;
4256 BT_DBG("%s status %u", hdev->name, status);
4260 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4265 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4266 mgmt_status(status));
4274 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4275 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4278 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4279 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4282 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4283 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4287 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4288 new_settings(hdev, cmd->sk);
4291 mgmt_pending_remove(cmd);
4293 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 = off, 0x01 = enabled,
 * 0x02 = SC-only mode. If the controller is off, not SC-capable, or
 * BR/EDR is disabled, only the host flags are toggled; otherwise the
 * HCI Write Secure Connections Support command is issued.
 */
4296 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4297 void *data, u16 len)
4299 struct mgmt_mode *cp = data;
4300 struct mgmt_pending_cmd *cmd;
4301 struct hci_request req;
4305 BT_DBG("request for %s", hdev->name);
4307 if (!lmp_sc_capable(hdev) &&
4308 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4309 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4310 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
4312 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4313 lmp_sc_capable(hdev) &&
4314 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4315 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4316 MGMT_STATUS_REJECTED);
4318 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4320 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent. */
4324 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4325 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4329 changed = !hci_dev_test_and_set_flag(hdev,
4331 if (cp->val == 0x02)
4332 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4334 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4336 changed = hci_dev_test_and_clear_flag(hdev,
4338 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4341 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4346 err = new_settings(hdev, sk);
4351 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4352 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: reply without HCI traffic. */
4359 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4360 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4361 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4365 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4371 hci_req_init(&req, hdev);
4372 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4373 err = hci_req_run(&req, sc_enable_complete);
4375 mgmt_pending_remove(cmd);
4380 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS (cp->val: 0x00 = off, 0x01 = keep
 * debug keys, 0x02 = keep and actively use debug keys).  Toggles the
 * HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS flags and, when the "use"
 * state changed on a powered controller with SSP, programs SSP debug
 * mode into the controller.
 */
4384 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4385 void *data, u16 len)
4387 struct mgmt_mode *cp = data;
4388 bool changed, use_changed;
4391 BT_DBG("request for %s", hdev->name);
4393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4395 MGMT_STATUS_INVALID_PARAMS);
/* Any non-zero value keeps debug keys around. */
4400 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4402 changed = hci_dev_test_and_clear_flag(hdev,
4403 HCI_KEEP_DEBUG_KEYS);
/* Only 0x02 additionally enables *using* debug keys. */
4405 if (cp->val == 0x02)
4406 use_changed = !hci_dev_test_and_set_flag(hdev,
4407 HCI_USE_DEBUG_KEYS);
4409 use_changed = hci_dev_test_and_clear_flag(hdev,
4410 HCI_USE_DEBUG_KEYS);
/* Push the new SSP debug mode to the controller when relevant. */
4412 if (hdev_is_powered(hdev) && use_changed &&
4413 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4414 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4415 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4416 sizeof(mode), &mode);
4419 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4424 err = new_settings(hdev, sk);
4427 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY (cp->privacy: 0x00 = off, 0x01 = on,
 * 0x02 = limited privacy).  Only allowed while the controller is
 * powered off; stores the supplied IRK and toggles the HCI_PRIVACY /
 * HCI_LIMITED_PRIVACY flags.
 */
4431 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4434 struct mgmt_cp_set_privacy *cp = cp_data;
4438 BT_DBG("request for %s", hdev->name);
4440 if (!lmp_le_capable(hdev))
4441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4442 MGMT_STATUS_NOT_SUPPORTED);
4444 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4446 MGMT_STATUS_INVALID_PARAMS);
/* Privacy may only be reconfigured while powered off. */
4448 if (hdev_is_powered(hdev))
4449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4450 MGMT_STATUS_REJECTED);
4454 /* If user space supports this command it is also expected to
4455 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4457 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable: store the IRK, force an RPA regeneration, and select
 * limited vs. full privacy.
 */
4460 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4461 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4462 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4463 if (cp->privacy == 0x02)
4464 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4466 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable: wipe the IRK and clear all privacy-related flags. */
4468 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4469 memset(hdev->irk, 0, sizeof(hdev->irk));
4470 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4471 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4474 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4479 err = new_settings(hdev, sk);
4482 hci_dev_unlock(hdev);
/* Validate an IRK entry from MGMT_OP_LOAD_IRKS: the address type must
 * be LE public or LE random, and a random address must be a static
 * address (two most significant bits set).
 */
4486 static bool irk_is_valid(struct mgmt_irk_info *irk)
4488 switch (irk->addr.type) {
4489 case BDADDR_LE_PUBLIC:
4492 case BDADDR_LE_RANDOM:
4493 /* Two most significant bits shall be set */
4494 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the kernel's set of Identity
 * Resolving Keys with the user-space supplied list.  The count is
 * bounded so that the expected-length computation cannot overflow
 * u16, and every entry is validated before any state is touched.
 */
4502 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4505 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps sizeof(*cp) + n * sizeof(entry) within U16_MAX. */
4506 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4507 sizeof(struct mgmt_irk_info));
4508 u16 irk_count, expected_len;
4511 BT_DBG("request for %s", hdev->name);
4513 if (!lmp_le_capable(hdev))
4514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4515 MGMT_STATUS_NOT_SUPPORTED);
4517 irk_count = __le16_to_cpu(cp->irk_count);
4518 if (irk_count > max_irk_count) {
4519 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4521 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
4524 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4525 if (expected_len != len) {
4526 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4529 MGMT_STATUS_INVALID_PARAMS);
4532 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries up front so the load is all-or-nothing. */
4534 for (i = 0; i < irk_count; i++) {
4535 struct mgmt_irk_info *key = &cp->irks[i];
4537 if (!irk_is_valid(key))
4538 return mgmt_cmd_status(sk, hdev->id,
4540 MGMT_STATUS_INVALID_PARAMS);
/* Drop the old IRK list and install the new one. */
4545 hci_smp_irks_clear(hdev);
4547 for (i = 0; i < irk_count; i++) {
4548 struct mgmt_irk_info *irk = &cp->irks[i];
4550 hci_add_irk(hdev, &irk->addr.bdaddr,
4551 le_addr_type(irk->addr.type), irk->val,
/* User space handed us IRKs, so it can resolve RPAs. */
4555 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4559 hci_dev_unlock(hdev);
/* Validate an LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: the master
 * field must be 0/1, the address type LE public or LE random, and a
 * random address must be static (two most significant bits set).
 */
4564 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4566 if (key->master != 0x00 && key->master != 0x01)
4569 switch (key->addr.type) {
4570 case BDADDR_LE_PUBLIC:
4573 case BDADDR_LE_RANDOM:
4574 /* Two most significant bits shall be set */
4575 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the SMP LTK store with
 * the user-space supplied list.  Mirrors load_irks(): bounded count,
 * exact length check, validate-all-then-apply.
 */
4583 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4584 void *cp_data, u16 len)
4586 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps sizeof(*cp) + n * sizeof(entry) within U16_MAX. */
4587 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4588 sizeof(struct mgmt_ltk_info));
4589 u16 key_count, expected_len;
4592 BT_DBG("request for %s", hdev->name);
4594 if (!lmp_le_capable(hdev))
4595 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4596 MGMT_STATUS_NOT_SUPPORTED);
4598 key_count = __le16_to_cpu(cp->key_count);
4599 if (key_count > max_key_count) {
4600 BT_ERR("load_ltks: too big key_count value %u", key_count);
4601 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4602 MGMT_STATUS_INVALID_PARAMS);
4605 expected_len = sizeof(*cp) + key_count *
4606 sizeof(struct mgmt_ltk_info);
4607 if (expected_len != len) {
4608 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4610 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4611 MGMT_STATUS_INVALID_PARAMS);
4614 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before clearing the existing keys. */
4616 for (i = 0; i < key_count; i++) {
4617 struct mgmt_ltk_info *key = &cp->keys[i];
4619 if (!ltk_is_valid(key))
4620 return mgmt_cmd_status(sk, hdev->id,
4621 MGMT_OP_LOAD_LONG_TERM_KEYS,
4622 MGMT_STATUS_INVALID_PARAMS);
4627 hci_smp_ltks_clear(hdev);
4629 for (i = 0; i < key_count; i++) {
4630 struct mgmt_ltk_info *key = &cp->keys[i];
4631 u8 type, authenticated;
/* Map the mgmt key type onto SMP key type + authentication level;
 * legacy (non-P256) keys also encode the master/slave role.
 */
4633 switch (key->type) {
4634 case MGMT_LTK_UNAUTHENTICATED:
4635 authenticated = 0x00;
4636 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4638 case MGMT_LTK_AUTHENTICATED:
4639 authenticated = 0x01;
4640 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4642 case MGMT_LTK_P256_UNAUTH:
4643 authenticated = 0x00;
4644 type = SMP_LTK_P256;
4646 case MGMT_LTK_P256_AUTH:
4647 authenticated = 0x01;
4648 type = SMP_LTK_P256;
4650 case MGMT_LTK_P256_DEBUG:
4651 authenticated = 0x00;
4652 type = SMP_LTK_P256_DEBUG;
4657 hci_add_ltk(hdev, &key->addr.bdaddr,
4658 le_addr_type(key->addr.type), type, authenticated,
4659 key->val, key->enc_size, key->ediv, key->rand);
4662 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4665 hci_dev_unlock(hdev);
/* Finish a pending MGMT_OP_GET_CONN_INFO command: build the reply from
 * the connection's cached RSSI / TX power (or invalid markers on
 * failure), send it, and drop the connection reference taken when the
 * command was queued.
 */
4670 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
4672 struct hci_conn *conn = cmd->user_data;
4673 struct mgmt_rp_get_conn_info rp;
/* The original request's address was stashed in cmd->param. */
4676 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
4678 if (status == MGMT_STATUS_SUCCESS) {
4679 rp.rssi = conn->rssi;
4680 rp.tx_power = conn->tx_power;
4681 rp.max_tx_power = conn->max_tx_power;
4683 rp.rssi = HCI_RSSI_INVALID;
4684 rp.tx_power = HCI_TX_POWER_INVALID;
4685 rp.max_tx_power = HCI_TX_POWER_INVALID;
4688 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4689 status, &rp, sizeof(rp));
4691 hci_conn_drop(conn);
/* HCI request callback for the RSSI / TX power refresh issued by
 * get_conn_info().  Recovers the connection handle from whichever
 * command was sent last, then completes the matching pending mgmt
 * command.
 */
4697 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
4700 struct hci_cp_read_rssi *cp;
4701 struct mgmt_pending_cmd *cmd;
4702 struct hci_conn *conn;
4706 BT_DBG("status 0x%02x", hci_status);
4710 /* Commands sent in request are either Read RSSI or Read Transmit Power
4711 * Level so we check which one was last sent to retrieve connection
4712 * handle. Both commands have handle as first parameter so it's safe to
4713 * cast data on the same command struct.
4715 * First command sent is always Read RSSI and we fail only if it fails.
4716 * In other case we simply override error to indicate success as we
4717 * already remembered if TX power value is actually valid.
4719 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4721 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4722 status = MGMT_STATUS_SUCCESS;
4724 status = mgmt_status(hci_status);
/* Neither command found in the sent-cmd buffer: nothing to do. */
4728 BT_ERR("invalid sent_cmd in conn_info response");
4732 handle = __le16_to_cpu(cp->handle);
4733 conn = hci_conn_hash_lookup_handle(hdev, handle);
4735 BT_ERR("unknown handle (%d) in conn_info response", handle);
4739 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
4743 cmd->cmd_complete(cmd, status);
4744 mgmt_pending_remove(cmd);
4747 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI / TX power / max TX power
 * for an existing connection.  Serves cached values while they are
 * fresh; otherwise queues an HCI request to refresh them and defers
 * the reply to conn_info_refresh_complete().
 */
4750 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4753 struct mgmt_cp_get_conn_info *cp = data;
4754 struct mgmt_rp_get_conn_info rp;
4755 struct hci_conn *conn;
4756 unsigned long conn_info_age;
4759 BT_DBG("%s", hdev->name);
/* Reply always echoes the requested address. */
4761 memset(&rp, 0, sizeof(rp));
4762 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4763 rp.addr.type = cp->addr.type;
4765 if (!bdaddr_type_is_valid(cp->addr.type))
4766 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4767 MGMT_STATUS_INVALID_PARAMS,
4772 if (!hdev_is_powered(hdev)) {
4773 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4774 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the right link type for the address. */
4779 if (cp->addr.type == BDADDR_BREDR)
4780 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4783 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4785 if (!conn || conn->state != BT_CONNECTED) {
4786 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4787 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one GET_CONN_INFO per connection may be in flight. */
4792 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
4793 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4794 MGMT_STATUS_BUSY, &rp, sizeof(rp));
4798 /* To avoid client trying to guess when to poll again for information we
4799 * calculate conn info age as random value between min/max set in hdev.
4801 conn_info_age = hdev->conn_info_min_age +
4802 prandom_u32_max(hdev->conn_info_max_age -
4803 hdev->conn_info_min_age);
4805 /* Query controller to refresh cached values if they are too old or were
4808 if (time_after(jiffies, conn->conn_info_timestamp +
4809 msecs_to_jiffies(conn_info_age)) ||
4810 !conn->conn_info_timestamp) {
4811 struct hci_request req;
4812 struct hci_cp_read_tx_power req_txp_cp;
4813 struct hci_cp_read_rssi req_rssi_cp;
4814 struct mgmt_pending_cmd *cmd;
/* RSSI is always (re)read first; see conn_info_refresh_complete(). */
4816 hci_req_init(&req, hdev);
4817 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4818 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4821 /* For LE links TX power does not change thus we don't need to
4822 * query for it once value is known.
4824 if (!bdaddr_type_is_le(cp->addr.type) ||
4825 conn->tx_power == HCI_TX_POWER_INVALID) {
4826 req_txp_cp.handle = cpu_to_le16(conn->handle);
4827 req_txp_cp.type = 0x00;
4828 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4829 sizeof(req_txp_cp), &req_txp_cp);
4832 /* Max TX power needs to be read only once per connection */
4833 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4834 req_txp_cp.handle = cpu_to_le16(conn->handle);
4835 req_txp_cp.type = 0x01;
4836 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4837 sizeof(req_txp_cp), &req_txp_cp);
4840 err = hci_req_run(&req, conn_info_refresh_complete);
4844 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the pending cmd completes;
 * the matching drop is in conn_info_cmd_complete().
 */
4851 hci_conn_hold(conn);
4852 cmd->user_data = hci_conn_get(conn);
4853 cmd->cmd_complete = conn_info_cmd_complete;
4855 conn->conn_info_timestamp = jiffies;
4857 /* Cache is valid, just reply with values cached in hci_conn */
4858 rp.rssi = conn->rssi;
4859 rp.tx_power = conn->tx_power;
4860 rp.max_tx_power = conn->max_tx_power;
4862 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4863 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4867 hci_dev_unlock(hdev);
/* Finish a pending MGMT_OP_GET_CLOCK_INFO command: fill in the local
 * clock and (when a connection was involved) the piconet clock and
 * accuracy, send the reply, and release the held references.
 */
4871 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
4873 struct hci_conn *conn = cmd->user_data;
4874 struct mgmt_rp_get_clock_info rp;
4875 struct hci_dev *hdev;
4878 memset(&rp, 0, sizeof(rp));
4879 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
/* Re-acquire the hdev by index to read the cached local clock. */
4884 hdev = hci_dev_get(cmd->index);
4886 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet values come from the connection, when one is attached. */
4891 rp.piconet_clock = cpu_to_le32(conn->clock);
4892 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
4896 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
4900 hci_conn_drop(conn);
/* HCI request callback for get_clock_info(): recover the connection
 * (if the piconet clock was read, i.e. hci_cp->which is non-zero) and
 * complete the matching pending mgmt command.
 */
4907 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4909 struct hci_cp_read_clock *hci_cp;
4910 struct mgmt_pending_cmd *cmd;
4911 struct hci_conn *conn;
4913 BT_DBG("%s status %u", hdev->name, status);
4917 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a connection was queried. */
4921 if (hci_cp->which) {
4922 u16 handle = __le16_to_cpu(hci_cp->handle);
4923 conn = hci_conn_hash_lookup_handle(hdev, handle);
4928 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
4932 cmd->cmd_complete(cmd, mgmt_status(status));
4933 mgmt_pending_remove(cmd);
4936 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local clock and, for a
 * specific connected BR/EDR peer, also the piconet clock.  BDADDR_ANY
 * requests local-clock-only.  Completion runs via
 * get_clock_info_complete() -> clock_info_cmd_complete().
 */
4939 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
4942 struct mgmt_cp_get_clock_info *cp = data;
4943 struct mgmt_rp_get_clock_info rp;
4944 struct hci_cp_read_clock hci_cp;
4945 struct mgmt_pending_cmd *cmd;
4946 struct hci_request req;
4947 struct hci_conn *conn;
4950 BT_DBG("%s", hdev->name);
4952 memset(&rp, 0, sizeof(rp));
4953 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4954 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
4956 if (cp->addr.type != BDADDR_BREDR)
4957 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4958 MGMT_STATUS_INVALID_PARAMS,
4963 if (!hdev_is_powered(hdev)) {
4964 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4965 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the peer must be an active ACL connection. */
4970 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4971 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4973 if (!conn || conn->state != BT_CONNECTED) {
4974 err = mgmt_cmd_complete(sk, hdev->id,
4975 MGMT_OP_GET_CLOCK_INFO,
4976 MGMT_STATUS_NOT_CONNECTED,
4984 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
4990 cmd->cmd_complete = clock_info_cmd_complete;
4992 hci_req_init(&req, hdev);
/* First read: local clock (zeroed cp => which = 0x00). */
4994 memset(&hci_cp, 0, sizeof(hci_cp));
4995 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the command completes; dropped in
 * clock_info_cmd_complete().
 */
4998 hci_conn_hold(conn);
4999 cmd->user_data = hci_conn_get(conn);
5001 hci_cp.handle = cpu_to_le16(conn->handle);
5002 hci_cp.which = 0x01; /* Piconet clock */
5003 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5006 err = hci_req_run(&req, get_clock_info_complete);
5008 mgmt_pending_remove(cmd);
5011 hci_dev_unlock(hdev);
/* Return whether there is an established LE connection to the given
 * address with the given (destination) address type.
 */
5015 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5017 struct hci_conn *conn;
5019 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5023 if (conn->dst_type != type)
5026 if (conn->state != BT_CONNECTED)
5032 /* This function requires the caller holds hdev->lock */
/* Create (or fetch) the connection parameters for addr/addr_type and
 * set the requested auto-connect policy, moving the entry onto the
 * matching action list (pend_le_conns / pend_le_reports).
 */
5033 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
5034 u8 addr_type, u8 auto_connect)
5036 struct hci_conn_params *params;
5038 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5042 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before. */
5045 list_del_init(&params->action);
5047 switch (auto_connect) {
5048 case HCI_AUTO_CONN_DISABLED:
5049 case HCI_AUTO_CONN_LINK_LOSS:
5050 /* If auto connect is being disabled when we're trying to
5051 * connect to device, keep connecting.
5053 if (params->explicit_connect)
5054 list_add(&params->action, &hdev->pend_le_conns);
5056 case HCI_AUTO_CONN_REPORT:
5057 if (params->explicit_connect)
5058 list_add(&params->action, &hdev->pend_le_conns);
5060 list_add(&params->action, &hdev->pend_le_reports);
5062 case HCI_AUTO_CONN_DIRECT:
5063 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if one isn't already up. */
5064 if (!is_connected(hdev, addr, addr_type))
5065 list_add(&params->action, &hdev->pend_le_conns);
5069 params->auto_connect = auto_connect;
5071 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event for the given address, skipping
 * the socket that issued the originating command.
 */
5077 static void device_added(struct sock *sk, struct hci_dev *hdev,
5078 bdaddr_t *bdaddr, u8 type, u8 action)
5080 struct mgmt_ev_device_added ev;
5082 bacpy(&ev.addr.bdaddr, bdaddr);
5083 ev.addr.type = type;
5086 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_ADD_DEVICE: whitelist a BR/EDR peer or configure the
 * auto-connect policy for an LE peer (cp->action: 0x00 = background
 * scan report, 0x01 = allow incoming / direct connect, 0x02 = always
 * auto-connect).
 */
5089 static int add_device(struct sock *sk, struct hci_dev *hdev,
5090 void *data, u16 len)
5092 struct mgmt_cp_add_device *cp = data;
5093 u8 auto_conn, addr_type;
5096 BT_DBG("%s", hdev->name);
5098 if (!bdaddr_type_is_valid(cp->addr.type) ||
5099 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5100 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5101 MGMT_STATUS_INVALID_PARAMS,
5102 &cp->addr, sizeof(cp->addr));
5104 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5105 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5106 MGMT_STATUS_INVALID_PARAMS,
5107 &cp->addr, sizeof(cp->addr));
/* BR/EDR path: add the peer to the whitelist. */
5111 if (cp->addr.type == BDADDR_BREDR) {
5112 /* Only incoming connections action is supported for now */
5113 if (cp->action != 0x01) {
5114 err = mgmt_cmd_complete(sk, hdev->id,
5116 MGMT_STATUS_INVALID_PARAMS,
5117 &cp->addr, sizeof(cp->addr));
5121 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5126 hci_req_update_scan(hdev);
/* LE path: translate the mgmt action into an auto-connect policy. */
5131 addr_type = le_addr_type(cp->addr.type);
5133 if (cp->action == 0x02)
5134 auto_conn = HCI_AUTO_CONN_ALWAYS;
5135 else if (cp->action == 0x01)
5136 auto_conn = HCI_AUTO_CONN_DIRECT;
5138 auto_conn = HCI_AUTO_CONN_REPORT;
5140 /* Kernel internally uses conn_params with resolvable private
5141 * address, but Add Device allows only identity addresses.
5142 * Make sure it is enforced before calling
5143 * hci_conn_params_lookup.
5145 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5147 MGMT_STATUS_INVALID_PARAMS,
5148 &cp->addr, sizeof(cp->addr));
5152 /* If the connection parameters don't exist for this device,
5153 * they will be created and configured with defaults.
5155 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5158 MGMT_STATUS_FAILED, &cp->addr,
/* New policy may require passive scanning to be (re)started. */
5163 hci_update_background_scan(hdev);
5166 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5168 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5169 MGMT_STATUS_SUCCESS, &cp->addr,
5173 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for the given address,
 * skipping the socket that issued the originating command.
 */
5177 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5178 bdaddr_t *bdaddr, u8 type)
5180 struct mgmt_ev_device_removed ev;
5182 bacpy(&ev.addr.bdaddr, bdaddr);
5183 ev.addr.type = type;
5185 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_REMOVE_DEVICE: undo add_device() for one peer, or --
 * when the address is BDADDR_ANY -- flush the whole whitelist and all
 * non-disabled LE connection parameters.
 */
5188 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5189 void *data, u16 len)
5191 struct mgmt_cp_remove_device *cp = data;
5194 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove a single device. */
5198 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5199 struct hci_conn_params *params;
5202 if (!bdaddr_type_is_valid(cp->addr.type)) {
5203 err = mgmt_cmd_complete(sk, hdev->id,
5204 MGMT_OP_REMOVE_DEVICE,
5205 MGMT_STATUS_INVALID_PARAMS,
5206 &cp->addr, sizeof(cp->addr));
/* BR/EDR: drop from the whitelist and refresh page scan. */
5210 if (cp->addr.type == BDADDR_BREDR) {
5211 err = hci_bdaddr_list_del(&hdev->whitelist,
5215 err = mgmt_cmd_complete(sk, hdev->id,
5216 MGMT_OP_REMOVE_DEVICE,
5217 MGMT_STATUS_INVALID_PARAMS,
5223 hci_req_update_scan(hdev);
5225 device_removed(sk, hdev, &cp->addr.bdaddr,
5230 addr_type = le_addr_type(cp->addr.type);
5232 /* Kernel internally uses conn_params with resolvable private
5233 * address, but Remove Device allows only identity addresses.
5234 * Make sure it is enforced before calling
5235 * hci_conn_params_lookup.
5237 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5238 err = mgmt_cmd_complete(sk, hdev->id,
5239 MGMT_OP_REMOVE_DEVICE,
5240 MGMT_STATUS_INVALID_PARAMS,
5241 &cp->addr, sizeof(cp->addr));
5245 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5248 err = mgmt_cmd_complete(sk, hdev->id,
5249 MGMT_OP_REMOVE_DEVICE,
5250 MGMT_STATUS_INVALID_PARAMS,
5251 &cp->addr, sizeof(cp->addr));
/* Entries not created via Add Device cannot be removed here. */
5255 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
5256 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5257 err = mgmt_cmd_complete(sk, hdev->id,
5258 MGMT_OP_REMOVE_DEVICE,
5259 MGMT_STATUS_INVALID_PARAMS,
5260 &cp->addr, sizeof(cp->addr));
5264 list_del(&params->action);
5265 list_del(&params->list);
5267 hci_update_background_scan(hdev);
5269 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal (addr.type must be 0 here). */
5271 struct hci_conn_params *p, *tmp;
5272 struct bdaddr_list *b, *btmp;
5274 if (cp->addr.type) {
5275 err = mgmt_cmd_complete(sk, hdev->id,
5276 MGMT_OP_REMOVE_DEVICE,
5277 MGMT_STATUS_INVALID_PARAMS,
5278 &cp->addr, sizeof(cp->addr));
5282 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5283 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5288 hci_req_update_scan(hdev);
/* Remove all LE conn params except HCI_AUTO_CONN_DISABLED entries;
 * explicitly-connecting ones are downgraded instead of deleted.
 */
5290 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5291 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5293 device_removed(sk, hdev, &p->addr, p->addr_type);
5294 if (p->explicit_connect) {
5295 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
5298 list_del(&p->action);
5303 BT_DBG("All LE connection parameters were removed");
5305 hci_update_background_scan(hdev);
5309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5310 MGMT_STATUS_SUCCESS, &cp->addr,
5313 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the user-space supplied list.  Count and length are
 * validated up front; individual invalid entries are logged and
 * skipped rather than failing the whole load.
 */
5317 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5320 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps sizeof(*cp) + n * sizeof(entry) within U16_MAX. */
5321 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5322 sizeof(struct mgmt_conn_param));
5323 u16 param_count, expected_len;
5326 if (!lmp_le_capable(hdev))
5327 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5328 MGMT_STATUS_NOT_SUPPORTED);
5330 param_count = __le16_to_cpu(cp->param_count);
5331 if (param_count > max_param_count) {
5332 BT_ERR("load_conn_param: too big param_count value %u",
5334 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5335 MGMT_STATUS_INVALID_PARAMS);
5338 expected_len = sizeof(*cp) + param_count *
5339 sizeof(struct mgmt_conn_param);
5340 if (expected_len != len) {
5341 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5344 MGMT_STATUS_INVALID_PARAMS);
5347 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop previously-disabled param entries before loading new ones. */
5351 hci_conn_params_clear_disabled(hdev);
5353 for (i = 0; i < param_count; i++) {
5354 struct mgmt_conn_param *param = &cp->params[i];
5355 struct hci_conn_params *hci_param;
5356 u16 min, max, latency, timeout;
5359 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5362 if (param->addr.type == BDADDR_LE_PUBLIC) {
5363 addr_type = ADDR_LE_DEV_PUBLIC;
5364 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5365 addr_type = ADDR_LE_DEV_RANDOM;
5367 BT_ERR("Ignoring invalid connection parameters");
5371 min = le16_to_cpu(param->min_interval);
5372 max = le16_to_cpu(param->max_interval);
5373 latency = le16_to_cpu(param->latency);
5374 timeout = le16_to_cpu(param->timeout);
5376 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5377 min, max, latency, timeout);
/* Bad ranges are skipped, not fatal. */
5379 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5380 BT_ERR("Ignoring invalid connection parameters");
5384 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5387 BT_ERR("Failed to add connection parameters");
5391 hci_param->conn_min_interval = min;
5392 hci_param->conn_max_interval = max;
5393 hci_param->conn_latency = latency;
5394 hci_param->supervision_timeout = timeout;
5397 hci_dev_unlock(hdev);
5399 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: mark an external-config-capable
 * controller as configured/unconfigured.  Only valid while powered
 * off and only on controllers with HCI_QUIRK_EXTERNAL_CONFIG.  When
 * the configured state flips, the index is re-announced as a
 * (un)configured controller.
 */
5403 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5404 void *data, u16 len)
5406 struct mgmt_cp_set_external_config *cp = data;
5410 BT_DBG("%s", hdev->name);
5412 if (hdev_is_powered(hdev))
5413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5414 MGMT_STATUS_REJECTED);
5416 if (cp->config != 0x00 && cp->config != 0x01)
5417 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5418 MGMT_STATUS_INVALID_PARAMS);
5420 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5421 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5422 MGMT_STATUS_NOT_SUPPORTED);
5427 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5429 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5431 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5438 err = new_options(hdev, sk);
/* If the overall configured state changed, remove the index and
 * re-add it under its new (configured/unconfigured) identity.
 */
5440 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5441 mgmt_index_removed(hdev);
5443 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5444 hci_dev_set_flag(hdev, HCI_CONFIG);
5445 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5447 queue_work(hdev->req_workqueue, &hdev->power_on);
5449 set_bit(HCI_RAW, &hdev->flags);
5450 mgmt_index_added(hdev);
5455 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: stage a public BD_ADDR for a
 * controller whose driver provides a set_bdaddr hook.  Only valid
 * while powered off; when this completes the device configuration,
 * the index transitions from unconfigured to configured and power-on
 * is queued.
 */
5459 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5460 void *data, u16 len)
5462 struct mgmt_cp_set_public_address *cp = data;
5466 BT_DBG("%s", hdev->name);
5468 if (hdev_is_powered(hdev))
5469 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5470 MGMT_STATUS_REJECTED);
5472 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5474 MGMT_STATUS_INVALID_PARAMS);
/* The driver must support programming the address. */
5476 if (!hdev->set_bdaddr)
5477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5478 MGMT_STATUS_NOT_SUPPORTED);
5482 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5483 bacpy(&hdev->public_addr, &cp->bdaddr);
5485 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5492 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5493 err = new_options(hdev, sk);
/* Address was the last missing piece: promote the controller to a
 * configured index and power it on.
 */
5495 if (is_configured(hdev)) {
5496 mgmt_index_removed(hdev);
5498 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
5500 hci_dev_set_flag(hdev, HCI_CONFIG);
5501 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5503 queue_work(hdev->req_workqueue, &hdev->power_on);
5507 hci_dev_unlock(hdev);
/* Append one EIR/AD structure (length octet, type octet, payload) to
 * the buffer at offset eir_len and return the new length.  The caller
 * must guarantee the buffer has room for data_len + 2 octets.
 */
5511 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5514 eir[eir_len++] = sizeof(type) + data_len;
5515 eir[eir_len++] = type;
5516 memcpy(&eir[eir_len], data, data_len);
5517 eir_len += data_len;
/* Completion handler for the BR/EDR local OOB data read issued by
 * read_local_ssp_oob_req().  Parses either the legacy or the extended
 * (Secure Connections) controller response, builds an EIR-encoded
 * mgmt reply, sends it to the requester and broadcasts it via
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 */
5522 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
5523 u16 opcode, struct sk_buff *skb)
5525 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
5526 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
5527 u8 *h192, *r192, *h256, *r256;
5528 struct mgmt_pending_cmd *cmd;
5532 BT_DBG("%s status %u", hdev->name, status);
5534 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
5538 mgmt_cp = cmd->param;
5541 status = mgmt_status(status);
/* Legacy response: P-192 hash/randomizer only. */
5548 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
5549 struct hci_rp_read_local_oob_data *rp;
5551 if (skb->len != sizeof(*rp)) {
5552 status = MGMT_STATUS_FAILED;
5555 status = MGMT_STATUS_SUCCESS;
5556 rp = (void *)skb->data;
/* 5 = class-of-dev EIR header+payload; 18 = one 16-byte value
 * plus its 2-byte EIR header.
 */
5558 eir_len = 5 + 18 + 18;
/* Extended response: P-192 and/or P-256 values. */
5565 struct hci_rp_read_local_oob_ext_data *rp;
5567 if (skb->len != sizeof(*rp)) {
5568 status = MGMT_STATUS_FAILED;
5571 status = MGMT_STATUS_SUCCESS;
5572 rp = (void *)skb->data;
/* SC-only mode exposes just the P-256 pair. */
5574 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5575 eir_len = 5 + 18 + 18;
5579 eir_len = 5 + 18 + 18 + 18 + 18;
5589 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
/* Assemble the EIR payload: class of device, then whichever
 * hash/randomizer pairs were selected above (h192/r192, h256/r256
 * presumably set in branches not visible in this listing -- TODO
 * confirm against the full source).
 */
5596 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
5597 hdev->dev_class, 3);
5600 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5601 EIR_SSP_HASH_C192, h192, 16);
5602 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5603 EIR_SSP_RAND_R192, r192, 16);
5607 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5608 EIR_SSP_HASH_C256, h256, 16);
5609 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5610 EIR_SSP_RAND_R256, r256, 16);
5614 mgmt_rp->type = mgmt_cp->type;
5615 mgmt_rp->eir_len = cpu_to_le16(eir_len);
5617 err = mgmt_cmd_complete(cmd->sk, hdev->id,
5618 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
5619 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
5620 if (err < 0 || status)
/* Requester implicitly subscribes to OOB data update events. */
5623 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
5625 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
5626 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
5627 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
5630 mgmt_pending_remove(cmd);
/* Queue a pending READ_LOCAL_OOB_EXT_DATA command and ask the
 * controller for its local OOB data -- the extended variant when
 * BR/EDR Secure Connections is enabled, the legacy one otherwise.
 * read_local_oob_ext_data_complete() finishes the command.
 */
5633 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5634 struct mgmt_cp_read_local_oob_ext_data *cp)
5636 struct mgmt_pending_cmd *cmd;
5637 struct hci_request req;
5640 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5645 hci_req_init(&req, hdev);
5647 if (bredr_sc_enabled(hdev))
5648 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5650 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5652 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
5654 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return EIR-encoded local
 * out-of-band pairing data.  BR/EDR requests are forwarded to the
 * controller (asynchronous, via read_local_ssp_oob_req()); LE
 * requests are synthesized locally from the address, role, SC OOB
 * values and advertising flags.
 */
5661 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
5662 void *data, u16 data_len)
5664 struct mgmt_cp_read_local_oob_ext_data *cp = data;
5665 struct mgmt_rp_read_local_oob_ext_data *rp;
5668 u8 status, flags, role, addr[7], hash[16], rand[16];
5671 BT_DBG("%s", hdev->name);
/* Validate the requested transport (cp->type bitmask) and size the
 * EIR buffer accordingly.
 */
5673 if (hdev_is_powered(hdev)) {
5675 case BIT(BDADDR_BREDR):
5676 status = mgmt_bredr_support(hdev);
5682 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
5683 status = mgmt_le_support(hdev);
5687 eir_len = 9 + 3 + 18 + 18 + 3;
5690 status = MGMT_STATUS_INVALID_PARAMS;
5695 status = MGMT_STATUS_NOT_POWERED;
5699 rp_len = sizeof(*rp) + eir_len;
5700 rp = kmalloc(rp_len, GFP_ATOMIC);
5711 case BIT(BDADDR_BREDR):
/* With SSP enabled the data must come from the controller; the
 * reply is sent asynchronously, so free rp and return here.
 */
5712 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5713 err = read_local_ssp_oob_req(hdev, sk, cp);
5714 hci_dev_unlock(hdev);
5718 status = MGMT_STATUS_FAILED;
5721 eir_len = eir_append_data(rp->eir, eir_len,
5723 hdev->dev_class, 3);
5726 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* SC enabled: generate the LE SC OOB confirm/random values. */
5727 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5728 smp_generate_oob(hdev, hash, rand) < 0) {
5729 hci_dev_unlock(hdev);
5730 status = MGMT_STATUS_FAILED;
5734 /* This should return the active RPA, but since the RPA
5735 * is only programmed on demand, it is really hard to fill
5736 * this in at the moment. For now disallow retrieving
5737 * local out-of-band data when privacy is in use.
5739 * Returning the identity address will not help here since
5740 * pairing happens before the identity resolving key is
5741 * known and thus the connection establishment happens
5742 * based on the RPA and not the identity address.
5744 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5745 hci_dev_unlock(hdev);
5746 status = MGMT_STATUS_REJECTED;
/* Pick static vs. public address using the same rules as the
 * rest of the LE address handling.
 */
5750 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
5751 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
5752 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5753 bacmp(&hdev->static_addr, BDADDR_ANY))) {
5754 memcpy(addr, &hdev->static_addr, 6);
5757 memcpy(addr, &hdev->bdaddr, 6);
5761 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
5762 addr, sizeof(addr));
/* Role: derived from whether advertising is active (exact values
 * assigned on lines not visible in this listing -- TODO confirm).
 */
5764 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5769 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
5770 &role, sizeof(role));
5772 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
5773 eir_len = eir_append_data(rp->eir, eir_len,
5775 hash, sizeof(hash));
5777 eir_len = eir_append_data(rp->eir, eir_len,
5779 rand, sizeof(rand));
5782 flags = mgmt_get_adv_discov_flags(hdev);
5784 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5785 flags |= LE_AD_NO_BREDR;
5787 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
5788 &flags, sizeof(flags));
5792 hci_dev_unlock(hdev);
/* Requester implicitly subscribes to OOB data update events. */
5794 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
5796 status = MGMT_STATUS_SUCCESS;
5799 rp->type = cp->type;
5800 rp->eir_len = cpu_to_le16(eir_len);
5802 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
5803 status, rp, sizeof(*rp) + eir_len);
5804 if (err < 0 || status)
5807 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
5808 rp, sizeof(*rp) + eir_len,
5809 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of advertising-instance flags this controller supports.
 * The TX-power flag is only offered when the controller reported a valid
 * advertising TX power value.
 */
5817 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5821 flags |= MGMT_ADV_FLAG_CONNECTABLE;
5822 flags |= MGMT_ADV_FLAG_DISCOV;
5823 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5824 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5826 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5827 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Handle MGMT_OP_READ_ADV_FEATURES: reply with the supported advertising
 * flags, maximum advertising/scan-response data lengths and the list of
 * currently configured instance identifiers.  Rejected outright when the
 * controller is not LE capable.
 */
5832 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
5833 void *data, u16 data_len)
5835 struct mgmt_rp_read_adv_features *rp;
5838 struct adv_info *adv_instance;
5839 u32 supported_flags;
5842 BT_DBG("%s", hdev->name);
5844 if (!lmp_le_capable(hdev))
5845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
5846 MGMT_STATUS_REJECTED);
/* Reply carries one instance-ID byte per configured instance. */
5850 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
5851 rp = kmalloc(rp_len, GFP_ATOMIC);
5853 hci_dev_unlock(hdev);
5857 supported_flags = get_supported_adv_flags(hdev);
5859 rp->supported_flags = cpu_to_le32(supported_flags);
5860 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
5861 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
5862 rp->max_instances = HCI_MAX_ADV_INSTANCES;
5863 rp->num_instances = hdev->adv_instance_cnt;
/* Append the instance ID of every configured advertising instance. */
5865 instance = rp->instance;
5866 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
5867 *instance = adv_instance->instance;
5871 hci_dev_unlock(hdev);
5873 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
5874 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate user-supplied advertising (or scan-response) TLV data.
 * Fields that the kernel manages itself (Flags when any discoverable/
 * managed-flags mode is requested, TX power when requested via flags)
 * may not be supplied by userspace, and every field length must stay
 * within the overall data length.
 */
5881 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
5882 u8 len, bool is_adv_data)
5884 u8 max_len = HCI_MAX_AD_LENGTH;
5886 bool flags_managed = false;
5887 bool tx_power_managed = false;
5890 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
5891 MGMT_ADV_FLAG_LIMITED_DISCOV |
5892 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
5893 flags_managed = true;
5897 if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
5898 tx_power_managed = true;
/* Make sure that the data is correctly formatted. */
5906 /* Make sure that the data is correctly formatted. */
5907 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Kernel-managed EIR types must not appear in user data. */
5910 if (flags_managed && data[i + 1] == EIR_FLAGS)
5913 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
5916 /* If the current field length would exceed the total data
5917 * length, then it's invalid.
5919 if (i + cur_len >= len)
/* HCI request completion callback for MGMT_OP_ADD_ADVERTISING.  On failure
 * every still-pending instance is torn down (cancelling the advertising
 * timeout if the current instance is affected) and an Advertising Removed
 * event is emitted; the pending mgmt command is then answered with either
 * a status (failure) or a command-complete carrying the instance (success).
 */
5926 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
5929 struct mgmt_pending_cmd *cmd;
5930 struct mgmt_cp_add_advertising *cp;
5931 struct mgmt_rp_add_advertising rp;
5932 struct adv_info *adv_instance, *n;
5935 BT_DBG("status %d", status);
5939 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
5941 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
5942 if (!adv_instance->pending)
5946 adv_instance->pending = false;
5950 instance = adv_instance->instance;
/* Stop the instance timer before removing the active instance. */
5952 if (hdev->cur_adv_instance == instance)
5953 cancel_adv_timeout(hdev);
5955 hci_remove_adv_instance(hdev, instance);
5956 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
5963 rp.instance = cp->instance;
5966 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5967 mgmt_status(status));
5969 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5970 mgmt_status(status), &rp, sizeof(rp));
5972 mgmt_pending_remove(cmd);
5975 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_ADVERTISING: validate the request, register the new
 * advertising instance and, when possible, schedule it on the controller.
 * Validation covers: LE support, instance number range, parameter length
 * consistency, supported flags, power state (a timeout requires a powered
 * controller), no conflicting pending commands, and well-formed TLV data.
 */
5978 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
5979 void *data, u16 data_len)
5981 struct mgmt_cp_add_advertising *cp = data;
5982 struct mgmt_rp_add_advertising rp;
5984 u32 supported_flags;
5986 u16 timeout, duration;
5987 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
5988 u8 schedule_instance = 0;
5989 struct adv_info *next_instance;
5991 struct mgmt_pending_cmd *cmd;
5992 struct hci_request req;
5994 BT_DBG("%s", hdev->name);
5996 status = mgmt_le_support(hdev);
5998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6001 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6002 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6003 MGMT_STATUS_INVALID_PARAMS);
/* The variable-length part must match the advertised lengths exactly. */
6005 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
6006 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6007 MGMT_STATUS_INVALID_PARAMS);
6009 flags = __le32_to_cpu(cp->flags);
6010 timeout = __le16_to_cpu(cp->timeout);
6011 duration = __le16_to_cpu(cp->duration);
6013 /* The current implementation only supports a subset of the specified
6016 supported_flags = get_supported_adv_flags(hdev);
6017 if (flags & ~supported_flags)
6018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6019 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be honoured while the controller is powered off. */
6023 if (timeout && !hdev_is_powered(hdev)) {
6024 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6025 MGMT_STATUS_REJECTED);
/* Refuse while another advertising/LE state change is in flight. */
6029 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6030 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6031 pending_find(MGMT_OP_SET_LE, hdev)) {
6032 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6037 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6038 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6039 cp->scan_rsp_len, false)) {
6040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6041 MGMT_STATUS_INVALID_PARAMS);
6045 err = hci_add_adv_instance(hdev, cp->instance, flags,
6046 cp->adv_data_len, cp->data,
6048 cp->data + cp->adv_data_len,
6051 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6052 MGMT_STATUS_FAILED);
6056 /* Only trigger an advertising added event if a new instance was
6059 if (hdev->adv_instance_cnt > prev_instance_cnt)
6060 mgmt_advertising_added(sk, hdev, cp->instance)
6062 if (hdev->cur_adv_instance == cp->instance) {
6063 /* If the currently advertised instance is being changed then
6064 * cancel the current advertising and schedule the next
6065 * instance. If there is only one instance then the overridden
6066 * advertising data will be visible right away.
6068 cancel_adv_timeout(hdev);
6070 next_instance = hci_get_next_instance(hdev, cp->instance);
6072 schedule_instance = next_instance->instance;
6073 } else if (!hdev->adv_instance_timeout) {
6074 /* Immediately advertise the new instance if no other
6075 * instance is currently being advertised.
6077 schedule_instance = cp->instance;
6080 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6081 * there is no instance to be advertised then we have no HCI
6082 * communication to make. Simply return.
6084 if (!hdev_is_powered(hdev) ||
6085 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6086 !schedule_instance) {
6087 rp.instance = cp->instance;
6088 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6089 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6093 /* We're good to go, update advertising data, parameters, and start
6096 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6103 hci_req_init(&req, hdev);
6105 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
6108 err = hci_req_run(&req, add_advertising_complete);
/* On submission failure drop the pending command we just queued. */
6111 mgmt_pending_remove(cmd);
6114 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.  The
 * instance has already been removed by the time this runs, so the pending
 * command is always answered with success regardless of the HCI status.
 */
6119 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6122 struct mgmt_pending_cmd *cmd;
6123 struct mgmt_cp_remove_advertising *cp;
6124 struct mgmt_rp_remove_advertising rp;
6126 BT_DBG("status %d", status);
6130 /* A failure status here only means that we failed to disable
6131 * advertising. Otherwise, the advertising instance has been removed,
6132 * so report success.
6134 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6139 rp.instance = cp->instance;
6141 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6143 mgmt_pending_remove(cmd);
6146 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance (or
 * all of them when instance is 0), disabling advertising entirely when no
 * instance remains.  If no HCI traffic is needed the command completes
 * immediately; otherwise the reply is deferred to the request callback.
 */
6149 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6150 void *data, u16 data_len)
6152 struct mgmt_cp_remove_advertising *cp = data;
6153 struct mgmt_rp_remove_advertising rp;
6154 struct mgmt_pending_cmd *cmd;
6155 struct hci_request req;
6158 BT_DBG("%s", hdev->name);
/* A non-zero instance must name an existing instance. */
6162 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
6163 err = mgmt_cmd_status(sk, hdev->id,
6164 MGMT_OP_REMOVE_ADVERTISING,
6165 MGMT_STATUS_INVALID_PARAMS);
/* Refuse while another advertising/LE state change is in flight. */
6169 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6170 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6171 pending_find(MGMT_OP_SET_LE, hdev)) {
6172 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6177 if (list_empty(&hdev->adv_instances)) {
6178 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6179 MGMT_STATUS_INVALID_PARAMS);
6183 hci_req_init(&req, hdev);
6185 hci_req_clear_adv_instance(hdev, &req, cp->instance, true);
6187 if (list_empty(&hdev->adv_instances))
6188 __hci_req_disable_advertising(&req);
6190 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
6191 * flag is set or the device isn't powered then we have no HCI
6192 * communication to make. Simply return.
6194 if (skb_queue_empty(&req.cmd_q) ||
6195 !hdev_is_powered(hdev) ||
6196 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6197 rp.instance = cp->instance;
6198 err = mgmt_cmd_complete(sk, hdev->id,
6199 MGMT_OP_REMOVE_ADVERTISING,
6200 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6204 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6211 err = hci_req_run(&req, remove_advertising_complete);
6213 mgmt_pending_remove(cmd);
6216 hci_dev_unlock(hdev);
/* Compute how many bytes of user-supplied TLV data fit in one
 * advertising (or scan-response) PDU once the flags the kernel manages
 * (Flags field, TX power) are reserved.
 */
6221 static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6223 u8 max_len = HCI_MAX_AD_LENGTH;
6226 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6227 MGMT_ADV_FLAG_LIMITED_DISCOV |
6228 MGMT_ADV_FLAG_MANAGED_FLAGS))
6231 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report, for the requested instance
 * and flag combination, how much advertising and scan-response data
 * userspace may supply.  Purely computational — no controller traffic.
 */
6238 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6239 void *data, u16 data_len)
6241 struct mgmt_cp_get_adv_size_info *cp = data;
6242 struct mgmt_rp_get_adv_size_info rp;
6243 u32 flags, supported_flags;
6246 BT_DBG("%s", hdev->name);
6248 if (!lmp_le_capable(hdev))
6249 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6250 MGMT_STATUS_REJECTED);
6252 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6253 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6254 MGMT_STATUS_INVALID_PARAMS);
6256 flags = __le32_to_cpu(cp->flags);
6258 /* The current implementation only supports a subset of the specified
6261 supported_flags = get_supported_adv_flags(hdev);
6262 if (flags & ~supported_flags)
6263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6264 MGMT_STATUS_INVALID_PARAMS);
6266 rp.instance = cp->instance;
6267 rp.flags = cp->flags;
6268 rp.max_adv_data_len = tlv_data_max_len(flags, true);
6269 rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
6271 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6272 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Management command dispatch table, indexed by mgmt opcode.  Each entry
 * names the handler, the expected (minimum) parameter size, and optional
 * HCI_MGMT_* flags: VAR_LEN (variable-length parameters), UNTRUSTED
 * (callable without full management privileges), UNCONFIGURED (valid on
 * an unconfigured controller).  Slot order must match the opcode values.
 */
6277 static const struct hci_mgmt_handler mgmt_handlers[] = {
6278 { NULL }, /* 0x0000 (no command) */
6279 { read_version, MGMT_READ_VERSION_SIZE,
6281 HCI_MGMT_UNTRUSTED },
6282 { read_commands, MGMT_READ_COMMANDS_SIZE,
6284 HCI_MGMT_UNTRUSTED },
6285 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6287 HCI_MGMT_UNTRUSTED },
6288 { read_controller_info, MGMT_READ_INFO_SIZE,
6289 HCI_MGMT_UNTRUSTED },
6290 { set_powered, MGMT_SETTING_SIZE },
6291 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6292 { set_connectable, MGMT_SETTING_SIZE },
6293 { set_fast_connectable, MGMT_SETTING_SIZE },
6294 { set_bondable, MGMT_SETTING_SIZE },
6295 { set_link_security, MGMT_SETTING_SIZE },
6296 { set_ssp, MGMT_SETTING_SIZE },
6297 { set_hs, MGMT_SETTING_SIZE },
6298 { set_le, MGMT_SETTING_SIZE },
6299 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6300 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6301 { add_uuid, MGMT_ADD_UUID_SIZE },
6302 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6303 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6305 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6307 { disconnect, MGMT_DISCONNECT_SIZE },
6308 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6309 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6310 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6311 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6312 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6313 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6314 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6315 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6316 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6317 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6318 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6319 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6320 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6322 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6323 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6324 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6325 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6326 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6327 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6328 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6329 { set_advertising, MGMT_SETTING_SIZE },
6330 { set_bredr, MGMT_SETTING_SIZE },
6331 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6332 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6333 { set_secure_conn, MGMT_SETTING_SIZE },
6334 { set_debug_keys, MGMT_SETTING_SIZE },
6335 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6336 { load_irks, MGMT_LOAD_IRKS_SIZE,
6338 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6339 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6340 { add_device, MGMT_ADD_DEVICE_SIZE },
6341 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6342 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6344 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6346 HCI_MGMT_UNTRUSTED },
6347 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6348 HCI_MGMT_UNCONFIGURED |
6349 HCI_MGMT_UNTRUSTED },
6350 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6351 HCI_MGMT_UNCONFIGURED },
6352 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6353 HCI_MGMT_UNCONFIGURED },
6354 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6356 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6357 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6359 HCI_MGMT_UNTRUSTED },
6360 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6361 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6363 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
6364 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
6365 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
/* Announce a newly registered controller to management sockets.  Raw
 * devices are never announced.  Depending on configuration state this
 * emits Unconfigured Index Added or Index Added for legacy listeners,
 * and always an Extended Index Added event for extended listeners.
 */
6368 void mgmt_index_added(struct hci_dev *hdev)
6370 struct mgmt_ev_ext_index ev;
6372 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6375 switch (hdev->dev_type) {
6377 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6378 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6379 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6382 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6383 HCI_MGMT_INDEX_EVENTS);
6396 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6397 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller unregistration.  All pending management commands
 * for the device are failed with INVALID_INDEX before the matching
 * (Unconfigured) Index Removed and Extended Index Removed events are
 * emitted.  Raw devices are never announced.
 */
6400 void mgmt_index_removed(struct hci_dev *hdev)
6402 struct mgmt_ev_ext_index ev;
6403 u8 status = MGMT_STATUS_INVALID_INDEX;
6405 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6408 switch (hdev->dev_type) {
/* Fail every pending command (opcode 0 matches all). */
6410 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6412 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6413 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6414 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6417 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6418 HCI_MGMT_INDEX_EVENTS);
6431 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6432 HCI_MGMT_EXT_INDEX_EVENTS);
6435 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the proper pending
 * list (connections or reports) according to its auto-connect policy,
 * detaching it from whatever list it was on first.
 */
6436 static void restart_le_actions(struct hci_dev *hdev)
6438 struct hci_conn_params *p;
6440 list_for_each_entry(p, &hdev->le_conn_params, list) {
6441 /* Needed for AUTO_OFF case where might not "really"
6442 * have been powered off.
6444 list_del_init(&p->action);
6446 switch (p->auto_connect) {
6447 case HCI_AUTO_CONN_DIRECT:
6448 case HCI_AUTO_CONN_ALWAYS:
6449 list_add(&p->action, &hdev->pend_le_conns);
6451 case HCI_AUTO_CONN_REPORT:
6452 list_add(&p->action, &hdev->pend_le_reports);
/* Called once a power-on attempt finishes.  Restores LE auto-connect
 * actions and background scanning, then completes any pending Set
 * Powered commands and broadcasts the new settings.
 */
6460 void mgmt_power_on(struct hci_dev *hdev, int err)
6462 struct cmd_lookup match = { NULL, hdev };
6464 BT_DBG("err %d", err);
6469 restart_le_actions(hdev);
6470 hci_update_background_scan(hdev);
6473 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6475 new_settings(hdev, match.sk);
6480 hci_dev_unlock(hdev);
/* Finalize power-off on the mgmt level: complete pending Set Powered
 * commands, fail every other pending command with a status chosen by
 * cause (INVALID_INDEX on unregistration, NOT_POWERED otherwise),
 * announce a zero class of device if one was set, and broadcast the
 * new settings.
 */
6483 void __mgmt_power_off(struct hci_dev *hdev)
6485 struct cmd_lookup match = { NULL, hdev };
6486 u8 status, zero_cod[] = { 0, 0, 0 };
6488 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6490 /* If the power off is because of hdev unregistration let
6491 * use the appropriate INVALID_INDEX status. Otherwise use
6492 * NOT_POWERED. We cover both scenarios here since later in
6493 * mgmt_index_removed() any hci_conn callbacks will have already
6494 * been triggered, potentially causing misleading DISCONNECTED
6497 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6498 status = MGMT_STATUS_INVALID_INDEX;
6500 status = MGMT_STATUS_NOT_POWERED;
6502 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6504 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6505 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6506 zero_cod, sizeof(zero_cod), NULL);
6508 new_settings(hdev, match.sk);
/* Fail a pending Set Powered command.  -ERFKILL is reported to userspace
 * as RFKILLED, anything else as a generic FAILED status.
 */
6514 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6516 struct mgmt_pending_cmd *cmd;
6519 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6523 if (err == -ERFKILL)
6524 status = MGMT_STATUS_RFKILLED;
6526 status = MGMT_STATUS_FAILED;
6528 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6530 mgmt_pending_remove(cmd);
/* Emit a New Link Key event for a freshly created BR/EDR link key,
 * carrying the persistence hint so userspace knows whether to store it.
 */
6533 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6536 struct mgmt_ev_new_link_key ev;
6538 memset(&ev, 0, sizeof(ev));
6540 ev.store_hint = persistent;
6541 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6542 ev.key.addr.type = BDADDR_BREDR;
6543 ev.key.type = key->type;
6544 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6545 ev.key.pin_len = key->pin_len;
6547 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term key's type/authentication to the corresponding
 * MGMT_LTK_* constant reported to userspace.  Unknown types fall back
 * to unauthenticated.
 */
6550 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6552 switch (ltk->type) {
6555 if (ltk->authenticated)
6556 return MGMT_LTK_AUTHENTICATED;
6557 return MGMT_LTK_UNAUTHENTICATED;
6559 if (ltk->authenticated)
6560 return MGMT_LTK_P256_AUTH;
6561 return MGMT_LTK_P256_UNAUTH;
6562 case SMP_LTK_P256_DEBUG:
6563 return MGMT_LTK_P256_DEBUG;
6566 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event.  Keys tied to a non-identity random
 * address (RPA / non-resolvable) get store_hint 0 since they will not
 * survive an address change; only the significant enc_size bytes of the
 * key value are copied, the remainder is zeroed.
 */
6569 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6571 struct mgmt_ev_new_long_term_key ev;
6573 memset(&ev, 0, sizeof(ev));
6575 /* Devices using resolvable or non-resolvable random addresses
6576 * without providing an identity resolving key don't require
6577 * to store long term keys. Their addresses will change the
6580 * Only when a remote device provides an identity address
6581 * make sure the long term key is stored. If the remote
6582 * identity is known, the long term keys are internally
6583 * mapped to the identity address. So allow static random
6584 * and public addresses here.
6586 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6587 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6588 ev.store_hint = 0x00;
6590 ev.store_hint = persistent;
6592 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6593 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6594 ev.key.type = mgmt_ltk_type(key);
6595 ev.key.enc_size = key->enc_size;
6596 ev.key.ediv = key->ediv;
6597 ev.key.rand = key->rand;
6599 if (key->type == SMP_LTK)
6602 /* Make sure we copy only the significant bytes based on the
6603 * encryption key size, and set the rest of the value to zeroes.
6605 memcpy(ev.key.val, key->val, key->enc_size);
6606 memset(ev.key.val + key->enc_size, 0,
6607 sizeof(ev.key.val) - key->enc_size);
6609 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Identity Resolving Key event carrying the RPA the key
 * resolves, the identity address, and the persistence hint.
 */
6612 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6614 struct mgmt_ev_new_irk ev;
6616 memset(&ev, 0, sizeof(ev));
6618 ev.store_hint = persistent;
6620 bacpy(&ev.rpa, &irk->rpa);
6621 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6622 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6623 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6625 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Signature Resolving Key event.  As with LTKs, keys tied to
 * a non-identity random address get store_hint 0 since the peer address
 * will change and the key becomes useless.
 */
6628 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6631 struct mgmt_ev_new_csrk ev;
6633 memset(&ev, 0, sizeof(ev));
6635 /* Devices using resolvable or non-resolvable random addresses
6636 * without providing an identity resolving key don't require
6637 * to store signature resolving keys. Their addresses will change
6638 * the next time around.
6640 * Only when a remote device provides an identity address
6641 * make sure the signature resolving key is stored. So allow
6642 * static random and public addresses here.
6644 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6645 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6646 ev.store_hint = 0x00;
6648 ev.store_hint = persistent;
6650 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6651 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6652 ev.key.type = csrk->type;
6653 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6655 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE peer.  Only identity
 * addresses are reported — parameters keyed by a resolvable address
 * would be useless to store.
 */
6658 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6659 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6660 u16 max_interval, u16 latency, u16 timeout)
6662 struct mgmt_ev_new_conn_param ev;
6664 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6667 memset(&ev, 0, sizeof(ev));
6668 bacpy(&ev.addr.bdaddr, bdaddr);
6669 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6670 ev.store_hint = store_hint;
6671 ev.min_interval = cpu_to_le16(min_interval);
6672 ev.max_interval = cpu_to_le16(max_interval);
6673 ev.latency = cpu_to_le16(latency);
6674 ev.timeout = cpu_to_le16(timeout);
6676 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event.  The EIR payload is either the raw LE
 * advertising data the peer sent (LE connections), or a name/class pair
 * assembled from the supplied name and the cached device class (BR/EDR);
 * the two are kept mutually exclusive to preserve EIR field uniqueness.
 */
6679 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6680 u32 flags, u8 *name, u8 name_len)
6683 struct mgmt_ev_device_connected *ev = (void *) buf;
6686 bacpy(&ev->addr.bdaddr, &conn->dst);
6687 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6689 ev->flags = __cpu_to_le32(flags);
6691 /* We must ensure that the EIR Data fields are ordered and
6692 * unique. Keep it simple for now and avoid the problem by not
6693 * adding any BR/EDR data to the LE adv.
6695 if (conn->le_adv_data_len > 0) {
6696 memcpy(&ev->eir[eir_len],
6697 conn->le_adv_data, conn->le_adv_data_len);
6698 eir_len = conn->le_adv_data_len;
6701 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is known (non-zero). */
6704 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6705 eir_len = eir_append_data(ev->eir, eir_len,
6707 conn->dev_class, 3);
6710 ev->eir_len = cpu_to_le16(eir_len);
6712 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6713 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * and hand its originating socket back through *data so the caller can
 * attribute the subsequent Device Disconnected event to it.
 */
6716 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6718 struct sock **sk = data;
6720 cmd->cmd_complete(cmd, 0);
6725 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: emit a Device Unpaired event for the
 * address in the pending Unpair Device command, then complete and
 * remove the command.
 */
6728 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6730 struct hci_dev *hdev = data;
6731 struct mgmt_cp_unpair_device *cp = cmd->param;
6733 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6735 cmd->cmd_complete(cmd, 0);
6736 mgmt_pending_remove(cmd);
/* Report whether a Set Powered (off) command is currently pending, i.e.
 * whether the controller is in the middle of powering down.
 * NOTE(review): the parameter inspection after pending_find() is elided
 * in this excerpt — presumably the pending command's mode is checked.
 */
6739 bool mgmt_powering_down(struct hci_dev *hdev)
6741 struct mgmt_pending_cmd *cmd;
6742 struct mgmt_mode *cp;
6744 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event.  If this is the last connection
 * while powering down, the power-off work is expedited.  Pending
 * Disconnect commands are completed first (their socket is credited as
 * the event originator) and pending Unpair Device commands are resolved
 * afterwards.  Non-ACL/LE links and unannounced connections are skipped.
 */
6755 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6756 u8 link_type, u8 addr_type, u8 reason,
6757 bool mgmt_connected)
6759 struct mgmt_ev_device_disconnected ev;
6760 struct sock *sk = NULL;
6762 /* The connection is still in hci_conn_hash so test for 1
6763 * instead of 0 to know if this is the last one.
6765 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6766 cancel_delayed_work(&hdev->power_off);
6767 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6770 if (!mgmt_connected)
6773 if (link_type != ACL_LINK && link_type != LE_LINK)
6776 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6778 bacpy(&ev.addr.bdaddr, bdaddr);
6779 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6782 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6787 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt.  Pending Unpair Device commands
 * are resolved, and the pending Disconnect command matching this exact
 * address and address type is completed with the translated HCI status.
 */
6791 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6792 u8 link_type, u8 addr_type, u8 status)
6794 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6795 struct mgmt_cp_disconnect *cp;
6796 struct mgmt_pending_cmd *cmd;
6798 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6801 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only answer the command that targeted this specific peer. */
6807 if (bacmp(bdaddr, &cp->addr.bdaddr))
6810 if (cp->addr.type != bdaddr_type)
6813 cmd->cmd_complete(cmd, mgmt_status(status));
6814 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the translated HCI status.  As in
 * the disconnect path, the power-off work is expedited when this was
 * the last connection during a power-down.
 */
6817 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6818 u8 addr_type, u8 status)
6820 struct mgmt_ev_connect_failed ev;
6822 /* The connection is still in hci_conn_hash so test for 1
6823 * instead of 0 to know if this is the last one.
6825 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6826 cancel_delayed_work(&hdev->power_off);
6827 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6830 bacpy(&ev.addr.bdaddr, bdaddr);
6831 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6832 ev.status = mgmt_status(status);
6834 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer; "secure" indicates
 * whether a 16-digit secure PIN is required.
 */
6837 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6839 struct mgmt_ev_pin_code_request ev;
6841 bacpy(&ev.addr.bdaddr, bdaddr);
6842 ev.addr.type = BDADDR_BREDR;
6845 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the translated
 * HCI status.
 */
6848 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6851 struct mgmt_pending_cmd *cmd;
6853 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6857 cmd->cmd_complete(cmd, mgmt_status(status));
6858 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status.
 */
6861 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6864 struct mgmt_pending_cmd *cmd;
6866 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6870 cmd->cmd_complete(cmd, mgmt_status(status));
6871 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the numeric
 * comparison value and the confirm hint (whether userspace should show
 * the value or just ask for confirmation).
 */
6874 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6875 u8 link_type, u8 addr_type, u32 value,
6878 struct mgmt_ev_user_confirm_request ev;
6880 BT_DBG("%s", hdev->name);
6882 bacpy(&ev.addr.bdaddr, bdaddr);
6883 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6884 ev.confirm_hint = confirm_hint;
6885 ev.value = cpu_to_le32(value);
6887 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to collect a
 * passkey for the given peer.
 */
6891 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6892 u8 link_type, u8 addr_type)
6894 struct mgmt_ev_user_passkey_request ev;
6896 BT_DBG("%s", hdev->name);
6898 bacpy(&ev.addr.bdaddr, bdaddr);
6899 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6901 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the user confirm/passkey (neg-)reply
 * opcodes: find the pending command for the given opcode and complete
 * it with the translated HCI status.
 */
6905 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6906 u8 link_type, u8 addr_type, u8 status,
6909 struct mgmt_pending_cmd *cmd;
6911 cmd = pending_find(opcode, hdev);
6915 cmd->cmd_complete(cmd, mgmt_status(status));
6916 mgmt_pending_remove(cmd);
/* Complete a pending User Confirmation Reply. */
6921 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6922 u8 link_type, u8 addr_type, u8 status)
6924 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6925 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending User Confirmation Negative Reply. */
6928 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6929 u8 link_type, u8 addr_type, u8 status)
6931 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6933 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending User Passkey Reply. */
6936 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6937 u8 link_type, u8 addr_type, u8 status)
6939 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6940 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending User Passkey Negative Reply. */
6943 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6944 u8 link_type, u8 addr_type, u8 status)
6946 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6948 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * "entered" counts keypresses relayed by the remote side.
 */
6951 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6952 u8 link_type, u8 addr_type, u32 passkey,
6955 struct mgmt_ev_passkey_notify ev;
6957 BT_DBG("%s", hdev->name);
6959 bacpy(&ev.addr.bdaddr, bdaddr);
6960 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6961 ev.passkey = __cpu_to_le32(passkey);
6962 ev.entered = entered;
6964 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure on a connection: emit an Auth Failed
 * event (credited to the pairing command's socket when one is pending)
 * and complete any pending Pair Device command with the same status.
 */
6967 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6969 struct mgmt_ev_auth_failed ev;
6970 struct mgmt_pending_cmd *cmd;
6971 u8 status = mgmt_status(hci_status);
6973 bacpy(&ev.addr.bdaddr, &conn->dst);
6974 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6977 cmd = find_pairing(conn);
6979 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6980 cmd ? cmd->sk : NULL);
6983 cmd->cmd_complete(cmd, status);
6984 mgmt_pending_remove(cmd);
/* Completion of an HCI authentication-enable change.  On failure, the
 * pending Set Link Security commands get the translated error status;
 * on success, the HCI_LINK_SECURITY flag is synced to the HCI_AUTH bit,
 * the pending commands are completed, and (when the setting actually
 * changed) New Settings is broadcast.
 */
6988 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6990 struct cmd_lookup match = { NULL, hdev };
6994 u8 mgmt_err = mgmt_status(status);
6995 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6996 cmd_status_rsp, &mgmt_err);
7000 if (test_bit(HCI_AUTH, &hdev->flags))
7001 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7003 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7005 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7009 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that blanks the controller's extended
 * inquiry response, clearing the cached copy as well.  No-op when the
 * controller lacks extended-inquiry support.
 */
7015 static void clear_eir(struct hci_request *req)
7017 struct hci_dev *hdev = req->hdev;
7018 struct hci_cp_write_eir cp;
7020 if (!lmp_ext_inq_capable(hdev))
7023 memset(hdev->eir, 0, sizeof(hdev->eir));
7025 memset(&cp, 0, sizeof(cp));
7027 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of an HCI SSP mode change.  On failure the pending Set SSP
 * commands get an error status (and HS is cleared if SSP had to be
 * rolled back); on success the HCI_SSP_ENABLED flag is synced, pending
 * commands are completed, New Settings is broadcast when the state
 * changed, and the EIR plus debug-key mode are refreshed accordingly.
 */
7030 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7032 struct cmd_lookup match = { NULL, hdev };
7033 struct hci_request req;
7034 bool changed = false;
7037 u8 mgmt_err = mgmt_status(status);
/* Failed enable: roll back the SSP flag and drop HS with it. */
7039 if (enable && hci_dev_test_and_clear_flag(hdev,
7041 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7042 new_settings(hdev, NULL);
7045 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7051 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7053 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7055 changed = hci_dev_test_and_clear_flag(hdev,
7058 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7061 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7064 new_settings(hdev, match.sk);
7069 hci_req_init(&req, hdev);
7071 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7072 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7073 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7074 sizeof(enable), &enable);
7075 __hci_req_update_eir(&req);
7080 hci_req_run(&req, NULL);
7083 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7085 struct cmd_lookup *match = data;
7087 if (match->sk == NULL) {
7088 match->sk = cmd->sk;
7089 sock_hold(match->sk);
7093 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7096 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7098 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7099 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7100 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7103 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7104 dev_class, 3, NULL);
7110 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7112 struct mgmt_cp_set_local_name ev;
7113 struct mgmt_pending_cmd *cmd;
7118 memset(&ev, 0, sizeof(ev));
7119 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7120 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7122 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7124 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7126 /* If this is a HCI command related to powering on the
7127 * HCI dev don't send any mgmt signals.
7129 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7133 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7134 cmd ? cmd->sk : NULL);
/* Return true if the 128-bit UUID 'uuid' appears in the 'uuids' array
 * of 'uuid_count' entries (byte-wise comparison).
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
7149 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7153 while (parsed < eir_len) {
7154 u8 field_len = eir[0];
7161 if (eir_len - parsed < field_len + 1)
7165 case EIR_UUID16_ALL:
7166 case EIR_UUID16_SOME:
7167 for (i = 0; i + 3 <= field_len; i += 2) {
7168 memcpy(uuid, bluetooth_base_uuid, 16);
7169 uuid[13] = eir[i + 3];
7170 uuid[12] = eir[i + 2];
7171 if (has_uuid(uuid, uuid_count, uuids))
7175 case EIR_UUID32_ALL:
7176 case EIR_UUID32_SOME:
7177 for (i = 0; i + 5 <= field_len; i += 4) {
7178 memcpy(uuid, bluetooth_base_uuid, 16);
7179 uuid[15] = eir[i + 5];
7180 uuid[14] = eir[i + 4];
7181 uuid[13] = eir[i + 3];
7182 uuid[12] = eir[i + 2];
7183 if (has_uuid(uuid, uuid_count, uuids))
7187 case EIR_UUID128_ALL:
7188 case EIR_UUID128_SOME:
7189 for (i = 0; i + 17 <= field_len; i += 16) {
7190 memcpy(uuid, eir + i + 2, 16);
7191 if (has_uuid(uuid, uuid_count, uuids))
7197 parsed += field_len + 1;
7198 eir += field_len + 1;
7204 static void restart_le_scan(struct hci_dev *hdev)
7206 /* If controller is not scanning we are done. */
7207 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7210 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7211 hdev->discovery.scan_start +
7212 hdev->discovery.scan_duration))
7215 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7216 DISCOV_LE_RESTART_DELAY);
7219 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7220 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7222 /* If a RSSI threshold has been specified, and
7223 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7224 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7225 * is set, let it through for further processing, as we might need to
7228 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7229 * the results are also dropped.
7231 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7232 (rssi == HCI_RSSI_INVALID ||
7233 (rssi < hdev->discovery.rssi &&
7234 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7237 if (hdev->discovery.uuid_count != 0) {
7238 /* If a list of UUIDs is provided in filter, results with no
7239 * matching UUID should be dropped.
7241 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7242 hdev->discovery.uuids) &&
7243 !eir_has_uuids(scan_rsp, scan_rsp_len,
7244 hdev->discovery.uuid_count,
7245 hdev->discovery.uuids))
7249 /* If duplicate filtering does not report RSSI changes, then restart
7250 * scanning to ensure updated result with updated RSSI values.
7252 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7253 restart_le_scan(hdev);
7255 /* Validate RSSI value against the RSSI threshold once more. */
7256 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7257 rssi < hdev->discovery.rssi)
7264 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7265 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7266 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7269 struct mgmt_ev_device_found *ev = (void *)buf;
7272 /* Don't send events for a non-kernel initiated discovery. With
7273 * LE one exception is if we have pend_le_reports > 0 in which
7274 * case we're doing passive scanning and want these events.
7276 if (!hci_discovery_active(hdev)) {
7277 if (link_type == ACL_LINK)
7279 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7283 if (hdev->discovery.result_filtering) {
7284 /* We are using service discovery */
7285 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7290 if (hdev->discovery.limited) {
7291 /* Check for limited discoverable bit */
7293 if (!(dev_class[1] & 0x20))
7296 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
7297 if (!flags || !(flags[0] & LE_AD_LIMITED))
7302 /* Make sure that the buffer is big enough. The 5 extra bytes
7303 * are for the potential CoD field.
7305 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7308 memset(buf, 0, sizeof(buf));
7310 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7311 * RSSI value was reported as 0 when not available. This behavior
7312 * is kept when using device discovery. This is required for full
7313 * backwards compatibility with the API.
7315 * However when using service discovery, the value 127 will be
7316 * returned when the RSSI is not available.
7318 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7319 link_type == ACL_LINK)
7322 bacpy(&ev->addr.bdaddr, bdaddr);
7323 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7325 ev->flags = cpu_to_le32(flags);
7328 /* Copy EIR or advertising data into event */
7329 memcpy(ev->eir, eir, eir_len);
7331 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7333 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7336 if (scan_rsp_len > 0)
7337 /* Append scan response data to event */
7338 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7340 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7341 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7343 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7346 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7347 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7349 struct mgmt_ev_device_found *ev;
7350 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7353 ev = (struct mgmt_ev_device_found *) buf;
7355 memset(buf, 0, sizeof(buf));
7357 bacpy(&ev->addr.bdaddr, bdaddr);
7358 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7361 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7364 ev->eir_len = cpu_to_le16(eir_len);
7366 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7369 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7371 struct mgmt_ev_discovering ev;
7373 BT_DBG("%s discovering %u", hdev->name, discovering);
7375 memset(&ev, 0, sizeof(ev));
7376 ev.type = hdev->discovery.type;
7377 ev.discovering = discovering;
7379 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7382 static struct hci_mgmt_chan chan = {
7383 .channel = HCI_CHANNEL_CONTROL,
7384 .handler_count = ARRAY_SIZE(mgmt_handlers),
7385 .handlers = mgmt_handlers,
7386 .hdev_init = mgmt_init_hdev,
7391 return hci_mgmt_chan_register(&chan);
7394 void mgmt_exit(void)
7396 hci_mgmt_chan_unregister(&chan);