2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 8
/* Opcodes of every mgmt command this kernel supports; reported verbatim
 * in the Read Management Supported Commands reply (read_commands()).
 * NOTE(review): several entries and the closing "};" appear elided from
 * this excerpt -- the embedded original line numbers are non-contiguous.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
96 MGMT_OP_START_SERVICE_DISCOVERY,
/* Events this kernel may emit to mgmt sockets; reported verbatim in the
 * Read Management Supported Commands reply alongside mgmt_commands[].
 * NOTE(review): some entries and the closing "};" appear elided from
 * this excerpt.
 */
99 static const u16 mgmt_events[] = {
100 MGMT_EV_CONTROLLER_ERROR,
102 MGMT_EV_INDEX_REMOVED,
103 MGMT_EV_NEW_SETTINGS,
104 MGMT_EV_CLASS_OF_DEV_CHANGED,
105 MGMT_EV_LOCAL_NAME_CHANGED,
106 MGMT_EV_NEW_LINK_KEY,
107 MGMT_EV_NEW_LONG_TERM_KEY,
108 MGMT_EV_DEVICE_CONNECTED,
109 MGMT_EV_DEVICE_DISCONNECTED,
110 MGMT_EV_CONNECT_FAILED,
111 MGMT_EV_PIN_CODE_REQUEST,
112 MGMT_EV_USER_CONFIRM_REQUEST,
113 MGMT_EV_USER_PASSKEY_REQUEST,
115 MGMT_EV_DEVICE_FOUND,
117 MGMT_EV_DEVICE_BLOCKED,
118 MGMT_EV_DEVICE_UNBLOCKED,
119 MGMT_EV_DEVICE_UNPAIRED,
120 MGMT_EV_PASSKEY_NOTIFY,
123 MGMT_EV_DEVICE_ADDED,
124 MGMT_EV_DEVICE_REMOVED,
125 MGMT_EV_NEW_CONN_PARAM,
126 MGMT_EV_UNCONF_INDEX_ADDED,
127 MGMT_EV_UNCONF_INDEX_REMOVED,
128 MGMT_EV_NEW_CONFIG_OPTIONS,
/* Lifetime of the service (EIR/class) cache before it is refreshed. */
131 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* Fragment of struct pending_cmd (its header and other members are
 * elided in this excerpt): one mgmt command awaiting completion,
 * linked into hdev->mgmt_pending, with an optional per-opcode
 * completion callback.
 */
134 struct list_head list;
141 void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
144 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below.
 * NOTE(review): the leading success entry (HCI status 0x00) and the
 * closing "};" appear elided from this excerpt.
 */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
/* Build an mgmt event packet (header + payload) and broadcast it to all
 * mgmt control sockets via hci_send_to_control(), optionally skipping
 * @skip_sk (typically the socket that triggered the event). @hdev may
 * be NULL, in which case the index is MGMT_INDEX_NONE.
 * NOTE(review): the alloc_skb() failure check, the if/else selecting
 * the index, the skb channel/priority setup and the return statement
 * appear elided from this excerpt.
 */
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
/* Send a Command Status event (failure path) for @cmd on @index back to
 * the single requesting socket @sk.
 * NOTE(review): the alloc failure check, the ev->status assignment and
 * the kfree_skb()/return tail appear elided from this excerpt.
 */
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
/* Send a Command Complete event for @cmd with response payload @rp
 * (@rp_len bytes, may be zero) back to the requesting socket @sk.
 * NOTE(review): the alloc failure check, the ev->status assignment,
 * the rp NULL-guard around the memcpy and the kfree_skb()/return tail
 * appear elided from this excerpt.
 */
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision (MGMT_VERSION/MGMT_REVISION). Index-independent command.
 * NOTE(review): the data_len parameter line and the trailing
 * sizeof(rp) argument appear elided from this excerpt.
 */
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists, each serialized as unaligned little-endian u16s
 * directly after the counts.
 * NOTE(review): the kmalloc failure check and the kfree(rp)/return
 * tail appear elided from this excerpt.
 */
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
/* Events are appended immediately after the commands, reusing the
 * same cursor.
 */
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * BR/EDR controllers. Two passes under hci_dev_list_lock: first count
 * to size the reply, then fill it, skipping controllers that are still
 * in setup/config, bound to a user channel, or raw-only.
 * NOTE(review): the count initialization, kmalloc failure check,
 * "continue" statements and the kfree(rp)/return tail appear elided
 * from this excerpt.
 */
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* GFP_ATOMIC: allocating while holding the dev-list read lock. */
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
/* Recompute with the final count: entries may have been skipped
 * between the two passes.
 */
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list() above, but collecting controllers that still have
 * HCI_UNCONFIGURED set instead of the configured ones.
 * NOTE(review): the count initialization, kmalloc failure check,
 * "continue" statements and the kfree(rp)/return tail appear elided
 * from this excerpt.
 */
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller is configured once every externally-required option is
 * satisfied: external config done (if the quirk demands it) and a valid
 * public address set (if the bdaddr is marked invalid).
 * NOTE(review): the "return false"/"return true" statements appear
 * elided from this excerpt.
 */
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Bitmask (little-endian) of configuration options still outstanding
 * before the controller counts as configured; the option checks mirror
 * is_configured() above.
 * NOTE(review): the "u32 options = 0;" declaration appears elided from
 * this excerpt.
 */
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer id plus the
 * supported and currently-missing configuration option bitmasks.
 * NOTE(review): the "u32 options = 0;" declaration, the hci_dev_lock()
 * call and the trailing sizeof(rp) argument appear elided from this
 * excerpt.
 */
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name)
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A settable public address is only supported when the driver
 * provides a set_bdaddr callback.
 */
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Bitmask of MGMT settings this controller could support, derived from
 * its LMP feature bits: a base set always available, plus BR/EDR-,
 * SSP-, SC- and LE-dependent settings.
 * NOTE(review): the "u32 settings = 0;" declaration, some closing
 * braces and the final "return settings;" appear elided from this
 * excerpt.
 */
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_BONDABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires interlaced page scan (>= 1.2). */
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
/* Secure Connections also offered when forced via debugfs. */
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
/* Bitmask of MGMT settings currently active on the controller; each
 * bit mirrors one of the hdev->dev_flags runtime flags.
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" appear elided from this excerpt.
 */
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
/* Service class used for Device ID; excluded from the EIR UUID list. */
640 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR structure listing the registered 16-bit service UUIDs
 * to @data (at most @len bytes). The structure starts as UUID16_ALL
 * and is downgraded to UUID16_SOME when the buffer runs out. Returns
 * the advanced write pointer.
 * NOTE(review): the uuid16 declaration, the lazy creation of the EIR
 * header (uuids_start setup) and the final return appear elided from
 * this excerpt.
 */
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
/* 128-bit storage holds the 16-bit alias at offset 12. */
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
/* Emit little-endian by hand. */
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
/* Append an EIR structure listing the registered 32-bit service UUIDs;
 * same ALL/SOME scheme as create_uuid16_list() above. Returns the
 * advanced write pointer.
 * NOTE(review): the lazy EIR header setup, ptr advancement and final
 * return appear elided from this excerpt.
 */
686 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
688 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
/* 128-bit storage holds the 32-bit alias at offset 12. */
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
/* Append an EIR structure listing the registered 128-bit service UUIDs;
 * same ALL/SOME scheme as the 16/32-bit variants above. Returns the
 * advanced write pointer.
 * NOTE(review): the lazy EIR header setup, ptr advancement and final
 * return appear elided from this excerpt.
 */
719 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
720 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
/* Find the first pending mgmt command on @hdev matching @opcode.
 * NOTE(review): the "return cmd;"/"return NULL;" statements appear
 * elided from this excerpt.
 */
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
/* Like mgmt_pending_find(), but additionally match on the command's
 * user_data pointer.
 * NOTE(review): the data parameter line, the "continue;" and the
 * return statements appear elided from this excerpt.
 */
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
/* Build LE scan response data into @ptr: currently just the local name,
 * truncated to EIR_NAME_SHORT when it does not fit. Returns the total
 * length written.
 * NOTE(review): the ad_len declaration, the name-length clamp on the
 * truncation branch and the final return appear elided from this
 * excerpt.
 */
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
/* -2 for this structure's own length and type bytes. */
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name. */
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
/* Queue an HCI command updating the LE scan response data, skipping the
 * write when LE is disabled or the data is unchanged.
 * NOTE(review): early-return statements and the cp.length assignment
 * appear elided from this excerpt.
 */
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip if nothing changed. */
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Advertising-data discoverability flags (LE_AD_GENERAL/LE_AD_LIMITED),
 * preferring the value from a pending Set Discoverable command over the
 * current dev_flags.
 * NOTE(review): the "if (cmd)" guard, the cp->val == 0x01 check and the
 * final "return 0;" appear elided from this excerpt.
 */
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
/* Build LE advertising data into @ptr: discoverability flags, optional
 * no-BR/EDR flag, and the TX power level when known. Returns the total
 * length written.
 * NOTE(review): the flags EIR structure emission, ptr/ad_len updates
 * for both structures and the final return appear elided from this
 * excerpt.
 */
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI command updating the LE advertising data, skipping the
 * write when LE is disabled or the data is unchanged; mirrors
 * update_scan_rsp_data() above.
 * NOTE(review): early-return statements and the cp.length assignment
 * appear elided from this excerpt.
 */
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
/* Build the extended inquiry response payload into @data: local name
 * (complete or shortened), TX power, Device ID, then the 16/32/128-bit
 * UUID lists, each bounded by the remaining EIR space.
 * NOTE(review): local declarations, the name-truncation branch and the
 * ptr advancement after the TX power / Device ID structures appear
 * elided from this excerpt.
 */
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* UUID lists take whatever EIR space remains. */
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue a Write Extended Inquiry Response command when EIR is usable
 * (powered, controller-capable, SSP on, cache not active) and the data
 * actually changed.
 * NOTE(review): the early-return statements after each guard appear
 * elided from this excerpt.
 */
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
/* Skip the HCI round-trip if the EIR is unchanged. */
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the service byte of the Class of Device.
 * NOTE(review): the "u8 val = 0;" declaration and the final
 * "return val;" appear elided from this excerpt.
 */
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
/* Queue a Write Class of Device command built from minor/major class
 * and the registered service hints, skipping when BR/EDR is off, the
 * service cache is active, or the class is unchanged.
 * NOTE(review): the cod[3] declaration, early-return statements and
 * the limited-discoverable bit set on cod[1] appear elided from this
 * excerpt.
 */
1010 static void update_class(struct hci_request *req)
1012 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Effective connectable state: value from a pending Set Connectable
 * command if one exists, otherwise the HCI_CONNECTABLE flag.
 * NOTE(review): the "if (cmd)" guard and the "return cp->val;" inside
 * it appear elided from this excerpt.
 */
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue an HCI command turning LE advertising off.
 * NOTE(review): the "u8 enable = 0x00;" declaration appears elided
 * from this excerpt -- confirm against the full source.
 */
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue HCI commands (re)enabling LE advertising: stop any running
 * advertising first, pick a suitable own-address type, then set the
 * advertising parameters and enable flag. Bails out while an LE
 * connection exists.
 * NOTE(review): the connectable declaration, some early-return
 * statements and closing braces appear elided from this excerpt.
 */
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler: when the service cache period expires, flush
 * the cached class/EIR state back to the controller.
 * NOTE(review): the early return, hci_dev_lock() and the
 * update_class()/update_eir() calls inside the lock appear elided from
 * this excerpt.
 */
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
/* Only act when the cache flag was actually set. */
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
/* Delayed-work handler: mark the resolvable private address expired
 * and, if advertising, restart it so a fresh RPA gets programmed.
 * NOTE(review): the container_of() second line (rpa_expired.work), the
 * BT_DBG and the early return appear elided from this excerpt.
 */
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialization, keyed on the HCI_MGMT
 * flag: set up delayed work and require explicit bondable opt-in from
 * user space.
 * NOTE(review): the early return after test_and_set_bit() appears
 * elided from this excerpt.
 */
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, class and names.
 * NOTE(review): the hci_dev_lock() call and the trailing sizeof(rp)
 * argument appear elided from this excerpt.
 */
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's resources.
 * NOTE(review): the entire body is elided from this excerpt --
 * presumably it drops the socket reference and frees param/cmd;
 * confirm against the full source.
 */
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending command for @opcode, duplicate the request
 * parameters and link it into hdev->mgmt_pending. Returns the new
 * command (or NULL on allocation failure in the full source).
 * NOTE(review): the allocation failure checks, the sock_hold()/cmd->sk
 * assignment and the final return appear elided from this excerpt.
 */
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
/* Own copy of the request parameters; freed with the command. */
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command of @hdev matching @opcode
 * (opcode 0 matches all). Safe iteration, so callbacks may remove the
 * command they receive.
 * NOTE(review): the data parameter line, the "continue;" and the
 * cb(cmd, data) invocation appear elided from this excerpt.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to stop whatever discovery phase is
 * active (inquiry, LE scan, or name resolution), plus any passive LE
 * scan. Returns whether any stop command was queued.
 * NOTE(review): the ret variable, "break;"/default labels, the
 * NAME_PENDING lookup guard and the final return appear elided from
 * this excerpt.
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* LE scan path: stop the auto-disable timer too. */
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Queue HCI commands that bring the controller to a quiescent state
 * before power off: disable page/inquiry scan, stop advertising and
 * discovery, and disconnect / cancel / reject every connection
 * according to its state. Runs the request with
 * clean_up_hci_complete() as callback.
 * NOTE(review): the scan variable, hci_dev_lock/unlock, case labels,
 * several argument-continuation lines and the final return appear
 * elided from this excerpt.
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
/* Established connections: disconnect. */
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection attempts: cancel. */
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection requests: reject. */
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, refuse a second
 * concurrent request, short-circuit the auto-off case, ack no-op
 * requests, and otherwise register a pending command and schedule the
 * power_on work or the clean-up/power-off sequence.
 * NOTE(review): hci_dev_lock(), several goto/err branches and the
 * "if (cp->val)" split between the on and off paths appear elided
 * from this excerpt.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: flip the powered state synchronously. */
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
/* Requested state already active: just ack with settings. */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
/* Fragment of struct cmd_lookup (header and remaining members elided
 * in this excerpt): context passed to mgmt_pending_foreach() callbacks
 * such as settings_rsp() below.
 */
1447 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, remember the first responding socket in the
 * cmd_lookup match, and free the command.
 */
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
/* Keep a reference to the first socket so the caller can skip it
 * when broadcasting new_settings().
 */
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data and remove it.
 * NOTE(review): the "u8 *status = data;" declaration appears elided
 * from this excerpt.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: if the command registered its own
 * completion handler, invoke it with the given status; otherwise fall back
 * to a plain command-status response via cmd_status_rsp().
 */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 	if (cmd->cmd_complete) {
1480 		cmd->cmd_complete(cmd, *status);
1481 		mgmt_pending_remove(cmd);
1486 	cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's original parameter
 * buffer back in the Command Complete response.
 */
1489 static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
/* cmd_complete handler for address-based commands: the response payload is
 * only the leading struct mgmt_addr_info of the stored parameters.
 */
1495 static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 		     sizeof(struct mgmt_addr_info));
/* Map the controller's BR/EDR capability to an MGMT status:
 * NOT_SUPPORTED when the LMP features lack BR/EDR, REJECTED when BR/EDR is
 * capable but administratively disabled, SUCCESS otherwise.
 */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 	if (!lmp_bredr_capable(hdev))
1504 		return MGMT_STATUS_NOT_SUPPORTED;
1505 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 		return MGMT_STATUS_REJECTED;
1508 	return MGMT_STATUS_SUCCESS;
/* LE counterpart of mgmt_bredr_support(): NOT_SUPPORTED without LE
 * capability, REJECTED when LE is capable but disabled, SUCCESS otherwise.
 */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 	if (!lmp_le_capable(hdev))
1514 		return MGMT_STATUS_NOT_SUPPORTED;
1515 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 		return MGMT_STATUS_REJECTED;
1518 	return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable.
 *
 * On failure the pending command is answered with the mapped error and the
 * limited-discoverable flag is cleared. On success the HCI_DISCOVERABLE
 * flag is toggled, the discoverable timeout (if any) is (re)armed, the
 * settings response is sent and new_settings broadcast; finally page scan
 * is refreshed. NOTE(review): some lines are elided in this excerpt.
 */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1523 	struct pending_cmd *cmd;
1524 	struct mgmt_mode *cp;
1525 	struct hci_request req;
1528 	BT_DBG("status 0x%02x", status);
1532 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1537 		u8 mgmt_err = mgmt_status(status);
1538 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1539 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1545 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
		/* Arm the discoverable timeout (stored in seconds). */
1548 		if (hdev->discov_timeout > 0) {
1549 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1550 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1554 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1558 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1561 		new_settings(hdev, cmd->sk);
1563 	/* When the discoverable mode gets changed, make sure
1564 	 * that class of device has the limited discoverable
1565 	 * bit correctly set. Also update page scan based on whitelist
	 */
1568 	hci_req_init(&req, hdev);
1569 	hci_update_page_scan(hdev, &req);
1571 	hci_req_run(&req, NULL);
1574 	mgmt_pending_remove(cmd);
1577 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable. A timeout must be absent when disabling and present for
 * limited mode. Requires LE or BR/EDR to be enabled and (for a timeout)
 * the controller to be powered. When powered off only the flag is
 * toggled; when powered, the command builds an HCI request (IAC, scan
 * enable, advertising data) completed by set_discoverable_complete().
 * NOTE(review): several lines are elided in this excerpt.
 */
1580 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1583 	struct mgmt_cp_set_discoverable *cp = data;
1584 	struct pending_cmd *cmd;
1585 	struct hci_request req;
1590 	BT_DBG("request for %s", hdev->name);
1592 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1593 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1594 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 				  MGMT_STATUS_REJECTED);
1597 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1598 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 				  MGMT_STATUS_INVALID_PARAMS);
1601 	timeout = __le16_to_cpu(cp->timeout);
1603 	/* Disabling discoverable requires that no timeout is set,
1604 	 * and enabling limited discoverable requires a timeout.
	 */
1606 	if ((cp->val == 0x00 && timeout > 0) ||
1607 	    (cp->val == 0x02 && timeout == 0))
1608 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 				  MGMT_STATUS_INVALID_PARAMS);
1613 	if (!hdev_is_powered(hdev) && timeout > 0) {
1614 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1615 				 MGMT_STATUS_NOT_POWERED);
	/* Serialize against in-flight discoverable/connectable changes. */
1619 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1620 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1621 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
	/* Discoverable requires connectable to be enabled first. */
1626 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1627 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1628 				 MGMT_STATUS_REJECTED);
1632 	if (!hdev_is_powered(hdev)) {
1633 		bool changed = false;
1635 		/* Setting limited discoverable when powered off is
1636 		 * not a valid operation since it requires a timeout
1637 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
1639 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1640 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1644 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1649 			err = new_settings(hdev, sk);
1654 	/* If the current mode is the same, then just update the timeout
1655 	 * value with the new value. And if only the timeout gets updated,
1656 	 * then no need for any HCI transactions.
	 */
1658 	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1659 	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1660 					  &hdev->dev_flags)) {
1661 		cancel_delayed_work(&hdev->discov_off);
1662 		hdev->discov_timeout = timeout;
1664 		if (cp->val && hdev->discov_timeout > 0) {
1665 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1666 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1670 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1674 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1680 	/* Cancel any potential discoverable timeout that might be
1681 	 * still active and store new timeout value. The arming of
1682 	 * the timeout happens in the complete handler.
	 */
1684 	cancel_delayed_work(&hdev->discov_off);
1685 	hdev->discov_timeout = timeout;
1687 	/* Limited discoverable mode */
1688 	if (cp->val == 0x02)
1689 		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1691 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1693 	hci_req_init(&req, hdev);
1695 	/* The procedure for LE-only controllers is much simpler - just
1696 	 * update the advertising data.
	 */
1698 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1704 		struct hci_cp_write_current_iac_lap hci_cp;
1706 		if (cp->val == 0x02) {
1707 			/* Limited discoverable mode */
			/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian. */
1708 			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1709 			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1710 			hci_cp.iac_lap[1] = 0x8b;
1711 			hci_cp.iac_lap[2] = 0x9e;
1712 			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1713 			hci_cp.iac_lap[4] = 0x8b;
1714 			hci_cp.iac_lap[5] = 0x9e;
1716 			/* General discoverable mode */
1718 			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1719 			hci_cp.iac_lap[1] = 0x8b;
1720 			hci_cp.iac_lap[2] = 0x9e;
		/* Payload length: one byte num_iac plus 3 bytes per LAP. */
1723 		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1724 			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1726 		scan |= SCAN_INQUIRY;
1728 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1731 	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1734 	update_adv_data(&req);
1736 	err = hci_req_run(&req, set_discoverable_complete);
1738 		mgmt_pending_remove(cmd);
1741 	hci_dev_unlock(hdev);
/* Queue HCI commands that switch page-scan parameters between "fast
 * connectable" (interlaced scan, 160 ms interval) and the standard
 * defaults (1.28 s interval). Commands are only added when the new values
 * actually differ from the controller's current ones. No-op when BR/EDR is
 * disabled or the controller predates Bluetooth 1.2.
 */
1745 static void write_fast_connectable(struct hci_request *req, bool enable)
1747 	struct hci_dev *hdev = req->hdev;
1748 	struct hci_cp_write_page_scan_activity acp;
1751 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1754 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1758 		type = PAGE_SCAN_TYPE_INTERLACED;
1760 		/* 160 msec page scan interval */
1761 		acp.interval = cpu_to_le16(0x0100);
1763 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1765 		/* default 1.28 sec page scan */
1766 		acp.interval = cpu_to_le16(0x0800);
	/* 11.25 msec page scan window in both modes. */
1769 	acp.window = cpu_to_le16(0x0012);
1771 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1772 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1773 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1776 	if (hdev->page_scan_type != type)
1777 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable.
 *
 * On failure the pending command is answered with the mapped error. On
 * success HCI_CONNECTABLE is updated; disabling connectable also clears
 * HCI_DISCOVERABLE. If anything changed, new_settings is broadcast and
 * page scan, advertising data and background scanning are refreshed.
 * NOTE(review): some lines are elided in this excerpt.
 */
1780 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1782 	struct pending_cmd *cmd;
1783 	struct mgmt_mode *cp;
1784 	bool conn_changed, discov_changed;
1786 	BT_DBG("status 0x%02x", status);
1790 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1795 		u8 mgmt_err = mgmt_status(status);
1796 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1802 		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1804 		discov_changed = false;
1806 		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
		/* Turning connectable off implicitly ends discoverable. */
1808 		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1812 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1814 	if (conn_changed || discov_changed) {
1815 		new_settings(hdev, cmd->sk);
1816 		hci_update_page_scan(hdev, NULL);
1818 			mgmt_update_adv_data(hdev);
1819 		hci_update_background_scan(hdev);
1823 	mgmt_pending_remove(cmd);
1826 	hci_dev_unlock(hdev);
/* Flag-only variant of Set Connectable used when no HCI traffic is needed
 * (controller powered off, or the request was a no-op). Updates
 * HCI_CONNECTABLE (clearing HCI_DISCOVERABLE when disabling), sends the
 * settings response and, on change, refreshes scanning state and
 * broadcasts new_settings.
 */
1829 static int set_connectable_update_settings(struct hci_dev *hdev,
1830 					   struct sock *sk, u8 val)
1832 	bool changed = false;
1835 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1839 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1841 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1842 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1845 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1850 		hci_update_page_scan(hdev, NULL);
1851 		hci_update_background_scan(hdev);
1852 		return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE.
 *
 * val must be 0x00/0x01 and either LE or BR/EDR must be enabled. When the
 * controller is off, only the stored flags are updated. Otherwise an HCI
 * request is built: for LE-only controllers the advertising data is
 * refreshed; for BR/EDR the scan-enable mode is recomputed (falling back
 * to page-scan-only or fully disabled depending on the whitelist), fast
 * connectable is reset and advertising re-enabled if active. Completed by
 * set_connectable_complete(); -ENODATA means no HCI command was needed and
 * the flag-only path is taken instead.
 * NOTE(review): several lines are elided in this excerpt.
 */
1858 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1861 	struct mgmt_mode *cp = data;
1862 	struct pending_cmd *cmd;
1863 	struct hci_request req;
1867 	BT_DBG("request for %s", hdev->name);
1869 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1870 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1871 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1872 				  MGMT_STATUS_REJECTED);
1874 	if (cp->val != 0x00 && cp->val != 0x01)
1875 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1876 				  MGMT_STATUS_INVALID_PARAMS);
1880 	if (!hdev_is_powered(hdev)) {
1881 		err = set_connectable_update_settings(hdev, sk, cp->val);
1885 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1886 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1887 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1892 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1898 	hci_req_init(&req, hdev);
1900 	/* If BR/EDR is not enabled and we disable advertising as a
1901 	 * by-product of disabling connectable, we need to update the
1902 	 * advertising flags.
	 */
1904 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1906 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1907 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1909 		update_adv_data(&req);
1910 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1914 			/* If we don't have any whitelist entries just
1915 			 * disable all scanning. If there are entries
1916 			 * and we had both page and inquiry scanning
1917 			 * enabled then fall back to only page scanning.
1918 			 * Otherwise no changes are needed.
			 */
1920 			if (list_empty(&hdev->whitelist))
1921 				scan = SCAN_DISABLED;
1922 			else if (test_bit(HCI_ISCAN, &hdev->flags))
1925 				goto no_scan_update;
		/* Dropping inquiry scan makes the discoverable timer moot. */
1927 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
1928 		    hdev->discov_timeout > 0)
1929 			cancel_delayed_work(&hdev->discov_off);
1932 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1936 	/* If we're going from non-connectable to connectable or
1937 	 * vice-versa when fast connectable is enabled ensure that fast
1938 	 * connectable gets disabled. write_fast_connectable won't do
1939 	 * anything if the page scan parameters are already what they
	 * should be.
	 */
1942 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1943 		write_fast_connectable(&req, false);
1945 	/* Update the advertising parameters if necessary */
1946 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1947 		enable_advertising(&req);
1949 	err = hci_req_run(&req, set_connectable_complete);
1951 		mgmt_pending_remove(cmd);
1952 		if (err == -ENODATA)
1953 			err = set_connectable_update_settings(hdev, sk,
1959 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE: pure flag toggle of HCI_BONDABLE (no
 * HCI traffic). Validates val (0x00/0x01), sends the settings response and
 * broadcasts new_settings when the flag actually changed.
 */
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1966 	struct mgmt_mode *cp = data;
1970 	BT_DBG("request for %s", hdev->name);
1972 	if (cp->val != 0x00 && cp->val != 0x01)
1973 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 				  MGMT_STATUS_INVALID_PARAMS);
1979 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1981 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1988 		err = new_settings(hdev, sk);
1991 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY (legacy authentication).
 *
 * Requires BR/EDR support. Powered off: toggles HCI_LINK_SECURITY only.
 * Powered on: rejects a concurrent request, short-circuits when HCI_AUTH
 * already matches, otherwise sends HCI Write Authentication Enable with
 * the response deferred through the pending command.
 * NOTE(review): some lines are elided in this excerpt.
 */
1995 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1998 	struct mgmt_mode *cp = data;
1999 	struct pending_cmd *cmd;
2003 	BT_DBG("request for %s", hdev->name);
2005 	status = mgmt_bredr_support(hdev);
2007 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2010 	if (cp->val != 0x00 && cp->val != 0x01)
2011 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 				  MGMT_STATUS_INVALID_PARAMS);
2016 	if (!hdev_is_powered(hdev)) {
2017 		bool changed = false;
2019 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2020 					  &hdev->dev_flags)) {
2021 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2025 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2030 			err = new_settings(hdev, sk);
2035 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2036 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2043 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2044 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2048 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2054 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2056 		mgmt_pending_remove(cmd);
2061 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).
 *
 * Requires BR/EDR and SSP capability. Powered off: toggles HCI_SSP_ENABLED
 * and, when disabling SSP, also clears HCI_HS_ENABLED (HS depends on SSP).
 * Powered on: rejects while a SET_SSP/SET_HS is pending, short-circuits
 * when already in the requested state, otherwise disables SSP debug mode
 * if needed and sends HCI Write Simple Pairing Mode, response deferred.
 * NOTE(review): some lines are elided in this excerpt.
 */
2065 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 	struct mgmt_mode *cp = data;
2068 	struct pending_cmd *cmd;
2072 	BT_DBG("request for %s", hdev->name);
2074 	status = mgmt_bredr_support(hdev);
2076 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2078 	if (!lmp_ssp_capable(hdev))
2079 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2080 				  MGMT_STATUS_NOT_SUPPORTED);
2082 	if (cp->val != 0x00 && cp->val != 0x01)
2083 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2084 				  MGMT_STATUS_INVALID_PARAMS);
2088 	if (!hdev_is_powered(hdev)) {
2092 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
2095 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
2098 				changed = test_and_clear_bit(HCI_HS_ENABLED,
2101 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2104 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2109 			err = new_settings(hdev, sk);
2114 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2115 	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2116 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2121 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2122 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2126 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	/* Disabling SSP must also turn off SSP debug-key mode. */
2132 	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2133 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2134 			     sizeof(cp->val), &cp->val);
2136 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2138 		mgmt_pending_remove(cmd);
2143 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP support).
 *
 * Requires BR/EDR, SSP capability and SSP to be enabled. This is a pure
 * flag toggle of HCI_HS_ENABLED; disabling while powered is rejected here.
 * Sends the settings response and broadcasts new_settings on change.
 * NOTE(review): some lines are elided in this excerpt.
 */
2147 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2149 	struct mgmt_mode *cp = data;
2154 	BT_DBG("request for %s", hdev->name);
2156 	status = mgmt_bredr_support(hdev);
2158 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2160 	if (!lmp_ssp_capable(hdev))
2161 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2162 				  MGMT_STATUS_NOT_SUPPORTED);
2164 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2165 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2166 				  MGMT_STATUS_REJECTED);
2168 	if (cp->val != 0x00 && cp->val != 0x01)
2169 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2170 				  MGMT_STATUS_INVALID_PARAMS);
2175 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2177 		if (hdev_is_powered(hdev)) {
2178 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2179 					 MGMT_STATUS_REJECTED);
2183 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2186 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2191 		err = new_settings(hdev, sk);
2194 	hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE.
 *
 * On failure, all pending SET_LE commands are failed with the mapped
 * status. On success they are answered with the current settings and
 * new_settings is broadcast; if LE ended up enabled, the advertising and
 * scan response data are refreshed and background scanning is restarted.
 * NOTE(review): some lines are elided in this excerpt.
 */
2198 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2200 	struct cmd_lookup match = { NULL, hdev };
2203 		u8 mgmt_err = mgmt_status(status);
2205 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2210 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2212 	new_settings(hdev, match.sk);
2217 	/* Make sure the controller has a good default for
2218 	 * advertising data. Restrict the update to when LE
2219 	 * has actually been enabled. During power on, the
2220 	 * update in powered_update_hci will take care of it.
	 */
2222 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2223 		struct hci_request req;
2227 		hci_req_init(&req, hdev);
2228 		update_adv_data(&req);
2229 		update_scan_rsp_data(&req);
2230 		hci_req_run(&req, NULL);
2232 		hci_update_background_scan(hdev);
2234 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE.
 *
 * Requires LE capability; LE-only (no BR/EDR) controllers may not toggle
 * LE off. When powered off or the host-LE state already matches, only the
 * HCI_LE_ENABLED flag (and HCI_ADVERTISING when disabling) is updated.
 * Otherwise, after rejecting concurrent SET_LE/SET_ADVERTISING requests,
 * an HCI Write LE Host Supported request is issued (disabling any active
 * advertising first), completed by le_enable_complete().
 * NOTE(review): some lines are elided in this excerpt.
 */
2238 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2240 	struct mgmt_mode *cp = data;
2241 	struct hci_cp_write_le_host_supported hci_cp;
2242 	struct pending_cmd *cmd;
2243 	struct hci_request req;
2247 	BT_DBG("request for %s", hdev->name);
2249 	if (!lmp_le_capable(hdev))
2250 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2251 				  MGMT_STATUS_NOT_SUPPORTED);
2253 	if (cp->val != 0x00 && cp->val != 0x01)
2254 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2255 				  MGMT_STATUS_INVALID_PARAMS);
2257 	/* LE-only devices do not allow toggling LE on/off */
2258 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2259 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2260 				  MGMT_STATUS_REJECTED);
2265 	enabled = lmp_host_le_capable(hdev);
2267 	if (!hdev_is_powered(hdev) || val == enabled) {
2268 		bool changed = false;
2270 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2271 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2275 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2276 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2280 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2285 			err = new_settings(hdev, sk);
2290 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2291 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2292 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2297 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2303 	hci_req_init(&req, hdev);
2305 	memset(&hci_cp, 0, sizeof(hci_cp));
2309 		hci_cp.simul = 0x00;
	/* Stop active LE advertising before turning host LE support off. */
2311 		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2312 			disable_advertising(&req);
2315 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2318 	err = hci_req_run(&req, le_enable_complete);
2320 		mgmt_pending_remove(cmd);
2323 	hci_dev_unlock(hdev);
2327 /* This is a helper function to test for pending mgmt commands that can
2328  * cause CoD or EIR HCI commands. We can only allow one such pending
2329  * mgmt command at a time since otherwise we cannot easily track what
2330  * the current values are, will be, and based on that calculate if a new
2331  * HCI command needs to be sent and if yes with what value.
 */
2333 static bool pending_eir_or_class(struct hci_dev *hdev)
2335 	struct pending_cmd *cmd;
	/* Scan the pending list for any command that touches CoD/EIR. */
2337 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2338 		switch (cmd->opcode) {
2339 		case MGMT_OP_ADD_UUID:
2340 		case MGMT_OP_REMOVE_UUID:
2341 		case MGMT_OP_SET_DEV_CLASS:
2342 		case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805f9b34fb) stored in
 * little-endian byte order; used to recognize 16/32-bit SIG UUIDs.
 */
2350 static const u8 bluetooth_base_uuid[] = {
2351 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2352 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID (little-endian): if its low 12 bytes do not
 * match the Bluetooth Base UUID it is a true 128-bit UUID; otherwise the
 * value in bytes 12..15 decides between the 16- and 32-bit shortened
 * forms. NOTE(review): the return statements are elided in this excerpt.
 */
2355 static u8 get_uuid_size(const u8 *uuid)
2359 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2362 	val = get_unaligned_le32(&uuid[12]);
/* Common completion helper for the class-of-device related commands
 * (Add/Remove UUID, Set Device Class): answer the pending command for
 * @mgmt_op with the mapped status and the current 3-byte device class.
 */
2369 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2371 	struct pending_cmd *cmd;
2375 	cmd = mgmt_pending_find(mgmt_op, hdev);
2379 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2380 		     hdev->dev_class, 3);
2382 	mgmt_pending_remove(cmd);
2385 	hci_dev_unlock(hdev);
/* HCI request completion handler for Add UUID; delegates to the shared
 * class-command completion helper.
 */
2388 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2390 	BT_DBG("status 0x%02x", status);
2392 	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: register a service UUID.
 *
 * Rejected while another CoD/EIR-affecting command is pending. The UUID
 * (with its service hint and computed size) is appended to hdev->uuids,
 * then an HCI request updating class/EIR is run; -ENODATA (nothing to
 * send) completes immediately with the current device class, otherwise
 * the response is deferred to add_uuid_complete().
 * NOTE(review): some lines are elided in this excerpt.
 */
2395 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2397 	struct mgmt_cp_add_uuid *cp = data;
2398 	struct pending_cmd *cmd;
2399 	struct hci_request req;
2400 	struct bt_uuid *uuid;
2403 	BT_DBG("request for %s", hdev->name);
2407 	if (pending_eir_or_class(hdev)) {
2408 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2413 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2419 	memcpy(uuid->uuid, cp->uuid, 16);
2420 	uuid->svc_hint = cp->svc_hint;
2421 	uuid->size = get_uuid_size(cp->uuid);
2423 	list_add_tail(&uuid->list, &hdev->uuids);
2425 	hci_req_init(&req, hdev);
2430 	err = hci_req_run(&req, add_uuid_complete);
2432 		if (err != -ENODATA)
	/* No HCI traffic was needed: reply with the current class now. */
2435 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2436 				   hdev->dev_class, 3);
2440 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2449 	hci_dev_unlock(hdev);
/* Arm the service cache on a powered controller: set HCI_SERVICE_CACHE
 * and schedule the cache-flush delayed work if it was not already set.
 * NOTE(review): the return statements are elided in this excerpt.
 */
2453 static bool enable_service_cache(struct hci_dev *hdev)
2455 	if (!hdev_is_powered(hdev))
2458 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2459 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion handler for Remove UUID; delegates to the shared
 * class-command completion helper.
 */
2467 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2469 	BT_DBG("status 0x%02x", status);
2471 	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: unregister a service UUID.
 *
 * Rejected while another CoD/EIR-affecting command is pending. An all-zero
 * UUID clears the whole list (possibly just arming the service cache);
 * otherwise every matching entry is unlinked, with INVALID_PARAMS when
 * none matched. An HCI request then refreshes class/EIR, completing via
 * remove_uuid_complete() (or immediately on -ENODATA).
 * NOTE(review): some lines are elided in this excerpt.
 */
2474 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2477 	struct mgmt_cp_remove_uuid *cp = data;
2478 	struct pending_cmd *cmd;
2479 	struct bt_uuid *match, *tmp;
2480 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2481 	struct hci_request req;
2484 	BT_DBG("request for %s", hdev->name);
2488 	if (pending_eir_or_class(hdev)) {
2489 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
	/* All-zero UUID acts as a wildcard: clear every registered UUID. */
2494 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2495 		hci_uuids_clear(hdev);
2497 		if (enable_service_cache(hdev)) {
2498 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2499 					   0, hdev->dev_class, 3);
2508 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2509 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2512 		list_del(&match->list);
2518 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2519 				 MGMT_STATUS_INVALID_PARAMS);
2524 	hci_req_init(&req, hdev);
2529 	err = hci_req_run(&req, remove_uuid_complete);
2531 		if (err != -ENODATA)
2534 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2535 				   hdev->dev_class, 3);
2539 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2548 	hci_dev_unlock(hdev);
/* HCI request completion handler for Set Device Class; delegates to the
 * shared class-command completion helper.
 */
2552 static void set_class_complete(struct hci_dev *hdev, u8 status)
2554 	BT_DBG("status 0x%02x", status);
2556 	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: change the Class of Device.
 *
 * Requires BR/EDR capability; rejected while another CoD/EIR command is
 * pending. The minor class must have its two low bits clear and the major
 * class its three high bits clear. When powered off only the stored values
 * are updated; otherwise (flushing the service cache first if armed) an
 * HCI request updates class/EIR and completes via set_class_complete().
 * NOTE(review): some lines are elided in this excerpt.
 */
2559 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2562 	struct mgmt_cp_set_dev_class *cp = data;
2563 	struct pending_cmd *cmd;
2564 	struct hci_request req;
2567 	BT_DBG("request for %s", hdev->name);
2569 	if (!lmp_bredr_capable(hdev))
2570 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2571 				  MGMT_STATUS_NOT_SUPPORTED);
2575 	if (pending_eir_or_class(hdev)) {
2576 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
	/* Reserved bits: minor[1:0] and major[7:5] must be zero. */
2581 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2582 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2583 				 MGMT_STATUS_INVALID_PARAMS);
2587 	hdev->major_class = cp->major;
2588 	hdev->minor_class = cp->minor;
2590 	if (!hdev_is_powered(hdev)) {
2591 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2592 				   hdev->dev_class, 3);
2596 	hci_req_init(&req, hdev);
2598 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock: cancel_delayed_work_sync() may sleep. */
2599 		hci_dev_unlock(hdev);
2600 		cancel_delayed_work_sync(&hdev->service_cache);
2607 	err = hci_req_run(&req, set_class_complete);
2609 		if (err != -ENODATA)
2612 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2613 				   hdev->dev_class, 3);
2617 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2626 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: bulk-load BR/EDR link keys.
 *
 * Validates the key count against the maximum that fits in a u16-sized
 * message, checks that the payload length matches exactly, that
 * debug_keys is 0/1 and every key has a BR/EDR address and a known type
 * (<= 0x08). Existing keys are cleared, the keep-debug-keys policy flag is
 * updated, and each key is stored — except debug-combination keys, which
 * are always skipped so a fresh pairing is forced.
 * NOTE(review): some lines are elided in this excerpt.
 */
2630 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2633 	struct mgmt_cp_load_link_keys *cp = data;
2634 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2635 				   sizeof(struct mgmt_link_key_info));
2636 	u16 key_count, expected_len;
2640 	BT_DBG("request for %s", hdev->name);
2642 	if (!lmp_bredr_capable(hdev))
2643 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2644 				  MGMT_STATUS_NOT_SUPPORTED);
2646 	key_count = __le16_to_cpu(cp->key_count);
2647 	if (key_count > max_key_count) {
2648 		BT_ERR("load_link_keys: too big key_count value %u",
2650 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2651 				  MGMT_STATUS_INVALID_PARAMS);
	/* The message length must match the declared key count exactly. */
2654 	expected_len = sizeof(*cp) + key_count *
2655 					sizeof(struct mgmt_link_key_info);
2656 	if (expected_len != len) {
2657 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2659 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2660 				  MGMT_STATUS_INVALID_PARAMS);
2663 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2664 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2665 				  MGMT_STATUS_INVALID_PARAMS);
2667 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	/* Validate every entry before touching the stored key list. */
2670 	for (i = 0; i < key_count; i++) {
2671 		struct mgmt_link_key_info *key = &cp->keys[i];
2673 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2674 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2675 					  MGMT_STATUS_INVALID_PARAMS);
2680 	hci_link_keys_clear(hdev);
2683 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2686 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2690 		new_settings(hdev, NULL);
2692 	for (i = 0; i < key_count; i++) {
2693 		struct mgmt_link_key_info *key = &cp->keys[i];
2695 		/* Always ignore debug keys and require a new pairing if
2696 		 * the user wants to use them.
		 */
2698 		if (key->type == HCI_LK_DEBUG_COMBINATION)
2701 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2702 				 key->type, key->pin_len, NULL);
2705 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2707 	hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except @skip_sk (the socket that requested the unpair).
 */
2712 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2713 			   u8 addr_type, struct sock *skip_sk)
2715 	struct mgmt_ev_device_unpaired ev;
2717 	bacpy(&ev.addr.bdaddr, bdaddr);
2718 	ev.addr.type = addr_type;
2720 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: remove stored keys for a device and
 * optionally disconnect it.
 *
 * For BR/EDR the link key is removed; for LE the IRK and LTKs are removed
 * and connection-parameter cleanup is deferred until the link closes (so
 * a re-pairing can keep them). NOT_PAIRED is returned when no key
 * existed. If a live connection was found and disconnect was requested, a
 * pending command is registered and HCI Disconnect (reason 0x13) is sent;
 * otherwise the command completes immediately and DEVICE_UNPAIRED is
 * broadcast. NOTE(review): some lines are elided in this excerpt.
 */
2724 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2727 	struct mgmt_cp_unpair_device *cp = data;
2728 	struct mgmt_rp_unpair_device rp;
2729 	struct hci_cp_disconnect dc;
2730 	struct pending_cmd *cmd;
2731 	struct hci_conn *conn;
2734 	memset(&rp, 0, sizeof(rp));
2735 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2736 	rp.addr.type = cp->addr.type;
2738 	if (!bdaddr_type_is_valid(cp->addr.type))
2739 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2740 				    MGMT_STATUS_INVALID_PARAMS,
2743 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2744 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2745 				    MGMT_STATUS_INVALID_PARAMS,
2750 	if (!hdev_is_powered(hdev)) {
2751 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2752 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2756 	if (cp->addr.type == BDADDR_BREDR) {
2757 		/* If disconnection is requested, then look up the
2758 		 * connection. If the remote device is connected, it
2759 		 * will be later used to terminate the link.
		 *
2761 		 * Setting it to NULL explicitly will cause no
2762 		 * termination of the link.
		 */
2765 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2770 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2774 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2777 			/* Defer clearing up the connection parameters
2778 			 * until closing to give a chance of keeping
2779 			 * them if a repairing happens.
			 */
2781 			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2783 			/* If disconnection is not requested, then
2784 			 * clear the connection variable so that the
2785 			 * link is not terminated.
			 */
2787 			if (!cp->disconnect)
2791 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2792 			addr_type = ADDR_LE_DEV_PUBLIC;
2794 			addr_type = ADDR_LE_DEV_RANDOM;
2796 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2798 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2802 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2803 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2807 	/* If the connection variable is set, then termination of the
2808 	 * link is requested.
	 */
2811 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2813 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2817 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2824 	dc.handle = cpu_to_le16(conn->handle);
2825 	dc.reason = 0x13; /* Remote User Terminated Connection */
2826 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2828 		mgmt_pending_remove(cmd);
2831 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: tear down an active link.
 *
 * Validates the address type, requires the adapter to be up, allows only
 * one DISCONNECT in flight, and returns NOT_CONNECTED when no usable
 * connection exists. Otherwise registers a pending command (completed via
 * generic_cmd_complete) and initiates hci_disconnect() with the
 * remote-user-terminated reason.
 * NOTE(review): some lines are elided in this excerpt.
 */
2835 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2838 	struct mgmt_cp_disconnect *cp = data;
2839 	struct mgmt_rp_disconnect rp;
2840 	struct pending_cmd *cmd;
2841 	struct hci_conn *conn;
2846 	memset(&rp, 0, sizeof(rp));
2847 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2848 	rp.addr.type = cp->addr.type;
2850 	if (!bdaddr_type_is_valid(cp->addr.type))
2851 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2852 				    MGMT_STATUS_INVALID_PARAMS,
2857 	if (!test_bit(HCI_UP, &hdev->flags)) {
2858 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2859 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2863 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2864 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2865 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2869 	if (cp->addr.type == BDADDR_BREDR)
2870 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2873 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2875 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2876 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2877 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2881 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2887 	cmd->cmd_complete = generic_cmd_complete;
2889 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2891 		mgmt_pending_remove(cmd);
2894 	hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the corresponding
 * mgmt BDADDR_* address type; unknown values fall back to LE Random or
 * BR/EDR respectively.
 */
2898 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2900 	switch (link_type) {
2902 		switch (addr_type) {
2903 		case ADDR_LE_DEV_PUBLIC:
2904 			return BDADDR_LE_PUBLIC;
2907 			/* Fallback to LE Random address type */
2908 			return BDADDR_LE_RANDOM;
2912 		/* Fallback to BR/EDR type */
2913 		return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: list mgmt-visible connections.
 *
 * Requires the controller to be powered. Counts connections flagged
 * HCI_CONN_MGMT_CONNECTED, allocates a response sized for that count,
 * fills it while skipping SCO/eSCO links, and recomputes the payload
 * length from the number actually written before replying.
 * NOTE(review): some lines are elided in this excerpt.
 */
2917 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2920 	struct mgmt_rp_get_connections *rp;
2930 	if (!hdev_is_powered(hdev)) {
2931 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2932 				 MGMT_STATUS_NOT_POWERED);
2937 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2938 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2942 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2943 	rp = kmalloc(rp_len, GFP_KERNEL);
2950 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2951 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2953 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2954 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		/* SCO/eSCO links are not reported over mgmt. */
2955 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2960 	rp->conn_count = cpu_to_le16(i);
2962 	/* Recalculate length in case of filtered SCO connections, etc */
2963 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2965 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2971 	hci_dev_unlock(hdev);
/* Register a pending PIN_CODE_NEG_REPLY command and send the HCI PIN Code
 * Negative Reply for the given address; the pending entry is dropped
 * again if the HCI send fails.
 */
2975 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2976 				   struct mgmt_cp_pin_code_neg_reply *cp)
2978 	struct pending_cmd *cmd;
2981 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2986 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2987 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2989 		mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY.
 *
 * Requires power and an existing ACL connection to the address. If the
 * pending security level demands a 16-digit PIN and the supplied one is
 * shorter, the reply is converted into a negative reply toward the
 * controller while the caller gets INVALID_PARAMS. Otherwise a pending
 * command (addr_cmd_complete) is registered and HCI PIN Code Reply sent.
 * NOTE(review): some lines are elided in this excerpt.
 */
2994 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2997 	struct hci_conn *conn;
2998 	struct mgmt_cp_pin_code_reply *cp = data;
2999 	struct hci_cp_pin_code_reply reply;
3000 	struct pending_cmd *cmd;
3007 	if (!hdev_is_powered(hdev)) {
3008 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3009 				 MGMT_STATUS_NOT_POWERED);
3013 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3015 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3016 				 MGMT_STATUS_NOT_CONNECTED);
3020 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3021 		struct mgmt_cp_pin_code_neg_reply ncp;
3023 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3025 		BT_ERR("PIN code is not 16 bytes long");
3027 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3029 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3030 					 MGMT_STATUS_INVALID_PARAMS);
3035 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3041 	cmd->cmd_complete = addr_cmd_complete;
3043 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3044 	reply.pin_len = cp->pin_len;
3045 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3047 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3049 		mgmt_pending_remove(cmd);
3052 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairing attempts in hdev->io_capability.
 *
 * Values above SMP_IO_KEYBOARD_DISPLAY are rejected with
 * MGMT_STATUS_INVALID_PARAMS; success returns an empty command complete.
 */
3056 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3059 struct mgmt_cp_set_io_capability *cp = data;
3063 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3064 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3065 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3069 hdev->io_capability = cp->io_capability;
3071 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3072 hdev->io_capability);
3074 hci_dev_unlock(hdev);
3076 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Look up the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at the given connection. Walks hdev->mgmt_pending; entries with a
 * different opcode or a different connection are skipped.
 * NOTE(review): the return statements are elided in this view.
 */
3080 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3082 struct hci_dev *hdev = conn->hdev;
3083 struct pending_cmd *cmd;
3085 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3086 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3089 if (cmd->user_data != conn)
/* Finalize a Pair Device command: report the result to user space,
 * detach the pairing callbacks from the connection, drop the connection
 * reference and remove the pending command.
 *
 * @cmd:    pending MGMT_OP_PAIR_DEVICE command (user_data is the conn)
 * @status: mgmt status code to report
 */
3098 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3100 struct mgmt_rp_pair_device rp;
3101 struct hci_conn *conn = cmd->user_data;
3103 bacpy(&rp.addr.bdaddr, &conn->dst);
3104 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3106 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3109 /* So we don't get further callbacks for this connection */
3110 conn->connect_cfm_cb = NULL;
3111 conn->security_cfm_cb = NULL;
3112 conn->disconn_cfm_cb = NULL;
3114 hci_conn_drop(conn);
3117 mgmt_pending_remove(cmd);
3119 /* The device is paired so there is no need to remove
3120 * its connection parameters anymore.
3122 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over LE finishes. Maps the
 * boolean result to a mgmt status and completes the matching pending
 * Pair Device command, if one exists for this connection.
 */
3125 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3127 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3128 struct pending_cmd *cmd;
3130 cmd = find_pairing(conn);
3132 cmd->cmd_complete(cmd, status);
/* BR/EDR connect/security/disconnect callback installed by pair_device().
 * Completes the pending Pair Device command with the translated HCI
 * status; logs when no pending command is found.
 */
3135 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3137 struct pending_cmd *cmd;
3139 BT_DBG("status %u", status);
3141 cmd = find_pairing(conn);
3143 BT_DBG("Unable to find a pending command");
3145 cmd->cmd_complete(cmd, mgmt_status(status));
/* LE counterpart of pairing_complete_cb(). Installed on LE connections
 * by pair_device(); completes the pending Pair Device command with the
 * translated HCI status.
 * NOTE(review): extra early-exit conditions appear elided in this view
 * (original lines 3153-3156 are missing).
 */
3148 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3150 struct pending_cmd *cmd;
3152 BT_DBG("status %u", status);
3157 cmd = find_pairing(conn);
3159 BT_DBG("Unable to find a pending command");
3161 cmd->cmd_complete(cmd, mgmt_status(status));
/* Handle MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a remote
 * device over BR/EDR or LE.
 *
 * Validates the address type and requested IO capability, requires the
 * adapter to be powered, then creates the transport connection
 * (hci_connect_acl() for BR/EDR, hci_connect_le() for LE, after seeding
 * connection parameters). A pending command with pairing callbacks
 * tracks completion; if the link is already up and secure the command
 * completes immediately.
 */
3164 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3167 struct mgmt_cp_pair_device *cp = data;
3168 struct mgmt_rp_pair_device rp;
3169 struct pending_cmd *cmd;
3170 u8 sec_level, auth_type;
3171 struct hci_conn *conn;
3176 memset(&rp, 0, sizeof(rp));
3177 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3178 rp.addr.type = cp->addr.type;
3180 if (!bdaddr_type_is_valid(cp->addr.type))
3181 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3182 MGMT_STATUS_INVALID_PARAMS,
3185 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3186 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3187 MGMT_STATUS_INVALID_PARAMS,
3192 if (!hdev_is_powered(hdev)) {
3193 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3194 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3198 sec_level = BT_SECURITY_MEDIUM;
3199 auth_type = HCI_AT_DEDICATED_BONDING;
/* Pick the transport based on the address type. */
3201 if (cp->addr.type == BDADDR_BREDR) {
3202 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3207 /* Convert from L2CAP channel address type to HCI address type
3209 if (cp->addr.type == BDADDR_LE_PUBLIC)
3210 addr_type = ADDR_LE_DEV_PUBLIC;
3212 addr_type = ADDR_LE_DEV_RANDOM;
3214 /* When pairing a new device, it is expected to remember
3215 * this device for future connections. Adding the connection
3216 * parameter information ahead of time allows tracking
3217 * of the slave preferred values and will speed up any
3218 * further connection establishment.
3220 * If connection parameters already exist, then they
3221 * will be kept and this function does nothing.
3223 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3225 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3226 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection creation failed: map -EBUSY to BUSY, everything
 * else to CONNECT_FAILED.
 */
3233 if (PTR_ERR(conn) == -EBUSY)
3234 status = MGMT_STATUS_BUSY;
3236 status = MGMT_STATUS_CONNECT_FAILED;
3238 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already set means another pairing is using
 * this connection.
 */
3244 if (conn->connect_cfm_cb) {
3245 hci_conn_drop(conn);
3246 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3247 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3251 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3254 hci_conn_drop(conn);
3258 cmd->cmd_complete = pairing_complete;
3260 /* For LE, just connecting isn't a proof that the pairing finished */
3261 if (cp->addr.type == BDADDR_BREDR) {
3262 conn->connect_cfm_cb = pairing_complete_cb;
3263 conn->security_cfm_cb = pairing_complete_cb;
3264 conn->disconn_cfm_cb = pairing_complete_cb;
3266 conn->connect_cfm_cb = le_pairing_complete_cb;
3267 conn->security_cfm_cb = le_pairing_complete_cb;
3268 conn->disconn_cfm_cb = le_pairing_complete_cb;
3271 conn->io_capability = cp->io_cap;
3272 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish right away. */
3274 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3275 hci_conn_security(conn, sec_level, auth_type, true))
3276 pairing_complete(cmd, 0);
3281 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an ongoing Pair Device
 * command for the given address.
 *
 * Requires the adapter to be powered and a pending MGMT_OP_PAIR_DEVICE
 * whose connection matches the supplied bdaddr; the pairing is then
 * completed with MGMT_STATUS_CANCELLED and the address echoed back.
 */
3285 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3288 struct mgmt_addr_info *addr = data;
3289 struct pending_cmd *cmd;
3290 struct hci_conn *conn;
3297 if (!hdev_is_powered(hdev)) {
3298 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3299 MGMT_STATUS_NOT_POWERED);
3303 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3305 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3306 MGMT_STATUS_INVALID_PARAMS);
3310 conn = cmd->user_data;
3312 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3313 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3314 MGMT_STATUS_INVALID_PARAMS);
3318 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3320 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3321 addr, sizeof(*addr));
3323 hci_dev_unlock(hdev);
/* Common handler for all user pairing responses (PIN negative reply,
 * user confirm reply/neg-reply, passkey reply/neg-reply).
 *
 * @mgmt_op: mgmt opcode to answer with
 * @hci_op:  HCI opcode to forward to the controller (BR/EDR path)
 * @passkey: passkey value; used only for HCI_OP_USER_PASSKEY_REPLY
 *
 * LE addresses are routed to the SMP layer via smp_user_confirm_reply();
 * BR/EDR responses are queued as a pending command and forwarded over
 * HCI, with passkey replies carrying the extra passkey payload.
 */
3327 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3328 struct mgmt_addr_info *addr, u16 mgmt_op,
3329 u16 hci_op, __le32 passkey)
3331 struct pending_cmd *cmd;
3332 struct hci_conn *conn;
3337 if (!hdev_is_powered(hdev)) {
3338 err = cmd_complete(sk, hdev->id, mgmt_op,
3339 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address type. */
3344 if (addr->type == BDADDR_BREDR)
3345 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3347 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3350 err = cmd_complete(sk, hdev->id, mgmt_op,
3351 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to SMP, not to the controller. */
3356 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3357 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3359 err = cmd_complete(sk, hdev->id, mgmt_op,
3360 MGMT_STATUS_SUCCESS, addr,
3363 err = cmd_complete(sk, hdev->id, mgmt_op,
3364 MGMT_STATUS_FAILED, addr,
3370 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3376 cmd->cmd_complete = addr_cmd_complete;
3378 /* Continue with pairing via HCI */
3379 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3380 struct hci_cp_user_passkey_reply cp;
3382 bacpy(&cp.bdaddr, &addr->bdaddr);
3383 cp.passkey = passkey;
3384 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3386 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3390 mgmt_pending_remove(cmd);
3393 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3397 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3398 void *data, u16 len)
3400 struct mgmt_cp_pin_code_neg_reply *cp = data;
3404 return user_pairing_resp(sk, hdev, &cp->addr,
3405 MGMT_OP_PIN_CODE_NEG_REPLY,
3406 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY: validate the payload length, then
 * delegate to user_pairing_resp() with the matching HCI opcode.
 */
3409 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3412 struct mgmt_cp_user_confirm_reply *cp = data;
3416 if (len != sizeof(*cp))
3417 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3418 MGMT_STATUS_INVALID_PARAMS);
3420 return user_pairing_resp(sk, hdev, &cp->addr,
3421 MGMT_OP_USER_CONFIRM_REPLY,
3422 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3425 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3426 void *data, u16 len)
3428 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3432 return user_pairing_resp(sk, hdev, &cp->addr,
3433 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3434 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: thin wrapper delegating to
 * user_pairing_resp(), forwarding the user-supplied passkey.
 */
3437 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3440 struct mgmt_cp_user_passkey_reply *cp = data;
3444 return user_pairing_resp(sk, hdev, &cp->addr,
3445 MGMT_OP_USER_PASSKEY_REPLY,
3446 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3449 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3450 void *data, u16 len)
3452 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3456 return user_pairing_resp(sk, hdev, &cp->addr,
3457 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3458 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command to the request, copying the
 * current hdev->dev_name into the command parameter.
 */
3461 static void update_name(struct hci_request *req)
3463 struct hci_dev *hdev = req->hdev;
3464 struct hci_cp_write_local_name cp;
3466 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3468 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name. Answers the
 * pending MGMT_OP_SET_LOCAL_NAME command: a failure status is mapped
 * via mgmt_status(), success returns a command complete; the pending
 * command is removed either way.
 */
3471 static void set_name_complete(struct hci_dev *hdev, u8 status)
3473 struct mgmt_cp_set_local_name *cp;
3474 struct pending_cmd *cmd;
3476 BT_DBG("status 0x%02x", status);
3480 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3487 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3488 mgmt_status(status));
3490 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3493 mgmt_pending_remove(cmd);
3496 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the adapter's long and short
 * names.
 *
 * Unchanged names return success immediately. When the adapter is off,
 * the names are stored and a Local Name Changed event is emitted with
 * no HCI traffic. Otherwise a pending command is queued and an HCI
 * request is built to write the name (and, on LE-capable controllers,
 * refresh the scan response data, which carries the name).
 */
3499 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3502 struct mgmt_cp_set_local_name *cp = data;
3503 struct pending_cmd *cmd;
3504 struct hci_request req;
3511 /* If the old values are the same as the new ones just return a
3512 * direct command complete event.
3514 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3515 !memcmp(hdev->short_name, cp->short_name,
3516 sizeof(hdev->short_name))) {
3517 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3522 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3524 if (!hdev_is_powered(hdev)) {
3525 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3527 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3532 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3538 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3544 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3546 hci_req_init(&req, hdev);
3548 if (lmp_bredr_capable(hdev)) {
3553 /* The name is stored in the scan response data and so
3554 * no need to udpate the advertising data here.
3556 if (lmp_le_capable(hdev))
3557 update_scan_rsp_data(&req);
3559 err = hci_req_run(&req, set_name_complete);
3561 mgmt_pending_remove(cmd);
3564 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request OOB pairing data from the
 * controller.
 *
 * Requires a powered, SSP-capable adapter and no read already in
 * flight. Sends the extended read when BR/EDR Secure Connections is
 * enabled, the legacy read otherwise; the pending command is removed if
 * the HCI send fails.
 */
3568 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3569 void *data, u16 data_len)
3571 struct pending_cmd *cmd;
3574 BT_DBG("%s", hdev->name);
3578 if (!hdev_is_powered(hdev)) {
3579 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3580 MGMT_STATUS_NOT_POWERED);
3584 if (!lmp_ssp_capable(hdev)) {
3585 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3586 MGMT_STATUS_NOT_SUPPORTED);
3590 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3591 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3596 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3602 if (bredr_sc_enabled(hdev))
3603 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3606 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store OOB pairing data received
 * out of band for a remote device.
 *
 * Dispatches on payload length: the base size carries one hash/rand
 * pair (P-192), the extended size carries both P-192 and P-256 values.
 * Only BDADDR_BREDR addresses are accepted in both variants; any other
 * length is rejected with MGMT_STATUS_INVALID_PARAMS.
 */
3616 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3617 void *data, u16 len)
3621 BT_DBG("%s ", hdev->name);
3625 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3626 struct mgmt_cp_add_remote_oob_data *cp = data;
3629 if (cp->addr.type != BDADDR_BREDR) {
3630 err = cmd_complete(sk, hdev->id,
3631 MGMT_OP_ADD_REMOTE_OOB_DATA,
3632 MGMT_STATUS_INVALID_PARAMS,
3633 &cp->addr, sizeof(cp->addr));
3637 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3638 cp->addr.type, cp->hash,
3639 cp->rand, NULL, NULL);
3641 status = MGMT_STATUS_FAILED;
3643 status = MGMT_STATUS_SUCCESS;
3645 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3646 status, &cp->addr, sizeof(cp->addr));
3647 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3648 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3649 u8 *rand192, *hash192;
3652 if (cp->addr.type != BDADDR_BREDR) {
3653 err = cmd_complete(sk, hdev->id,
3654 MGMT_OP_ADD_REMOTE_OOB_DATA,
3655 MGMT_STATUS_INVALID_PARAMS,
3656 &cp->addr, sizeof(cp->addr));
/* NOTE(review): the LE branch body is elided here; for BR/EDR the
 * P-192 values are taken straight from the command payload.
 */
3660 if (bdaddr_type_is_le(cp->addr.type)) {
3664 rand192 = cp->rand192;
3665 hash192 = cp->hash192;
3668 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3669 cp->addr.type, hash192, rand192,
3670 cp->hash256, cp->rand256);
3672 status = MGMT_STATUS_FAILED;
3674 status = MGMT_STATUS_SUCCESS;
3676 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3677 status, &cp->addr, sizeof(cp->addr));
3679 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3680 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3681 MGMT_STATUS_INVALID_PARAMS);
3685 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data.
 *
 * Only BDADDR_BREDR is accepted. BDADDR_ANY clears all stored entries;
 * otherwise only the matching entry is removed, with INVALID_PARAMS
 * reported when the removal fails.
 */
3689 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3690 void *data, u16 len)
3692 struct mgmt_cp_remove_remote_oob_data *cp = data;
3696 BT_DBG("%s", hdev->name);
3698 if (cp->addr.type != BDADDR_BREDR)
3699 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3700 MGMT_STATUS_INVALID_PARAMS,
3701 &cp->addr, sizeof(cp->addr));
3705 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3706 hci_remote_oob_data_clear(hdev);
3707 status = MGMT_STATUS_SUCCESS;
3711 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3713 status = MGMT_STATUS_INVALID_PARAMS;
3715 status = MGMT_STATUS_SUCCESS;
3718 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3719 status, &cp->addr, sizeof(cp->addr));
3721 hci_dev_unlock(hdev);
/* Build the HCI commands needed to start discovery of the configured
 * type onto @req.
 *
 * BR/EDR discovery flushes the inquiry cache and queues an Inquiry with
 * the GIAC. LE and interleaved discovery first stop advertising and any
 * background scan if needed, then queue active scan parameters and a
 * scan enable. On rejection *status is set to the mgmt error to report.
 * NOTE(review): return statements are elided in this view; presumably
 * the function returns false on the *status-setting paths and true once
 * commands were queued — confirm against the full source.
 */
3725 static bool trigger_discovery(struct hci_request *req, u8 *status)
3727 struct hci_dev *hdev = req->hdev;
3728 struct hci_cp_le_set_scan_param param_cp;
3729 struct hci_cp_le_set_scan_enable enable_cp;
3730 struct hci_cp_inquiry inq_cp;
3731 /* General inquiry access code (GIAC) */
3732 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3736 switch (hdev->discovery.type) {
3737 case DISCOV_TYPE_BREDR:
3738 *status = mgmt_bredr_support(hdev);
3742 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3743 *status = MGMT_STATUS_BUSY;
3747 hci_inquiry_cache_flush(hdev);
3749 memset(&inq_cp, 0, sizeof(inq_cp));
3750 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3751 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3752 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3755 case DISCOV_TYPE_LE:
3756 case DISCOV_TYPE_INTERLEAVED:
3757 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well. */
3761 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3762 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3763 *status = MGMT_STATUS_NOT_SUPPORTED;
3767 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3768 /* Don't let discovery abort an outgoing
3769 * connection attempt that's using directed
3772 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3774 *status = MGMT_STATUS_REJECTED;
3778 disable_advertising(req);
3781 /* If controller is scanning, it means the background scanning
3782 * is running. Thus, we should temporarily stop it in order to
3783 * set the discovery scanning parameters.
3785 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3786 hci_req_add_le_scan_disable(req);
3788 memset(&param_cp, 0, sizeof(param_cp));
3790 /* All active scans will be done with either a resolvable
3791 * private address (when privacy feature has been enabled)
3792 * or unresolvable private address.
3794 err = hci_update_random_address(req, true, &own_addr_type);
3796 *status = MGMT_STATUS_FAILED;
3800 param_cp.type = LE_SCAN_ACTIVE;
3801 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3802 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3803 param_cp.own_address_type = own_addr_type;
3804 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3807 memset(&enable_cp, 0, sizeof(enable_cp));
3808 enable_cp.enable = LE_SCAN_ENABLE;
3809 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3810 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3815 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery.
 *
 * Answers whichever pending command is found with the discovery type,
 * then moves the discovery state: STOPPED on failure, FINDING on
 * success. LE and interleaved discovery also schedule the delayed
 * le_scan_disable work with a type-specific timeout.
 */
3822 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3824 struct pending_cmd *cmd;
3825 unsigned long timeout;
3827 BT_DBG("status %d", status);
3831 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3833 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3836 u8 type = hdev->discovery.type;
3838 cmd_complete(cmd->sk, hdev->id, cmd->opcode,
3839 mgmt_status(status), &type, sizeof(type));
3840 mgmt_pending_remove(cmd);
3844 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3848 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3850 switch (hdev->discovery.type) {
3851 case DISCOV_TYPE_LE:
3852 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3854 case DISCOV_TYPE_INTERLEAVED:
3855 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3857 case DISCOV_TYPE_BREDR:
3861 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3867 queue_delayed_work(hdev->workqueue,
3868 &hdev->le_scan_disable, timeout);
3871 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_DISCOVERY: begin device discovery of the given
 * type.
 *
 * Requires a powered adapter, a STOPPED discovery state and no periodic
 * inquiry. Clears any previous discovery filter, records the requested
 * type, builds the discovery HCI request via trigger_discovery() and
 * runs it with start_discovery_complete(); on success the discovery
 * state moves to STARTING.
 */
3874 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3875 void *data, u16 len)
3877 struct mgmt_cp_start_discovery *cp = data;
3878 struct pending_cmd *cmd;
3879 struct hci_request req;
3883 BT_DBG("%s", hdev->name);
3887 if (!hdev_is_powered(hdev)) {
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3889 MGMT_STATUS_NOT_POWERED,
3890 &cp->type, sizeof(cp->type));
3894 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3895 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3896 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3897 MGMT_STATUS_BUSY, &cp->type,
3902 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3908 /* Clear the discovery filter first to free any previously
3909 * allocated memory for the UUID list.
3911 hci_discovery_filter_clear(hdev);
3913 hdev->discovery.type = cp->type;
3915 hci_req_init(&req, hdev);
3917 if (!trigger_discovery(&req, &status)) {
3918 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3919 status, &cp->type, sizeof(cp->type));
3920 mgmt_pending_remove(cmd);
3924 err = hci_req_run(&req, start_discovery_complete);
3926 mgmt_pending_remove(cmd);
3930 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3933 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery() but
 * with an RSSI threshold and an optional UUID filter list.
 *
 * Validates power/discovery state, bounds-checks uuid_count against the
 * largest value that fits in a u16-sized payload, and verifies the
 * payload length matches uuid_count (16 bytes per UUID). The filter
 * (rssi, uuid_count, kmemdup'd UUID list) is stored on hdev->discovery
 * before the discovery HCI request is built and run.
 */
3937 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3938 void *data, u16 len)
3940 struct mgmt_cp_start_service_discovery *cp = data;
3941 struct pending_cmd *cmd;
3942 struct hci_request req;
3943 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3944 u16 uuid_count, expected_len;
3948 BT_DBG("%s", hdev->name);
3952 if (!hdev_is_powered(hdev)) {
3953 err = cmd_complete(sk, hdev->id,
3954 MGMT_OP_START_SERVICE_DISCOVERY,
3955 MGMT_STATUS_NOT_POWERED,
3956 &cp->type, sizeof(cp->type));
3960 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3961 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3962 err = cmd_complete(sk, hdev->id,
3963 MGMT_OP_START_SERVICE_DISCOVERY,
3964 MGMT_STATUS_BUSY, &cp->type,
/* Guard against a uuid_count that would overflow the u16 length
 * math below.
 */
3969 uuid_count = __le16_to_cpu(cp->uuid_count);
3970 if (uuid_count > max_uuid_count) {
3971 BT_ERR("service_discovery: too big uuid_count value %u",
3973 err = cmd_complete(sk, hdev->id,
3974 MGMT_OP_START_SERVICE_DISCOVERY,
3975 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3980 expected_len = sizeof(*cp) + uuid_count * 16;
3981 if (expected_len != len) {
3982 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3984 err = cmd_complete(sk, hdev->id,
3985 MGMT_OP_START_SERVICE_DISCOVERY,
3986 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3991 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3998 /* Clear the discovery filter first to free any previously
3999 * allocated memory for the UUID list.
4001 hci_discovery_filter_clear(hdev);
4003 hdev->discovery.type = cp->type;
4004 hdev->discovery.rssi = cp->rssi;
4005 hdev->discovery.uuid_count = uuid_count;
4007 if (uuid_count > 0) {
4008 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4010 if (!hdev->discovery.uuids) {
4011 err = cmd_complete(sk, hdev->id,
4012 MGMT_OP_START_SERVICE_DISCOVERY,
4014 &cp->type, sizeof(cp->type));
4015 mgmt_pending_remove(cmd);
4020 hci_req_init(&req, hdev);
4022 if (!trigger_discovery(&req, &status)) {
4023 err = cmd_complete(sk, hdev->id,
4024 MGMT_OP_START_SERVICE_DISCOVERY,
4025 status, &cp->type, sizeof(cp->type));
4026 mgmt_pending_remove(cmd);
4030 err = hci_req_run(&req, start_discovery_complete);
4032 mgmt_pending_remove(cmd);
4036 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4039 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery. Completes the
 * pending MGMT_OP_STOP_DISCOVERY command with the discovery type and
 * moves the discovery state to STOPPED.
 */
4043 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4045 struct pending_cmd *cmd;
4047 BT_DBG("status %d", status);
4051 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4053 u8 type = hdev->discovery.type;
4055 cmd_complete(cmd->sk, hdev->id, cmd->opcode,
4056 mgmt_status(status), &type, sizeof(type));
4057 mgmt_pending_remove(cmd);
4061 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4063 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery session.
 *
 * Rejects the call when no discovery is active or when the requested
 * type does not match the running one. Otherwise queues a pending
 * command, builds a stop request via hci_stop_discovery() and runs it;
 * if the request produced no HCI commands (-ENODATA) the command is
 * completed immediately and the state set to STOPPED.
 */
4066 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4069 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4070 struct pending_cmd *cmd;
4071 struct hci_request req;
4074 BT_DBG("%s", hdev->name);
4078 if (!hci_discovery_active(hdev)) {
4079 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4080 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4081 sizeof(mgmt_cp->type));
4085 if (hdev->discovery.type != mgmt_cp->type) {
4086 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4087 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4088 sizeof(mgmt_cp->type));
4092 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
4098 hci_req_init(&req, hdev);
4100 hci_stop_discovery(&req);
4102 err = hci_req_run(&req, stop_discovery_complete);
4104 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4108 mgmt_pending_remove(cmd);
4110 /* If no HCI commands were sent we're done */
4111 if (err == -ENODATA) {
4112 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4113 &mgmt_cp->type, sizeof(mgmt_cp->type));
4114 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4118 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: tell the kernel whether the name of a
 * discovered device is already known to user space.
 *
 * Requires an active discovery and an inquiry-cache entry with unknown
 * name state for the address. Known names are marked NAME_KNOWN;
 * unknown ones are marked NAME_NEEDED and queued for resolving.
 */
4122 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4125 struct mgmt_cp_confirm_name *cp = data;
4126 struct inquiry_entry *e;
4129 BT_DBG("%s", hdev->name);
4133 if (!hci_discovery_active(hdev)) {
4134 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4135 MGMT_STATUS_FAILED, &cp->addr,
4140 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4142 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4143 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4148 if (cp->name_known) {
4149 e->name_state = NAME_KNOWN;
4152 e->name_state = NAME_NEEDED;
4153 hci_inquiry_cache_update_resolve(hdev, e);
4156 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4160 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add an address to the adapter blacklist.
 *
 * Validates the address type, inserts the entry via
 * hci_bdaddr_list_add() and, on success, broadcasts a Device Blocked
 * event to other mgmt sockets before replying with the address.
 */
4164 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4167 struct mgmt_cp_block_device *cp = data;
4171 BT_DBG("%s", hdev->name);
4173 if (!bdaddr_type_is_valid(cp->addr.type))
4174 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4175 MGMT_STATUS_INVALID_PARAMS,
4176 &cp->addr, sizeof(cp->addr));
4180 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4183 status = MGMT_STATUS_FAILED;
4187 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4189 status = MGMT_STATUS_SUCCESS;
4192 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4193 &cp->addr, sizeof(cp->addr));
4195 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove an address from the adapter
 * blacklist.
 *
 * Mirrors block_device(): validates the address type, deletes the entry
 * via hci_bdaddr_list_del() (failure maps to INVALID_PARAMS) and on
 * success broadcasts a Device Unblocked event to other mgmt sockets.
 */
4200 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4203 struct mgmt_cp_unblock_device *cp = data;
4207 BT_DBG("%s", hdev->name);
4209 if (!bdaddr_type_is_valid(cp->addr.type))
4210 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4211 MGMT_STATUS_INVALID_PARAMS,
4212 &cp->addr, sizeof(cp->addr));
4216 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4219 status = MGMT_STATUS_INVALID_PARAMS;
4223 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4225 status = MGMT_STATUS_SUCCESS;
4228 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4229 &cp->addr, sizeof(cp->addr));
4231 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record (source,
 * vendor, product, version) on the adapter.
 *
 * Only source values 0x0000-0x0002 are accepted. After storing the
 * values an HCI request is built and run — presumably to refresh the
 * EIR data carrying the Device ID; the update call itself is elided in
 * this view (original lines 4262).
 */
4236 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4239 struct mgmt_cp_set_device_id *cp = data;
4240 struct hci_request req;
4244 BT_DBG("%s", hdev->name);
4246 source = __le16_to_cpu(cp->source);
4248 if (source > 0x0002)
4249 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4250 MGMT_STATUS_INVALID_PARAMS);
4254 hdev->devid_source = source;
4255 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4256 hdev->devid_product = __le16_to_cpu(cp->product);
4257 hdev->devid_version = __le16_to_cpu(cp->version);
4259 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4261 hci_req_init(&req, hdev);
4263 hci_req_run(&req, NULL);
4265 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising.
 *
 * On failure every pending SET_ADVERTISING command gets the mapped
 * error status. On success the HCI_ADVERTISING setting flag is synced
 * with the controller's HCI_LE_ADV state, pending commands receive the
 * new settings, and a New Settings event is emitted if anything changed.
 */
4270 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4272 struct cmd_lookup match = { NULL, hdev };
4275 u8 mgmt_err = mgmt_status(status);
4277 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4278 cmd_status_rsp, &mgmt_err);
4282 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4283 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4285 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4287 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4290 new_settings(hdev, match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: switch LE advertising on or off.
 *
 * Requires LE support and a 0/1 value. When no HCI traffic is possible
 * or needed (powered off, no change, LE links present, or an active LE
 * scan in progress) only the setting flag is toggled and user space
 * notified. Otherwise a pending command is queued and an
 * enable/disable_advertising request is run, completing via
 * set_advertising_complete().
 */
4296 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4299 struct mgmt_mode *cp = data;
4300 struct pending_cmd *cmd;
4301 struct hci_request req;
4302 u8 val, enabled, status;
4305 BT_DBG("request for %s", hdev->name);
4307 status = mgmt_le_support(hdev);
4309 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4312 if (cp->val != 0x00 && cp->val != 0x01)
4313 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4314 MGMT_STATUS_INVALID_PARAMS);
4319 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4321 /* The following conditions are ones which mean that we should
4322 * not do any HCI communication but directly send a mgmt
4323 * response to user space (after toggling the flag if
4326 if (!hdev_is_powered(hdev) || val == enabled ||
4327 hci_conn_num(hdev, LE_LINK) > 0 ||
4328 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4329 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4330 bool changed = false;
4332 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4333 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4337 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4342 err = new_settings(hdev, sk);
/* Only one advertising or LE toggle may be in flight at a time. */
4347 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4348 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4349 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4354 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4360 hci_req_init(&req, hdev);
4363 enable_advertising(&req);
4365 disable_advertising(&req);
4367 err = hci_req_run(&req, set_advertising_complete);
4369 mgmt_pending_remove(cmd);
4372 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address.
 *
 * Only allowed on an LE-capable adapter while powered off. A non-ANY
 * address must not be BDADDR_NONE and must have the two most
 * significant bits set, as required for static random addresses; the
 * value is stored in hdev->static_addr.
 */
4376 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4377 void *data, u16 len)
4379 struct mgmt_cp_set_static_address *cp = data;
4382 BT_DBG("%s", hdev->name);
4384 if (!lmp_le_capable(hdev))
4385 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4386 MGMT_STATUS_NOT_SUPPORTED);
4388 if (hdev_is_powered(hdev))
4389 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4390 MGMT_STATUS_REJECTED);
4392 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4393 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4394 return cmd_status(sk, hdev->id,
4395 MGMT_OP_SET_STATIC_ADDRESS,
4396 MGMT_STATUS_INVALID_PARAMS);
4398 /* Two most significant bits shall be set */
4399 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4400 return cmd_status(sk, hdev->id,
4401 MGMT_OP_SET_STATIC_ADDRESS,
4402 MGMT_STATUS_INVALID_PARAMS);
4407 bacpy(&hdev->static_addr, &cp->bdaddr);
4409 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4411 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: configure the LE scan interval and
 * window.
 *
 * Both values must lie in 0x0004-0x4000 and the window must not exceed
 * the interval. If a background (passive) scan is running while no
 * discovery is active, it is restarted so the new parameters take
 * effect immediately.
 */
4416 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4417 void *data, u16 len)
4419 struct mgmt_cp_set_scan_params *cp = data;
4420 __u16 interval, window;
4423 BT_DBG("%s", hdev->name);
4425 if (!lmp_le_capable(hdev))
4426 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4427 MGMT_STATUS_NOT_SUPPORTED);
4429 interval = __le16_to_cpu(cp->interval);
4431 if (interval < 0x0004 || interval > 0x4000)
4432 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4433 MGMT_STATUS_INVALID_PARAMS);
4435 window = __le16_to_cpu(cp->window);
4437 if (window < 0x0004 || window > 0x4000)
4438 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4439 MGMT_STATUS_INVALID_PARAMS);
4441 if (window > interval)
4442 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4443 MGMT_STATUS_INVALID_PARAMS);
4447 hdev->le_scan_interval = interval;
4448 hdev->le_scan_window = window;
4450 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4452 /* If background scan is running, restart it so new parameters are
4455 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4456 hdev->discovery.state == DISCOVERY_STOPPED) {
4457 struct hci_request req;
4459 hci_req_init(&req, hdev);
4461 hci_req_add_le_scan_disable(&req);
4462 hci_req_add_le_passive_scan(&req);
4464 hci_req_run(&req, NULL);
4467 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable.
 *
 * On failure the pending command gets the mapped error status. On
 * success the HCI_FAST_CONNECTABLE flag is set/cleared to match the
 * requested mode, the settings response is sent and a New Settings
 * event is emitted; the pending command is removed either way.
 */
4472 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4474 struct pending_cmd *cmd;
4476 BT_DBG("status 0x%02x", status);
4480 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4485 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4486 mgmt_status(status));
4488 struct mgmt_mode *cp = cmd->param;
4491 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4493 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4495 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4496 new_settings(hdev, cmd->sk);
4499 mgmt_pending_remove(cmd);
4502 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page
 * scan parameters.
 *
 * Requires BR/EDR enabled on a controller of at least Bluetooth 1.2, a
 * 0/1 value, a powered adapter and the Connectable setting active.
 * Duplicate toggles in flight are rejected as busy; a no-op request
 * just re-sends the current settings. Otherwise a pending command is
 * queued and write_fast_connectable() builds the HCI request, completed
 * by fast_connectable_complete().
 */
4505 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4506 void *data, u16 len)
4508 struct mgmt_mode *cp = data;
4509 struct pending_cmd *cmd;
4510 struct hci_request req;
4513 BT_DBG("%s", hdev->name);
4515 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4516 hdev->hci_ver < BLUETOOTH_VER_1_2)
4517 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4518 MGMT_STATUS_NOT_SUPPORTED);
4520 if (cp->val != 0x00 && cp->val != 0x01)
4521 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4522 MGMT_STATUS_INVALID_PARAMS);
4524 if (!hdev_is_powered(hdev))
4525 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4526 MGMT_STATUS_NOT_POWERED);
4528 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4529 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4530 MGMT_STATUS_REJECTED);
4534 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4535 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4540 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4541 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4546 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4553 hci_req_init(&req, hdev);
4555 write_fast_connectable(&req, cp->val);
4557 err = hci_req_run(&req, fast_connectable_complete);
4559 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4560 MGMT_STATUS_FAILED);
4561 mgmt_pending_remove(cmd);
4565 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by set_bredr().
 * On failure restores the HCI_BREDR_ENABLED flag (which set_bredr() set
 * optimistically before running the request) and reports the error;
 * on success sends the settings response and broadcasts new settings.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
4570 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4572 struct pending_cmd *cmd;
4574 BT_DBG("status 0x%02x", status);
4578 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4583 u8 mgmt_err = mgmt_status(status);
4585 /* We need to restore the flag if related HCI commands
4588 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4590 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4592 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4593 new_settings(hdev, cmd->sk);
4596 mgmt_pending_remove(cmd);
4599 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE capable) controller. Requires LE to stay enabled. When
 * powered off it just toggles flags; when powered on, disabling is
 * rejected and enabling queues an HCI request (page scan, adv data)
 * completed by set_bredr_complete().
 * NOTE(review): listing has elided lines (gaps in original numbering);
 * verify control flow against complete source.
 */
4602 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4604 struct mgmt_mode *cp = data;
4605 struct pending_cmd *cmd;
4606 struct hci_request req;
4609 BT_DBG("request for %s", hdev->name);
/* Only meaningful on controllers that support both BR/EDR and LE. */
4611 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4612 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4613 MGMT_STATUS_NOT_SUPPORTED);
/* LE must remain enabled; BR/EDR-only operation uses a different path. */
4615 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4616 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4617 MGMT_STATUS_REJECTED);
4619 if (cp->val != 0x00 && cp->val != 0x01)
4620 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4621 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: reply with current settings. */
4625 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4626 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered-off path: flip flags only; BR/EDR-dependent flags are
 * cleared when BR/EDR is being disabled.
 */
4630 if (!hdev_is_powered(hdev)) {
4632 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4633 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4634 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4635 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4636 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4639 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4641 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4645 err = new_settings(hdev, sk);
4649 /* Reject disabling when powered on */
4651 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4652 MGMT_STATUS_REJECTED);
4656 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4657 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4662 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4668 /* We need to flip the bit already here so that update_adv_data
4669 * generates the correct flags.
4671 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4673 hci_req_init(&req, hdev);
4675 write_fast_connectable(&req, false);
4676 hci_update_page_scan(hdev, &req);
4678 /* Since only the advertising data flags will change, there
4679 * is no need to update the scan response data.
4681 update_adv_data(&req);
4683 err = hci_req_run(&req, set_bredr_complete);
4685 mgmt_pending_remove(cmd);
4688 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: enable (0x01), enable SC-only mode
 * (0x02), or disable (0x00) Secure Connections. When the controller is
 * unpowered / not SC-capable / BR/EDR disabled, only flags are toggled;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is sent and SC_ONLY is tracked.
 * NOTE(review): listing has elided lines (gaps in original numbering);
 * verify control flow against complete source.
 */
4692 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4693 void *data, u16 len)
4695 struct mgmt_mode *cp = data;
4696 struct pending_cmd *cmd;
4700 BT_DBG("request for %s", hdev->name);
/* Supported when LE is enabled, or controller is SC capable, or the
 * force-SC debugfs override is set.
 */
4702 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4703 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4704 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4705 MGMT_STATUS_NOT_SUPPORTED);
4707 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4708 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4709 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI traffic possible or needed. */
4713 if (!hdev_is_powered(hdev) ||
4714 (!lmp_sc_capable(hdev) &&
4715 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4716 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4720 changed = !test_and_set_bit(HCI_SC_ENABLED,
4722 if (cp->val == 0x02)
4723 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4725 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4727 changed = test_and_clear_bit(HCI_SC_ENABLED,
4729 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4732 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4737 err = new_settings(hdev, sk);
4742 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4743 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both SC_ENABLED and SC_ONLY already match the request. */
4750 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4751 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4752 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4756 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4762 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4764 mgmt_pending_remove(cmd);
4768 if (cp->val == 0x02)
4769 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4771 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4774 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 disables, 0x01 keeps debug keys,
 * 0x02 additionally uses SSP debug mode. Tracks HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS flags and, when powered with SSP enabled and the
 * use-flag changed, writes the SSP debug mode to the controller.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
4778 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4779 void *data, u16 len)
4781 struct mgmt_mode *cp = data;
4782 bool changed, use_changed;
4785 BT_DBG("request for %s", hdev->name);
4787 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4788 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4789 MGMT_STATUS_INVALID_PARAMS);
4794 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4797 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 means actively use debug keys, not just retain them. */
4800 if (cp->val == 0x02)
4801 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4804 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new SSP debug mode to the controller only when it can
 * take effect (powered, SSP enabled) and the mode actually changed.
 */
4807 if (hdev_is_powered(hdev) && use_changed &&
4808 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4809 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4811 sizeof(mode), &mode);
4814 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4819 err = new_settings(hdev, sk);
4822 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy. Only allowed
 * while powered off. Enabling stores the supplied IRK and forces RPA
 * regeneration (HCI_RPA_EXPIRED); disabling wipes the IRK. Supporting
 * this command implies userspace handles IRKs, so HCI_RPA_RESOLVING is
 * set unconditionally.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
4826 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4829 struct mgmt_cp_set_privacy *cp = cp_data;
4833 BT_DBG("request for %s", hdev->name);
4835 if (!lmp_le_capable(hdev))
4836 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4837 MGMT_STATUS_NOT_SUPPORTED);
4839 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4840 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4841 MGMT_STATUS_INVALID_PARAMS);
/* Privacy can only be reconfigured while the controller is off. */
4843 if (hdev_is_powered(hdev))
4844 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4845 MGMT_STATUS_REJECTED);
4849 /* If user space supports this command it is also expected to
4850 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4852 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4855 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4856 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4857 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4859 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4860 memset(hdev->irk, 0, sizeof(hdev->irk));
4861 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4864 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4869 err = new_settings(hdev, sk);
4872 hci_dev_unlock(hdev);
/* Validate the address portion of an IRK entry from MGMT_OP_LOAD_IRKS:
 * public LE addresses are always valid; random LE addresses must be
 * static (two most significant bits set).
 * NOTE(review): the return statements are elided in this listing.
 */
4876 static bool irk_is_valid(struct mgmt_irk_info *irk)
4878 switch (irk->addr.type) {
4879 case BDADDR_LE_PUBLIC:
4882 case BDADDR_LE_RANDOM:
4883 /* Two most significant bits shall be set */
4884 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's Identity Resolving Key
 * store. Validates the count against U16_MAX-derived bounds and the exact
 * expected payload length, validates every entry first (all-or-nothing),
 * then clears and repopulates the SMP IRK list and enables RPA resolving.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
4892 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4895 struct mgmt_cp_load_irks *cp = cp_data;
/* Largest count that keeps sizeof(*cp) + n * entry_size within u16. */
4896 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4897 sizeof(struct mgmt_irk_info));
4898 u16 irk_count, expected_len;
4901 BT_DBG("request for %s", hdev->name);
4903 if (!lmp_le_capable(hdev))
4904 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4905 MGMT_STATUS_NOT_SUPPORTED);
4907 irk_count = __le16_to_cpu(cp->irk_count);
4908 if (irk_count > max_irk_count) {
4909 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4910 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4911 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared count exactly. */
4914 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4915 if (expected_len != len) {
4916 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4918 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4919 MGMT_STATUS_INVALID_PARAMS);
4922 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate everything before touching the existing IRK store. */
4924 for (i = 0; i < irk_count; i++) {
4925 struct mgmt_irk_info *key = &cp->irks[i];
4927 if (!irk_is_valid(key))
4928 return cmd_status(sk, hdev->id,
4930 MGMT_STATUS_INVALID_PARAMS);
4935 hci_smp_irks_clear(hdev);
4937 for (i = 0; i < irk_count; i++) {
4938 struct mgmt_irk_info *irk = &cp->irks[i];
4941 if (irk->addr.type == BDADDR_LE_PUBLIC)
4942 addr_type = ADDR_LE_DEV_PUBLIC;
4944 addr_type = ADDR_LE_DEV_RANDOM;
4946 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4950 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4952 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4954 hci_dev_unlock(hdev);
/* Validate a long-term-key entry from MGMT_OP_LOAD_LONG_TERM_KEYS:
 * master must be a strict boolean, public LE addresses are valid, and
 * random LE addresses must be static (top two bits set).
 * NOTE(review): the return statements are elided in this listing.
 */
4959 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4961 if (key->master != 0x00 && key->master != 0x01)
4964 switch (key->addr.type) {
4965 case BDADDR_LE_PUBLIC:
4968 case BDADDR_LE_RANDOM:
4969 /* Two most significant bits shall be set */
4970 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the SMP long-term-key
 * store. Mirrors load_irks(): bound/length checks, validate-all-first,
 * then clear and repopulate, mapping each mgmt LTK type to the kernel
 * SMP key type and authentication level.
 * NOTE(review): listing has elided lines (break statements, braces);
 * verify against complete source.
 */
4978 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4979 void *cp_data, u16 len)
4981 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Largest count that keeps the total payload size within u16. */
4982 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4983 sizeof(struct mgmt_ltk_info));
4984 u16 key_count, expected_len;
4987 BT_DBG("request for %s", hdev->name);
4989 if (!lmp_le_capable(hdev))
4990 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4991 MGMT_STATUS_NOT_SUPPORTED);
4993 key_count = __le16_to_cpu(cp->key_count);
4994 if (key_count > max_key_count) {
4995 BT_ERR("load_ltks: too big key_count value %u", key_count);
4996 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4997 MGMT_STATUS_INVALID_PARAMS);
5000 expected_len = sizeof(*cp) + key_count *
5001 sizeof(struct mgmt_ltk_info);
5002 if (expected_len != len) {
5003 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5005 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5006 MGMT_STATUS_INVALID_PARAMS);
5009 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole request if any single entry is invalid. */
5011 for (i = 0; i < key_count; i++) {
5012 struct mgmt_ltk_info *key = &cp->keys[i];
5014 if (!ltk_is_valid(key))
5015 return cmd_status(sk, hdev->id,
5016 MGMT_OP_LOAD_LONG_TERM_KEYS,
5017 MGMT_STATUS_INVALID_PARAMS);
5022 hci_smp_ltks_clear(hdev);
5024 for (i = 0; i < key_count; i++) {
5025 struct mgmt_ltk_info *key = &cp->keys[i];
5026 u8 type, addr_type, authenticated;
5028 if (key->addr.type == BDADDR_LE_PUBLIC)
5029 addr_type = ADDR_LE_DEV_PUBLIC;
5031 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt key type to SMP key type + authentication level; for
 * legacy (non-P256) keys the master bit selects master vs slave LTK.
 */
5033 switch (key->type) {
5034 case MGMT_LTK_UNAUTHENTICATED:
5035 authenticated = 0x00;
5036 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5038 case MGMT_LTK_AUTHENTICATED:
5039 authenticated = 0x01;
5040 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5042 case MGMT_LTK_P256_UNAUTH:
5043 authenticated = 0x00;
5044 type = SMP_LTK_P256;
5046 case MGMT_LTK_P256_AUTH:
5047 authenticated = 0x01;
5048 type = SMP_LTK_P256;
5050 case MGMT_LTK_P256_DEBUG:
5051 authenticated = 0x00;
5052 type = SMP_LTK_P256_DEBUG;
5057 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5058 authenticated, key->val, key->enc_size, key->ediv,
5062 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5065 hci_dev_unlock(hdev);
/* Match context passed to get_conn_info_complete() via
 * mgmt_pending_foreach(): identifies which connection the completed HCI
 * request was for and whether its TX power values are valid.
 * NOTE(review): trailing members (the code below reads match->mgmt_status)
 * and the closing brace are elided in this listing.
 */
5070 struct cmd_conn_lookup {
5071 struct hci_conn *conn;
5072 bool valid_tx_power;
/* mgmt_pending_foreach() callback: for each pending GET_CONN_INFO command
 * whose hci_conn matches the lookup context, build the reply from cached
 * connection values (RSSI, TX power) and complete + remove the command.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5076 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
5078 struct cmd_conn_lookup *match = data;
5079 struct mgmt_cp_get_conn_info *cp;
5080 struct mgmt_rp_get_conn_info rp;
5081 struct hci_conn *conn = cmd->user_data;
/* Skip pending commands that refer to a different connection. */
5083 if (conn != match->conn)
5086 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
5088 memset(&rp, 0, sizeof(rp));
5089 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5090 rp.addr.type = cp->addr.type;
5092 if (!match->mgmt_status) {
5093 rp.rssi = conn->rssi;
/* TX power is reported only when the refresh request succeeded. */
5095 if (match->valid_tx_power) {
5096 rp.tx_power = conn->tx_power;
5097 rp.max_tx_power = conn->max_tx_power;
5099 rp.tx_power = HCI_TX_POWER_INVALID;
5100 rp.max_tx_power = HCI_TX_POWER_INVALID;
5104 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5105 match->mgmt_status, &rp, sizeof(rp));
/* Drop the reference taken when the command was queued. */
5107 hci_conn_drop(conn);
5110 mgmt_pending_remove(cmd);
/* Completion callback for the RSSI/TX-power refresh request issued by
 * get_conn_info(). Recovers the connection handle from the last-sent HCI
 * command (Read RSSI or Read TX Power share the handle layout), then
 * replies to all matching pending GET_CONN_INFO commands.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5113 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
5115 struct hci_cp_read_rssi *cp;
5116 struct hci_conn *conn;
5117 struct cmd_conn_lookup match;
5120 BT_DBG("status 0x%02x", status);
5124 /* TX power data is valid in case request completed successfully,
5125 * otherwise we assume it's not valid. At the moment we assume that
5126 * either both or none of current and max values are valid to keep code
5129 match.valid_tx_power = !status;
5131 /* Commands sent in request are either Read RSSI or Read Transmit Power
5132 * Level so we check which one was last sent to retrieve connection
5133 * handle. Both commands have handle as first parameter so it's safe to
5134 * cast data on the same command struct.
5136 * First command sent is always Read RSSI and we fail only if it fails.
5137 * In other case we simply override error to indicate success as we
5138 * already remembered if TX power value is actually valid.
5140 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5142 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5147 BT_ERR("invalid sent_cmd in response");
5151 handle = __le16_to_cpu(cp->handle);
5152 conn = hci_conn_hash_lookup_handle(hdev, handle);
5154 BT_ERR("unknown handle (%d) in response", handle);
5159 match.mgmt_status = mgmt_status(status);
5161 /* Cache refresh is complete, now reply for mgmt request for given
5164 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
5165 get_conn_info_complete, &match);
5168 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an active
 * connection. If the cached values are older than a randomized age window
 * (conn_info_min_age..conn_info_max_age) or never set, it queues Read
 * RSSI / Read TX Power HCI commands and defers the reply to
 * conn_info_refresh_complete(); otherwise it answers from the cache.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5171 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5174 struct mgmt_cp_get_conn_info *cp = data;
5175 struct mgmt_rp_get_conn_info rp;
5176 struct hci_conn *conn;
5177 unsigned long conn_info_age;
5180 BT_DBG("%s", hdev->name);
5182 memset(&rp, 0, sizeof(rp));
5183 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5184 rp.addr.type = cp->addr.type;
5186 if (!bdaddr_type_is_valid(cp->addr.type))
5187 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5188 MGMT_STATUS_INVALID_PARAMS,
5193 if (!hdev_is_powered(hdev)) {
5194 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5195 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Resolve the connection by transport: ACL for BR/EDR, else LE. */
5199 if (cp->addr.type == BDADDR_BREDR)
5200 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5203 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5205 if (!conn || conn->state != BT_CONNECTED) {
5206 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5207 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5211 /* To avoid client trying to guess when to poll again for information we
5212 * calculate conn info age as random value between min/max set in hdev.
5214 conn_info_age = hdev->conn_info_min_age +
5215 prandom_u32_max(hdev->conn_info_max_age -
5216 hdev->conn_info_min_age);
5218 /* Query controller to refresh cached values if they are too old or were
5221 if (time_after(jiffies, conn->conn_info_timestamp +
5222 msecs_to_jiffies(conn_info_age)) ||
5223 !conn->conn_info_timestamp) {
5224 struct hci_request req;
5225 struct hci_cp_read_tx_power req_txp_cp;
5226 struct hci_cp_read_rssi req_rssi_cp;
5227 struct pending_cmd *cmd;
5229 hci_req_init(&req, hdev);
5230 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5231 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5234 /* For LE links TX power does not change thus we don't need to
5235 * query for it once value is known.
5237 if (!bdaddr_type_is_le(cp->addr.type) ||
5238 conn->tx_power == HCI_TX_POWER_INVALID) {
5239 req_txp_cp.handle = cpu_to_le16(conn->handle);
5240 req_txp_cp.type = 0x00;
5241 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5242 sizeof(req_txp_cp), &req_txp_cp);
5245 /* Max TX power needs to be read only once per connection */
5246 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5247 req_txp_cp.handle = cpu_to_le16(conn->handle);
5248 req_txp_cp.type = 0x01;
5249 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5250 sizeof(req_txp_cp), &req_txp_cp);
5253 err = hci_req_run(&req, conn_info_refresh_complete);
5257 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the deferred reply is sent. */
5264 hci_conn_hold(conn);
5265 cmd->user_data = hci_conn_get(conn);
5267 conn->conn_info_timestamp = jiffies;
5269 /* Cache is valid, just reply with values cached in hci_conn */
5270 rp.rssi = conn->rssi;
5271 rp.tx_power = conn->tx_power;
5272 rp.max_tx_power = conn->max_tx_power;
5274 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5275 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5279 hci_dev_unlock(hdev);
/* Completion callback for the Read Clock request issued by
 * get_clock_info(). Recovers the optional piconet connection from the
 * sent command's `which`/handle fields, finds the matching pending
 * command, fills local (and, if present, piconet) clock values, then
 * completes and removes the command.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5283 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5285 struct mgmt_cp_get_clock_info *cp;
5286 struct mgmt_rp_get_clock_info rp;
5287 struct hci_cp_read_clock *hci_cp;
5288 struct pending_cmd *cmd;
5289 struct hci_conn *conn;
5291 BT_DBG("%s status %u", hdev->name, status);
5295 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock was requested for a connection. */
5299 if (hci_cp->which) {
5300 u16 handle = __le16_to_cpu(hci_cp->handle);
5301 conn = hci_conn_hash_lookup_handle(hdev, handle);
5306 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5312 memset(&rp, 0, sizeof(rp));
5313 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5318 rp.local_clock = cpu_to_le32(hdev->clock);
5321 rp.piconet_clock = cpu_to_le32(conn->clock);
5322 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5326 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5328 mgmt_pending_remove(cmd);
5330 hci_conn_drop(conn);
5335 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, when a
 * non-ANY BR/EDR address is given and connected, that connection's
 * piconet clock. Queues Read Clock HCI command(s); the reply is sent
 * from get_clock_info_complete().
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5338 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5341 struct mgmt_cp_get_clock_info *cp = data;
5342 struct mgmt_rp_get_clock_info rp;
5343 struct hci_cp_read_clock hci_cp;
5344 struct pending_cmd *cmd;
5345 struct hci_request req;
5346 struct hci_conn *conn;
5349 BT_DBG("%s", hdev->name);
5351 memset(&rp, 0, sizeof(rp));
5352 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5353 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5355 if (cp->addr.type != BDADDR_BREDR)
5356 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5357 MGMT_STATUS_INVALID_PARAMS,
5362 if (!hdev_is_powered(hdev)) {
5363 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5364 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address selects a specific ACL connection. */
5368 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5369 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5371 if (!conn || conn->state != BT_CONNECTED) {
5372 err = cmd_complete(sk, hdev->id,
5373 MGMT_OP_GET_CLOCK_INFO,
5374 MGMT_STATUS_NOT_CONNECTED,
5382 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5388 hci_req_init(&req, hdev);
/* First Read Clock with zeroed params reads the local clock. */
5390 memset(&hci_cp, 0, sizeof(hci_cp));
5391 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the deferred reply is delivered. */
5394 hci_conn_hold(conn);
5395 cmd->user_data = hci_conn_get(conn);
5397 hci_cp.handle = cpu_to_le16(conn->handle);
5398 hci_cp.which = 0x01; /* Piconet clock */
5399 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5402 err = hci_req_run(&req, get_clock_info_complete);
5404 mgmt_pending_remove(cmd);
5407 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_ADDED event for the given address/type/action,
 * skipping the originating socket sk.
 * NOTE(review): the line setting ev.action is elided in this listing.
 */
5411 static void device_added(struct sock *sk, struct hci_dev *hdev,
5412 bdaddr_t *bdaddr, u8 type, u8 action)
5414 struct mgmt_ev_device_added ev;
5416 bacpy(&ev.addr.bdaddr, bdaddr);
5417 ev.addr.type = type;
5420 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: whitelist a BR/EDR device (incoming
 * connections only, action 0x01) or register LE connection parameters
 * with an auto-connect policy (0x00 report, 0x01 direct, 0x02 always).
 * Emits MGMT_EV_DEVICE_ADDED on success.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5423 static int add_device(struct sock *sk, struct hci_dev *hdev,
5424 void *data, u16 len)
5426 struct mgmt_cp_add_device *cp = data;
5427 u8 auto_conn, addr_type;
5430 BT_DBG("%s", hdev->name);
5432 if (!bdaddr_type_is_valid(cp->addr.type) ||
5433 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5434 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5435 MGMT_STATUS_INVALID_PARAMS,
5436 &cp->addr, sizeof(cp->addr));
5438 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5439 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5440 MGMT_STATUS_INVALID_PARAMS,
5441 &cp->addr, sizeof(cp->addr));
5445 if (cp->addr.type == BDADDR_BREDR) {
5446 /* Only incoming connections action is supported for now */
5447 if (cp->action != 0x01) {
5448 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5449 MGMT_STATUS_INVALID_PARAMS,
5450 &cp->addr, sizeof(cp->addr));
5454 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist changed: refresh page scan state. */
5459 hci_update_page_scan(hdev, NULL);
5464 if (cp->addr.type == BDADDR_LE_PUBLIC)
5465 addr_type = ADDR_LE_DEV_PUBLIC;
5467 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt action to kernel auto-connect policy. */
5469 if (cp->action == 0x02)
5470 auto_conn = HCI_AUTO_CONN_ALWAYS;
5471 else if (cp->action == 0x01)
5472 auto_conn = HCI_AUTO_CONN_DIRECT;
5474 auto_conn = HCI_AUTO_CONN_REPORT;
5476 /* If the connection parameters don't exist for this device,
5477 * they will be created and configured with defaults.
5479 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5481 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5483 &cp->addr, sizeof(cp->addr));
5488 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5490 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5491 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5494 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for the given address/type,
 * skipping the originating socket sk.
 */
5498 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5499 bdaddr_t *bdaddr, u8 type)
5501 struct mgmt_ev_device_removed ev;
5503 bacpy(&ev.addr.bdaddr, bdaddr);
5504 ev.addr.type = type;
5506 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: with a specific address, remove it from
 * the BR/EDR whitelist or delete its LE connection parameters (disabled
 * entries are protected); with BDADDR_ANY (type must be 0), wipe the
 * whole whitelist and all non-disabled LE connection parameters.
 * Emits MGMT_EV_DEVICE_REMOVED per removed entry and refreshes page /
 * background scanning as needed.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5509 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5510 void *data, u16 len)
5512 struct mgmt_cp_remove_device *cp = data;
5515 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove a single device. */
5519 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5520 struct hci_conn_params *params;
5523 if (!bdaddr_type_is_valid(cp->addr.type)) {
5524 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5525 MGMT_STATUS_INVALID_PARAMS,
5526 &cp->addr, sizeof(cp->addr));
5530 if (cp->addr.type == BDADDR_BREDR) {
5531 err = hci_bdaddr_list_del(&hdev->whitelist,
5535 err = cmd_complete(sk, hdev->id,
5536 MGMT_OP_REMOVE_DEVICE,
5537 MGMT_STATUS_INVALID_PARAMS,
5538 &cp->addr, sizeof(cp->addr));
5542 hci_update_page_scan(hdev, NULL);
5544 device_removed(sk, hdev, &cp->addr.bdaddr,
5549 if (cp->addr.type == BDADDR_LE_PUBLIC)
5550 addr_type = ADDR_LE_DEV_PUBLIC;
5552 addr_type = ADDR_LE_DEV_RANDOM;
5554 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5557 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5558 MGMT_STATUS_INVALID_PARAMS,
5559 &cp->addr, sizeof(cp->addr));
/* Disabled entries were not added via ADD_DEVICE; refuse. */
5563 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5564 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5565 MGMT_STATUS_INVALID_PARAMS,
5566 &cp->addr, sizeof(cp->addr));
5570 list_del(&params->action);
5571 list_del(&params->list);
5573 hci_update_background_scan(hdev);
5575 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY branch: remove everything. */
5577 struct hci_conn_params *p, *tmp;
5578 struct bdaddr_list *b, *btmp;
/* With the wildcard address, the type field must be zero. */
5580 if (cp->addr.type) {
5581 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5582 MGMT_STATUS_INVALID_PARAMS,
5583 &cp->addr, sizeof(cp->addr));
5587 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5588 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5593 hci_update_page_scan(hdev, NULL);
5595 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5596 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5598 device_removed(sk, hdev, &p->addr, p->addr_type);
5599 list_del(&p->action);
5604 BT_DBG("All LE connection parameters were removed");
5606 hci_update_background_scan(hdev);
5610 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5611 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5614 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters. After count/length validation it clears disabled entries
 * and adds each supplied parameter set, skipping (not rejecting) entries
 * with invalid address types or out-of-range interval/latency/timeout.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5618 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5621 struct mgmt_cp_load_conn_param *cp = data;
/* Largest count that keeps the total payload size within u16. */
5622 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5623 sizeof(struct mgmt_conn_param));
5624 u16 param_count, expected_len;
5627 if (!lmp_le_capable(hdev))
5628 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5629 MGMT_STATUS_NOT_SUPPORTED);
5631 param_count = __le16_to_cpu(cp->param_count);
5632 if (param_count > max_param_count) {
5633 BT_ERR("load_conn_param: too big param_count value %u",
5635 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5636 MGMT_STATUS_INVALID_PARAMS);
5639 expected_len = sizeof(*cp) + param_count *
5640 sizeof(struct mgmt_conn_param);
5641 if (expected_len != len) {
5642 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5644 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5645 MGMT_STATUS_INVALID_PARAMS);
5648 BT_DBG("%s param_count %u", hdev->name, param_count);
5652 hci_conn_params_clear_disabled(hdev);
5654 for (i = 0; i < param_count; i++) {
5655 struct mgmt_conn_param *param = &cp->params[i];
5656 struct hci_conn_params *hci_param;
5657 u16 min, max, latency, timeout;
5660 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5663 if (param->addr.type == BDADDR_LE_PUBLIC) {
5664 addr_type = ADDR_LE_DEV_PUBLIC;
5665 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5666 addr_type = ADDR_LE_DEV_RANDOM;
/* Invalid entries are logged and skipped, not fatal. */
5668 BT_ERR("Ignoring invalid connection parameters");
5672 min = le16_to_cpu(param->min_interval);
5673 max = le16_to_cpu(param->max_interval);
5674 latency = le16_to_cpu(param->latency);
5675 timeout = le16_to_cpu(param->timeout);
5677 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5678 min, max, latency, timeout);
5680 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5681 BT_ERR("Ignoring invalid connection parameters");
5685 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5688 BT_ERR("Failed to add connection parameters");
5692 hci_param->conn_min_interval = min;
5693 hci_param->conn_max_interval = max;
5694 hci_param->conn_latency = latency;
5695 hci_param->supervision_timeout = timeout;
5698 hci_dev_unlock(hdev);
5700 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark configuration as handled
 * externally (only for controllers with the EXTERNAL_CONFIG quirk, and
 * only while powered off). If the configured state of the controller
 * changes as a result, the index migrates between the configured and
 * unconfigured lists, powering on via the req workqueue when it becomes
 * configured.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5703 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5704 void *data, u16 len)
5706 struct mgmt_cp_set_external_config *cp = data;
5710 BT_DBG("%s", hdev->name);
5712 if (hdev_is_powered(hdev))
5713 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5714 MGMT_STATUS_REJECTED);
5716 if (cp->config != 0x00 && cp->config != 0x01)
5717 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5718 MGMT_STATUS_INVALID_PARAMS);
5720 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5721 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5722 MGMT_STATUS_NOT_SUPPORTED);
5727 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5730 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5733 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5740 err = new_options(hdev, sk);
/* If configured-ness flipped, move the index to the other list. */
5742 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5743 mgmt_index_removed(hdev);
5745 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5746 set_bit(HCI_CONFIG, &hdev->dev_flags);
5747 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5749 queue_work(hdev->req_workqueue, &hdev->power_on);
5751 set_bit(HCI_RAW, &hdev->flags);
5752 mgmt_index_added(hdev);
5757 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: configure the controller's public
 * address via its set_bdaddr driver hook. Only allowed while powered off,
 * with a non-ANY address. If this makes an unconfigured controller
 * configured, the index moves to the configured list and power-on is
 * scheduled.
 * NOTE(review): listing has elided lines; verify against complete source.
 */
5761 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5762 void *data, u16 len)
5764 struct mgmt_cp_set_public_address *cp = data;
5768 BT_DBG("%s", hdev->name);
5770 if (hdev_is_powered(hdev))
5771 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5772 MGMT_STATUS_REJECTED);
5774 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5775 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5776 MGMT_STATUS_INVALID_PARAMS);
/* The driver must provide a set_bdaddr hook for this to work. */
5778 if (!hdev->set_bdaddr)
5779 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5780 MGMT_STATUS_NOT_SUPPORTED);
5784 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5785 bacpy(&hdev->public_addr, &cp->bdaddr);
5787 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5794 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5795 err = new_options(hdev, sk);
5797 if (is_configured(hdev)) {
5798 mgmt_index_removed(hdev);
5800 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5802 set_bit(HCI_CONFIG, &hdev->dev_flags);
5803 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5805 queue_work(hdev->req_workqueue, &hdev->power_on);
5809 hci_dev_unlock(hdev);
/* Dispatch table indexed by mgmt opcode. Each entry gives the handler,
 * whether the command has a variable-length payload (true => data_len is
 * a minimum, false => exact), and the expected parameter size. Entry
 * order must match the MGMT_OP_* opcode values; slot 0 is reserved.
 * NOTE(review): some struct member lines are elided in this listing.
 */
5813 static const struct mgmt_handler {
5814 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5818 } mgmt_handlers[] = {
5819 { NULL }, /* 0x0000 (no command) */
5820 { read_version, false, MGMT_READ_VERSION_SIZE },
5821 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5822 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5823 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5824 { set_powered, false, MGMT_SETTING_SIZE },
5825 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5826 { set_connectable, false, MGMT_SETTING_SIZE },
5827 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5828 { set_bondable, false, MGMT_SETTING_SIZE },
5829 { set_link_security, false, MGMT_SETTING_SIZE },
5830 { set_ssp, false, MGMT_SETTING_SIZE },
5831 { set_hs, false, MGMT_SETTING_SIZE },
5832 { set_le, false, MGMT_SETTING_SIZE },
5833 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5834 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5835 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5836 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5837 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5838 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5839 { disconnect, false, MGMT_DISCONNECT_SIZE },
5840 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5841 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5842 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5843 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5844 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5845 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5846 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5847 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5848 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5849 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5850 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5851 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5852 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5853 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5854 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5855 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5856 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5857 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5858 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5859 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5860 { set_advertising, false, MGMT_SETTING_SIZE },
5861 { set_bredr, false, MGMT_SETTING_SIZE },
5862 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5863 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5864 { set_secure_conn, false, MGMT_SETTING_SIZE },
5865 { set_debug_keys, false, MGMT_SETTING_SIZE },
5866 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5867 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5868 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5869 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5870 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5871 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5872 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5873 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5874 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5875 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5876 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5877 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for mgmt socket writes: copies the message from userspace,
 * parses the mgmt header (opcode, controller index, payload length),
 * resolves and sanity-checks the target hci_dev, validates the opcode
 * against the dispatch table and the payload length against the
 * handler's declared size, then invokes the handler.
 * NOTE(review): listing has elided lines (error paths, frees, return);
 * verify against complete source.
 */
5880 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5884 struct mgmt_hdr *hdr;
5885 u16 opcode, index, len;
5886 struct hci_dev *hdev = NULL;
5887 const struct mgmt_handler *handler;
5890 BT_DBG("got %zu bytes", msglen);
/* Must at least contain a full mgmt header. */
5892 if (msglen < sizeof(*hdr))
5895 buf = kmalloc(msglen, GFP_KERNEL);
5899 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5905 opcode = __le16_to_cpu(hdr->opcode);
5906 index = __le16_to_cpu(hdr->index);
5907 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
5909 if (len != msglen - sizeof(*hdr)) {
5914 if (index != MGMT_INDEX_NONE) {
5915 hdev = hci_dev_get(index);
5917 err = cmd_status(sk, index, opcode,
5918 MGMT_STATUS_INVALID_INDEX);
/* Controllers in setup/config or claimed by a user channel are
 * not visible through the mgmt interface.
 */
5922 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5923 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5924 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5925 err = cmd_status(sk, index, opcode,
5926 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers accept only the config-related opcodes. */
5930 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5931 opcode != MGMT_OP_READ_CONFIG_INFO &&
5932 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5933 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5934 err = cmd_status(sk, index, opcode,
5935 MGMT_STATUS_INVALID_INDEX);
5940 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5941 mgmt_handlers[opcode].func == NULL) {
5942 BT_DBG("Unknown op %u", opcode);
5943 err = cmd_status(sk, index, opcode,
5944 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry a controller index. */
5948 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5949 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5950 err = cmd_status(sk, index, opcode,
5951 MGMT_STATUS_INVALID_INDEX);
/* Per-controller commands require a valid index. */
5955 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5956 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5957 err = cmd_status(sk, index, opcode,
5958 MGMT_STATUS_INVALID_INDEX);
5962 handler = &mgmt_handlers[opcode];
/* var_len handlers take data_len as a minimum, others as exact. */
5964 if ((handler->var_len && len < handler->data_len) ||
5965 (!handler->var_len && len != handler->data_len)) {
5966 err = cmd_status(sk, index, opcode,
5967 MGMT_STATUS_INVALID_PARAMS);
5972 mgmt_init_hdev(sk, hdev);
5974 cp = buf + sizeof(*hdr);
5976 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt listeners: raw-only and
 * non-BR/EDR devices are filtered out, and unconfigured controllers get the
 * UNCONF variant of the event.  NOTE(review): the early-return statements
 * after the guard conditions were dropped by this extraction.
 */
5990 void mgmt_index_added(struct hci_dev *hdev)
5992 if (hdev->dev_type != HCI_BREDR)
5995 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5998 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5999 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6001 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller: fail all pending mgmt commands with
 * INVALID_INDEX, then emit the (UNCONF_)INDEX_REMOVED event.  Same device
 * filtering as mgmt_index_added.  NOTE(review): early returns dropped by
 * this extraction.
 */
6004 void mgmt_index_removed(struct hci_dev *hdev)
6006 u8 status = MGMT_STATUS_INVALID_INDEX;
6008 if (hdev->dev_type != HCI_BREDR)
6011 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 means "match every pending command". */
6014 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6016 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6017 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6019 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6022 /* This function requires the caller holds hdev->lock */
/* Re-queue every stored LE connection parameter entry onto the appropriate
 * pending-action list according to its auto_connect policy, then kick the
 * background scan so the new lists take effect.
 */
6023 static void restart_le_actions(struct hci_dev *hdev)
6025 struct hci_conn_params *p;
6027 list_for_each_entry(p, &hdev->le_conn_params, list) {
6028 /* Needed for AUTO_OFF case where might not "really"
6029 * have been powered off. */
6031 list_del_init(&p->action);
6033 switch (p->auto_connect) {
6034 case HCI_AUTO_CONN_DIRECT:
6035 case HCI_AUTO_CONN_ALWAYS:
6036 list_add(&p->action, &hdev->pend_le_conns);
6038 case HCI_AUTO_CONN_REPORT:
6039 list_add(&p->action, &hdev->pend_le_reports);
6046 hci_update_background_scan(hdev);
/* HCI request completion callback for the power-on sequence: restarts LE
 * auto-connect actions, answers pending SET_POWERED commands and broadcasts
 * the new settings.  NOTE(review): the matching hci_dev_lock() call was
 * dropped by this extraction — only the unlock is visible.
 */
6049 static void powered_complete(struct hci_dev *hdev, u8 status)
6051 struct cmd_lookup match = { NULL, hdev };
6053 BT_DBG("status 0x%02x", status);
6057 restart_le_actions(hdev);
6059 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6061 new_settings(hdev, match.sk);
6063 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with the
 * mgmt settings after power-on: SSP mode, LE host support, advertising data,
 * auth enable and page-scan/fast-connectable for BR/EDR.  Returns the result
 * of hci_req_run() (0 when commands were queued).
 */
6069 static int powered_update_hci(struct hci_dev *hdev)
6071 struct hci_request req;
6074 hci_req_init(&req, hdev);
/* Only push SSP mode when the host setting disagrees with the controller. */
6076 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6077 !lmp_host_ssp_capable(hdev)) {
6080 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
6083 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6084 lmp_bredr_capable(hdev)) {
6085 struct hci_cp_write_le_host_supported cp;
6090 /* Check first if we already have the right
6091 * host state (host features set) */
6093 if (cp.le != lmp_host_le_capable(hdev) ||
6094 cp.simul != lmp_host_le_br_capable(hdev))
6095 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6099 if (lmp_le_capable(hdev)) {
6100 /* Make sure the controller has a good default for
6101 * advertising data. This also applies to the case
6102 * where BR/EDR was toggled during the AUTO_OFF phase. */
6104 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6105 update_adv_data(&req);
6106 update_scan_rsp_data(&req);
6109 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6110 enable_advertising(&req);
/* Sync authentication enable with the mgmt link-security setting. */
6113 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6114 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6115 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6116 sizeof(link_sec), &link_sec);
6118 if (lmp_bredr_capable(hdev)) {
6119 write_fast_connectable(&req, false);
6120 hci_update_page_scan(hdev, &req);
6126 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer of a power state change.  On power-up the actual
 * settings sync is deferred to powered_update_hci()/powered_complete(); on
 * power-down all pending commands are failed with NOT_POWERED, a zero class
 * of device is broadcast if needed, and new settings are emitted.
 * NOTE(review): several control-flow lines (returns, the power-off branch
 * label) were dropped by this extraction.
 */
6129 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6131 struct cmd_lookup match = { NULL, hdev };
6132 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
6133 u8 zero_cod[] = { 0, 0, 0 };
6136 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6140 if (powered_update_hci(hdev) == 0)
6143 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6148 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6149 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);
6151 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6152 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6153 zero_cod, sizeof(zero_cod), NULL);
6156 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED when the radio is rfkill
 * blocked, generic FAILED otherwise.  No-op if no such command is pending.
 */
6164 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6166 struct pending_cmd *cmd;
6169 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6173 if (err == -ERFKILL)
6174 status = MGMT_STATUS_RFKILLED;
6176 status = MGMT_STATUS_FAILED;
6178 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6180 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clear both discoverable flags, turn inquiry
 * scan back off (page scan only) for BR/EDR, refresh advertising data, and
 * broadcast the new settings.  NOTE(review): the matching hci_dev_lock()
 * call was dropped by this extraction.
 */
6183 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6185 struct hci_request req;
6189 /* When discoverable timeout triggers, then just make sure
6190 * the limited discoverable flag is cleared. Even in the case
6191 * of a timeout triggered from general discoverable, it is
6192 * safe to unconditionally clear the flag. */
6194 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6195 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6197 hci_req_init(&req, hdev);
6198 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6199 u8 scan = SCAN_PAGE;
6200 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6201 sizeof(scan), &scan);
6204 update_adv_data(&req);
6205 hci_req_run(&req, NULL);
6207 hdev->discov_timeout = 0;
6209 new_settings(hdev, NULL);
6211 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a BR/EDR link key; store_hint tells
 * userspace whether the key should be persisted.
 */
6214 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6217 struct mgmt_ev_new_link_key ev;
6219 memset(&ev, 0, sizeof(ev));
6221 ev.store_hint = persistent;
6222 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6223 ev.key.addr.type = BDADDR_BREDR;
6224 ev.key.type = key->type;
6225 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6226 ev.key.pin_len = key->pin_len;
6228 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term key's type/authentication level to the mgmt LTK type
 * constant exposed to userspace.  NOTE(review): the case labels for the
 * first two switch arms (presumably SMP_LTK and SMP_LTK_P256) were dropped
 * by this extraction — confirm against the original file.
 */
6231 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6233 switch (ltk->type) {
6236 if (ltk->authenticated)
6237 return MGMT_LTK_AUTHENTICATED;
6238 return MGMT_LTK_UNAUTHENTICATED;
6240 if (ltk->authenticated)
6241 return MGMT_LTK_P256_AUTH;
6242 return MGMT_LTK_P256_UNAUTH;
6243 case SMP_LTK_P256_DEBUG:
6244 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown key types. */
6247 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY.  Keys from devices using (non-)resolvable
 * random addresses without an identity get store_hint 0 since their address
 * will not be stable.
 */
6250 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6252 struct mgmt_ev_new_long_term_key ev;
6254 memset(&ev, 0, sizeof(ev));
6256 /* Devices using resolvable or non-resolvable random addresses
6257 * without providing an identity resolving key don't require
6258 * to store long term keys. Their addresses will change the
6261 * Only when a remote device provides an identity address
6262 * make sure the long term key is stored. If the remote
6263 * identity is known, the long term keys are internally
6264 * mapped to the identity address. So allow static random
6265 * and public addresses here. */
/* Static random addresses have the two top bits set (0xc0). */
6267 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6268 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6269 ev.store_hint = 0x00;
6271 ev.store_hint = persistent;
6273 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6274 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6275 ev.key.type = mgmt_ltk_type(key);
6276 ev.key.enc_size = key->enc_size;
6277 ev.key.ediv = key->ediv;
6278 ev.key.rand = key->rand;
6280 if (key->type == SMP_LTK)
6283 memcpy(ev.key.val, key->val, sizeof(key->val));
6285 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a received identity resolving key.  Only ask
 * userspace to store the IRK when the device is actually using a resolvable
 * private address (irk->rpa is set).
 */
6288 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6290 struct mgmt_ev_new_irk ev;
6292 memset(&ev, 0, sizeof(ev));
6294 /* For identity resolving keys from devices that are already
6295 * using a public address or static random address, do not
6296 * ask for storing this key. The identity resolving key really
6297 * is only mandatory for devices using resolvable random
6300 * Storing all identity resolving keys has the downside that
6301 * they will be also loaded on next boot of the system. More
6302 * identity resolving keys, means more time during scanning is
6303 * needed to actually resolve these addresses. */
6305 if (bacmp(&irk->rpa, BDADDR_ANY))
6306 ev.store_hint = 0x01;
6308 ev.store_hint = 0x00;
6310 bacpy(&ev.rpa, &irk->rpa);
6311 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6312 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6313 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6315 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a signature resolving key; same identity-address
 * store-hint policy as mgmt_new_ltk().
 */
6318 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6321 struct mgmt_ev_new_csrk ev;
6323 memset(&ev, 0, sizeof(ev));
6325 /* Devices using resolvable or non-resolvable random addresses
6326 * without providing an identity resolving key don't require
6327 * to store signature resolving keys. Their addresses will change
6328 * the next time around.
6330 * Only when a remote device provides an identity address
6331 * make sure the signature resolving key is stored. So allow
6332 * static random and public addresses here. */
/* Static random addresses have the two top bits set (0xc0). */
6334 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6335 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6336 ev.store_hint = 0x00;
6338 ev.store_hint = persistent;
6340 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6341 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6342 ev.key.master = csrk->master;
6343 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6345 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with the remote's preferred LE connection
 * parameters.  Silently ignored for non-identity addresses, since the
 * parameters could not be re-associated with the device later.
 */
6348 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6349 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6350 u16 max_interval, u16 latency, u16 timeout)
6352 struct mgmt_ev_new_conn_param ev;
6354 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6357 memset(&ev, 0, sizeof(ev));
6358 bacpy(&ev.addr.bdaddr, bdaddr);
6359 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6360 ev.store_hint = store_hint;
6361 ev.min_interval = cpu_to_le16(min_interval);
6362 ev.max_interval = cpu_to_le16(max_interval);
6363 ev.latency = cpu_to_le16(latency);
6364 ev.timeout = cpu_to_le16(timeout);
6366 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR/AD structure (length, type, data) to the buffer at offset
 * eir_len and return the new length.  Caller must ensure the buffer is big
 * enough for data_len + 2 additional bytes.
 */
6369 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* EIR length octet covers the type byte plus the payload. */
6372 eir[eir_len++] = sizeof(type) + data_len;
6373 eir[eir_len++] = type;
6374 memcpy(&eir[eir_len], data, data_len);
6375 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with either the connection's LE advertising
 * data or, for BR/EDR, the remote name and class of device packed as EIR.
 */
6380 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6381 u32 flags, u8 *name, u8 name_len)
6384 struct mgmt_ev_device_connected *ev = (void *) buf;
6387 bacpy(&ev->addr.bdaddr, &conn->dst);
6388 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6390 ev->flags = __cpu_to_le32(flags);
6392 /* We must ensure that the EIR Data fields are ordered and
6393 * unique. Keep it simple for now and avoid the problem by not
6394 * adding any BR/EDR data to the LE adv. */
6396 if (conn->le_adv_data_len > 0) {
6397 memcpy(&ev->eir[eir_len],
6398 conn->le_adv_data, conn->le_adv_data_len);
6399 eir_len = conn->le_adv_data_len;
6402 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is non-zero. */
6405 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6406 eir_len = eir_append_data(ev->eir, eir_len,
6408 conn->dev_class, 3);
6411 ev->eir_len = cpu_to_le16(eir_len);
6413 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6414 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command with
 * success and hand its socket back through *data for use as the event skip
 * socket.  NOTE(review): the socket hand-off assignment was dropped by this
 * extraction.
 */
6417 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6419 struct sock **sk = data;
6421 cmd->cmd_complete(cmd, 0);
6426 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: answer a pending UNPAIR_DEVICE command with
 * success, emitting the Device Unpaired event first.
 */
6429 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6431 struct hci_dev *hdev = data;
6432 struct mgmt_cp_unpair_device *cp = cmd->param;
6433 struct mgmt_rp_unpair_device rp;
6435 memset(&rp, 0, sizeof(rp));
6436 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6437 rp.addr.type = cp->addr.type;
6439 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6441 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6443 mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending, i.e. the
 * controller is in the process of powering down.  NOTE(review): the body is
 * visibly truncated in this extraction — the NULL check and the inspection
 * of cp->val are missing; confirm against the original file.
 */
6446 bool mgmt_powering_down(struct hci_dev *hdev)
6448 struct pending_cmd *cmd;
6449 struct mgmt_mode *cp;
6451 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED.  If this was the last connection during
 * a power-down, expedite the queued power-off work.  Pending DISCONNECT
 * commands are completed and their socket excluded from the broadcast;
 * pending UNPAIR_DEVICE commands are answered afterwards.
 */
6462 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6463 u8 link_type, u8 addr_type, u8 reason,
6464 bool mgmt_connected)
6466 struct mgmt_ev_device_disconnected ev;
6467 struct sock *sk = NULL;
6469 /* The connection is still in hci_conn_hash so test for 1
6470 * instead of 0 to know if this is the last one. */
6472 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6473 cancel_delayed_work(&hdev->power_off);
6474 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6477 if (!mgmt_connected)
6480 if (link_type != ACL_LINK && link_type != LE_LINK)
6483 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6485 bacpy(&ev.addr.bdaddr, bdaddr);
6486 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6489 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6494 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: answer pending UNPAIR_DEVICE commands and
 * complete the matching pending DISCONNECT command (same address and type)
 * with the translated HCI status.
 */
6498 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6499 u8 link_type, u8 addr_type, u8 status)
6501 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6502 struct mgmt_cp_disconnect *cp;
6503 struct pending_cmd *cmd;
6505 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6508 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6514 if (bacmp(bdaddr, &cp->addr.bdaddr))
6517 if (cp->addr.type != bdaddr_type)
6520 cmd->cmd_complete(cmd, mgmt_status(status));
6521 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED; if this was the last connection during a
 * power-down, expedite the queued power-off work (mirrors
 * mgmt_device_disconnected).
 */
6524 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6525 u8 addr_type, u8 status)
6527 struct mgmt_ev_connect_failed ev;
6529 /* The connection is still in hci_conn_hash so test for 1
6530 * instead of 0 to know if this is the last one. */
6532 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6533 cancel_delayed_work(&hdev->power_off);
6534 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6537 bacpy(&ev.addr.bdaddr, bdaddr);
6538 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6539 ev.status = mgmt_status(status);
6541 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request to userspace as
 * MGMT_EV_PIN_CODE_REQUEST.  NOTE(review): the assignment of the 'secure'
 * parameter into the event was dropped by this extraction.
 */
6544 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6546 struct mgmt_ev_pin_code_request ev;
6548 bacpy(&ev.addr.bdaddr, bdaddr);
6549 ev.addr.type = BDADDR_BREDR;
6552 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI status. */
6555 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6558 struct pending_cmd *cmd;
6560 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6564 cmd->cmd_complete(cmd, mgmt_status(status));
6565 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated HCI
 * status.
 */
6568 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6571 struct pending_cmd *cmd;
6573 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6577 cmd->cmd_complete(cmd, mgmt_status(status));
6578 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric comparison value by emitting
 * MGMT_EV_USER_CONFIRM_REQUEST; returns the mgmt_event() result.
 */
6581 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6582 u8 link_type, u8 addr_type, u32 value,
6585 struct mgmt_ev_user_confirm_request ev;
6587 BT_DBG("%s", hdev->name);
6589 bacpy(&ev.addr.bdaddr, bdaddr);
6590 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6591 ev.confirm_hint = confirm_hint;
6592 ev.value = cpu_to_le32(value);
6594 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace for a passkey by emitting MGMT_EV_USER_PASSKEY_REQUEST;
 * returns the mgmt_event() result.
 */
6598 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6599 u8 link_type, u8 addr_type)
6601 struct mgmt_ev_user_passkey_request ev;
6603 BT_DBG("%s", hdev->name);
6605 bacpy(&ev.addr.bdaddr, bdaddr);
6606 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6608 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply opcodes:
 * find the pending command for 'opcode' and complete it with the translated
 * HCI status.
 */
6612 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6613 u8 link_type, u8 addr_type, u8 status,
6616 struct pending_cmd *cmd;
6618 cmd = mgmt_pending_find(opcode, hdev);
6622 cmd->cmd_complete(cmd, mgmt_status(status));
6623 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
6628 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6629 u8 link_type, u8 addr_type, u8 status)
6631 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6632 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
6635 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6636 u8 link_type, u8 addr_type, u8 status)
6638 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6640 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
6643 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6644 u8 link_type, u8 addr_type, u8 status)
6646 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6647 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
6650 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6651 u8 link_type, u8 addr_type, u8 status)
6653 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6655 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey being
 * entered on the remote side; returns the mgmt_event() result.
 */
6658 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6659 u8 link_type, u8 addr_type, u32 passkey,
6662 struct mgmt_ev_passkey_notify ev;
6664 BT_DBG("%s", hdev->name);
6666 bacpy(&ev.addr.bdaddr, bdaddr);
6667 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6668 ev.passkey = __cpu_to_le32(passkey);
6669 ev.entered = entered;
6671 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed on a connection: emit MGMT_EV_AUTH_FAILED, skipping
 * the socket of any pairing command in progress, and complete that pairing
 * with the translated status.
 */
6674 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6676 struct mgmt_ev_auth_failed ev;
6677 struct pending_cmd *cmd;
6678 u8 status = mgmt_status(hci_status);
6680 bacpy(&ev.addr.bdaddr, &conn->dst);
6681 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6684 cmd = find_pairing(conn);
6686 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6687 cmd ? cmd->sk : NULL);
6690 pairing_complete(cmd, status);
/* Write Authentication Enable finished: on error, fail pending
 * SET_LINK_SECURITY commands; on success, sync the HCI_LINK_SECURITY flag
 * with the controller state, answer pending commands and broadcast new
 * settings when the flag actually changed.
 */
6693 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6695 struct cmd_lookup match = { NULL, hdev };
6699 u8 mgmt_err = mgmt_status(status);
6700 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6701 cmd_status_rsp, &mgmt_err);
6705 if (test_bit(HCI_AUTH, &hdev->flags))
6706 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6709 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6712 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6716 new_settings(hdev, match.sk);
/* Queue a Write EIR command that clears the extended inquiry response, and
 * zero the cached copy.  No-op on controllers without EIR support.
 */
6722 static void clear_eir(struct hci_request *req)
6724 struct hci_dev *hdev = req->hdev;
6725 struct hci_cp_write_eir cp;
6727 if (!lmp_ext_inq_capable(hdev))
6730 memset(hdev->eir, 0, sizeof(hdev->eir));
6732 memset(&cp, 0, sizeof(cp));
6734 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode finished: on error, roll back the SSP (and dependent HS)
 * flags and fail pending SET_SSP commands; on success, sync the flags,
 * answer pending commands, broadcast settings on change, and refresh
 * debug-key mode / EIR as needed.
 */
6737 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6739 struct cmd_lookup match = { NULL, hdev };
6740 struct hci_request req;
6741 bool changed = false;
6744 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set and tell listeners. */
6746 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6747 &hdev->dev_flags)) {
6748 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6749 new_settings(hdev, NULL);
6752 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6758 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6760 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed requires SSP, so disabling SSP also clears HS. */
6762 changed = test_and_clear_bit(HCI_HS_ENABLED,
6765 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6768 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6771 new_settings(hdev, match.sk);
6776 hci_req_init(&req, hdev);
6778 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6779 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6780 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6781 sizeof(enable), &enable);
6787 hci_req_run(&req, NULL);
/* Write Secure Connections support finished: on error, roll back the SC
 * (and SC-only) flags and fail pending SET_SECURE_CONN commands; on success,
 * sync flags, answer pending commands and broadcast settings on change.
 */
6790 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6792 struct cmd_lookup match = { NULL, hdev };
6793 bool changed = false;
6796 u8 mgmt_err = mgmt_status(status);
6799 if (test_and_clear_bit(HCI_SC_ENABLED,
6801 new_settings(hdev, NULL);
6802 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6805 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6806 cmd_status_rsp, &mgmt_err);
6811 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6813 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode cannot survive SC being disabled. */
6814 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6817 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6818 settings_rsp, &match);
6821 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture (and hold a reference to) the
 * first pending command's socket into the cmd_lookup match.
 */
6827 static void sk_lookup(struct pending_cmd *cmd, void *data)
6829 struct cmd_lookup *match = data;
6831 if (match->sk == NULL) {
6832 match->sk = cmd->sk;
6833 sock_hold(match->sk);
/* Class-of-device write finished: find the socket of whichever command
 * triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so it can be
 * excluded from the CLASS_OF_DEV_CHANGED broadcast.
 */
6837 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6840 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6842 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6843 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6844 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6847 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name write finished: update the cached name when no SET_LOCAL_NAME
 * command is pending, and broadcast LOCAL_NAME_CHANGED — unless the write
 * was part of the power-on sequence.
 */
6854 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6856 struct mgmt_cp_set_local_name ev;
6857 struct pending_cmd *cmd;
6862 memset(&ev, 0, sizeof(ev));
6863 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6864 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6866 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6868 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6870 /* If this is a HCI command related to powering on the
6871 * HCI dev don't send any mgmt signals. */
6873 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6877 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6878 cmd ? cmd->sk : NULL);
/* Read Local OOB Data finished: answer the pending READ_LOCAL_OOB_DATA
 * command — extended (192+256 bit hashes) when BR/EDR secure connections is
 * enabled and P-256 data is available, legacy (192 bit only) otherwise, or
 * a plain status on failure.
 */
6881 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6882 u8 *rand192, u8 *hash256, u8 *rand256,
6885 struct pending_cmd *cmd;
6887 BT_DBG("%s status %u", hdev->name, status);
6889 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6894 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6895 mgmt_status(status));
6897 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
6898 struct mgmt_rp_read_local_oob_ext_data rp;
6900 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6901 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
6903 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6904 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
6906 cmd_complete(cmd->sk, hdev->id,
6907 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6910 struct mgmt_rp_read_local_oob_data rp;
6912 memcpy(rp.hash, hash192, sizeof(rp.hash));
6913 memcpy(rp.rand, rand192, sizeof(rp.rand));
6915 cmd_complete(cmd->sk, hdev->id,
6916 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6921 mgmt_pending_remove(cmd);
6924 /* this is reversed hex representation of bluetooth base uuid. We need it for
6925 * service uuid parsing in eir. */
/* Little-endian form of 00000000-0000-1000-8000-00805F9B34FB; 16- and 32-bit
 * UUIDs from EIR are expanded by patching bytes 12..15 of a copy of this.
 */
6927 static const u8 reverse_base_uuid[] = {
6928 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
6929 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/* Return whether the 128-bit uuid appears in the uuids array of uuid_count
 * entries (byte-wise comparison).
 */
6932 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6936 for (i = 0; i < uuid_count; i++) {
6937 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/AD structures in eir[0..eir_len) and return whether any
 * contained service UUID (16-, 32- or 128-bit, expanded against the base
 * UUID) matches one of the given uuids.  Malformed field lengths terminate
 * the scan.
 */
6944 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
6948 while (parsed < eir_len) {
6949 u8 field_len = eir[0];
6956 if (eir_len - parsed < field_len + 1)
6960 case EIR_UUID16_ALL:
6961 case EIR_UUID16_SOME:
/* Each 16-bit UUID is expanded into bytes 12-13 of the base UUID. */
6962 for (i = 0; i + 3 <= field_len; i += 2) {
6963 memcpy(uuid, reverse_base_uuid, 16);
6964 uuid[13] = eir[i + 3];
6965 uuid[12] = eir[i + 2];
6966 if (has_uuid(uuid, uuid_count, uuids))
6970 case EIR_UUID32_ALL:
6971 case EIR_UUID32_SOME:
/* 32-bit UUIDs occupy bytes 12-15 of the base UUID. */
6972 for (i = 0; i + 5 <= field_len; i += 4) {
6973 memcpy(uuid, reverse_base_uuid, 16);
6974 uuid[15] = eir[i + 5];
6975 uuid[14] = eir[i + 4];
6976 uuid[13] = eir[i + 3];
6977 uuid[12] = eir[i + 2];
6978 if (has_uuid(uuid, uuid_count, uuids))
6982 case EIR_UUID128_ALL:
6983 case EIR_UUID128_SOME:
6984 for (i = 0; i + 17 <= field_len; i += 16) {
6985 memcpy(uuid, eir + i + 2, 16);
6986 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length octet plus field_len payload bytes. */
6992 parsed += field_len + 1;
6993 eir += field_len + 1;
/* Report a discovered device to userspace as MGMT_EV_DEVICE_FOUND, applying
 * the active discovery filters: events are suppressed outside kernel
 * discovery (except LE passive-scan reports), below the configured RSSI
 * threshold, or when service-discovery UUID filters match neither the
 * EIR/advertising data nor the scan response.
 */
6999 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7000 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7001 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7004 struct mgmt_ev_device_found *ev = (void *) buf;
7008 /* Don't send events for a non-kernel initiated discovery. With
7009 * LE one exception is if we have pend_le_reports > 0 in which
7010 * case we're doing passive scanning and want these events. */
7012 if (!hci_discovery_active(hdev)) {
7013 if (link_type == ACL_LINK)
7015 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7019 /* When using service discovery with a RSSI threshold, then check
7020 * if such a RSSI threshold is specified. If a RSSI threshold has
7021 * been specified, then all results with a RSSI smaller than the
7022 * RSSI threshold will be dropped. */
7024 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7025 rssi < hdev->discovery.rssi)
7028 /* Make sure that the buffer is big enough. The 5 extra bytes
7029 * are for the potential CoD field. */
7031 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7034 memset(buf, 0, sizeof(buf));
7036 bacpy(&ev->addr.bdaddr, bdaddr);
7037 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7039 ev->flags = cpu_to_le32(flags);
7042 /* When using service discovery and a list of UUID is
7043 * provided, results with no matching UUID should be
7044 * dropped. In case there is a match the result is
7045 * kept and checking possible scan response data */
7048 if (hdev->discovery.uuid_count > 0) {
7049 match = eir_has_uuids(eir, eir_len,
7050 hdev->discovery.uuid_count,
7051 hdev->discovery.uuids);
7056 /* Copy EIR or advertising data into event */
7057 memcpy(ev->eir, eir, eir_len);
7059 /* When using service discovery and a list of UUID is
7060 * provided, results with empty EIR or advertising data
7061 * should be dropped since they do not match any UUID. */
7063 if (hdev->discovery.uuid_count > 0)
/* Synthesize a Class of Device EIR field for BR/EDR results lacking one. */
7067 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7068 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7071 if (scan_rsp_len > 0) {
7072 /* When using service discovery and a list of UUID is
7073 * provided, results with no matching UUID should be
7074 * dropped if there is no previous match from the
7077 if (hdev->discovery.uuid_count > 0) {
7078 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7079 hdev->discovery.uuid_count,
7080 hdev->discovery.uuids))
7084 /* Append scan response data to event */
7085 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7087 /* When using service discovery and a list of UUID is
7088 * provided, results with empty scan response and no
7089 * previous matched advertising data should be dropped. */
7091 if (hdev->discovery.uuid_count > 0 && !match)
7095 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7096 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7098 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as MGMT_EV_DEVICE_FOUND with the name
 * packed as an EIR Complete Local Name field.
 */
7101 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7102 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7104 struct mgmt_ev_device_found *ev;
7105 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7108 ev = (struct mgmt_ev_device_found *) buf;
7110 memset(buf, 0, sizeof(buf));
7112 bacpy(&ev->addr.bdaddr, bdaddr);
7113 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7116 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7119 ev->eir_len = cpu_to_le16(eir_len);
7121 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast MGMT_EV_DISCOVERING with the current discovery type and the new
 * discovering state.
 */
7124 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7126 struct mgmt_ev_discovering ev;
7128 BT_DBG("%s discovering %u", hdev->name, discovering);
7130 memset(&ev, 0, sizeof(ev));
7131 ev.type = hdev->discovery.type;
7132 ev.discovering = discovering;
7134 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for the re-enable advertising request; debug log only. */
7137 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
7139 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising (e.g. after it was implicitly stopped) when the
 * mgmt advertising setting is still on.
 */
7142 void mgmt_reenable_advertising(struct hci_dev *hdev)
7144 struct hci_request req;
7146 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7149 hci_req_init(&req, hdev);
7150 enable_advertising(&req);
7151 hci_req_run(&req, adv_enable_complete);