2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 8
/* Table of MGMT opcodes this kernel advertises as supported; it is
 * serialized into the MGMT_OP_READ_COMMANDS reply (see read_commands()).
 * NOTE(review): several entries and the closing "};" appear to be missing
 * from this extract — compare against upstream net/bluetooth/mgmt.c.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
96 MGMT_OP_START_SERVICE_DISCOVERY,
/* Table of MGMT events this kernel can emit; serialized alongside the
 * command table in the MGMT_OP_READ_COMMANDS reply (see read_commands()).
 * NOTE(review): some entries and the closing "};" appear to be missing
 * from this extract.
 */
99 static const u16 mgmt_events[] = {
100 MGMT_EV_CONTROLLER_ERROR,
102 MGMT_EV_INDEX_REMOVED,
103 MGMT_EV_NEW_SETTINGS,
104 MGMT_EV_CLASS_OF_DEV_CHANGED,
105 MGMT_EV_LOCAL_NAME_CHANGED,
106 MGMT_EV_NEW_LINK_KEY,
107 MGMT_EV_NEW_LONG_TERM_KEY,
108 MGMT_EV_DEVICE_CONNECTED,
109 MGMT_EV_DEVICE_DISCONNECTED,
110 MGMT_EV_CONNECT_FAILED,
111 MGMT_EV_PIN_CODE_REQUEST,
112 MGMT_EV_USER_CONFIRM_REQUEST,
113 MGMT_EV_USER_PASSKEY_REQUEST,
115 MGMT_EV_DEVICE_FOUND,
117 MGMT_EV_DEVICE_BLOCKED,
118 MGMT_EV_DEVICE_UNBLOCKED,
119 MGMT_EV_DEVICE_UNPAIRED,
120 MGMT_EV_PASSKEY_NOTIFY,
123 MGMT_EV_DEVICE_ADDED,
124 MGMT_EV_DEVICE_REMOVED,
125 MGMT_EV_NEW_CONN_PARAM,
126 MGMT_EV_UNCONF_INDEX_ADDED,
127 MGMT_EV_UNCONF_INDEX_REMOVED,
128 MGMT_EV_NEW_CONFIG_OPTIONS,
131 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
134 struct list_head list;
141 void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
144 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; consumed by mgmt_status(),
 * which falls back to MGMT_STATUS_FAILED for codes past the table end.
 * NOTE(review): the closing "};" is missing from this extract.
 */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate an HCI status code into its MGMT API equivalent via
 * mgmt_status_table; any code past the table end maps to
 * MGMT_STATUS_FAILED.
 * NOTE(review): the function's braces are missing from this extract.
 */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
/* Build an MGMT event skb (header + optional payload) and broadcast it to
 * control-channel listeners via hci_send_to_control(), skipping skip_sk.
 * The index field is the controller id when hdev is set, MGMT_INDEX_NONE
 * otherwise (both assignments are visible below; the if/else lines are
 * missing from this extract, as are the alloc_skb NULL check and the
 * trailing kfree_skb/return — TODO confirm against upstream).
 */
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
/* Timestamp the skb so userspace monitors see when the event was queued. */
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
/* Queue an MGMT_EV_CMD_STATUS reply (opcode + status) on the requesting
 * socket. Used for command failures that carry no response payload.
 * NOTE(review): the ev->status assignment, alloc failure check and error
 * path around sock_queue_rcv_skb appear to be missing from this extract.
 */
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE reply with an optional response payload
 * (rp/rp_len) on the requesting socket. The payload is only copied when
 * rp is non-NULL (the guarding "if (rp)" line is missing from this
 * extract, as is the ev->status assignment — TODO confirm upstream).
 */
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the compile-time MGMT
 * interface version/revision. Controller-independent (MGMT_INDEX_NONE).
 */
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: allocate a reply containing the counts
 * followed by every supported command opcode and then every event opcode,
 * each written little-endian via put_unaligned_le16 (the opcodes array in
 * the reply struct is not guaranteed aligned).
 * NOTE(review): the kmalloc NULL check and the trailing kfree(rp)/return
 * are missing from this extract.
 */
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
/* Events continue in the same output buffer right after the commands. */
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: under hci_dev_list_lock, first count
 * configured BR/EDR controllers to size the reply, then walk the list
 * again filling in indexes while skipping devices that are still in
 * SETUP/CONFIG, bound to a user channel, or raw-only (quirk). The count
 * and rp_len are recomputed after the second pass because the fill loop
 * applies stricter filters than the sizing loop.
 * NOTE(review): the kmalloc NULL check, count initialization and trailing
 * kfree(rp)/return are missing from this extract.
 */
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* 2 bytes per controller index in the reply. */
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror image of
 * read_index_list() — same two-pass count/fill structure and the same
 * SETUP/CONFIG/USER_CHANNEL/raw-only filters, but selecting BR/EDR
 * controllers that DO have HCI_UNCONFIGURED set.
 * NOTE(review): the kmalloc NULL check, count initialization and trailing
 * kfree(rp)/return are missing from this extract.
 */
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller is "configured" when no setup step remains outstanding:
 * external configuration (if the quirk requires it) has completed, and a
 * public address has been set (if the invalid-bdaddr quirk requires one).
 * NOTE(review): the return statements are missing from this extract —
 * presumably each matching condition returns false, else true; confirm
 * against upstream.
 */
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options still missing
 * for this controller: external config not yet done, and/or public
 * address still unset. Same conditions as is_configured(), expressed as
 * option bits for the MGMT config-info/new-options messages.
 */
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to all control sockets except skip.
 */
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
/* Reply to a configuration command with the controller's current
 * missing-options mask as the command-complete payload.
 */
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer id plus which
 * configuration options the controller supports (external config quirk,
 * driver-provided set_bdaddr hook => settable public address) and which
 * of them are still missing.
 * NOTE(review): only hci_dev_unlock() is visible — the matching
 * hci_dev_lock() line appears to be missing from this extract.
 */
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address is only configurable when the driver supplies a hook. */
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the bitmask of MGMT settings this controller COULD support:
 * a base set that is always available, plus BR/EDR-dependent, SSP-
 * dependent, secure-connections and LE-dependent bits gated on the
 * controller's LMP capabilities, and the configuration bit when the
 * controller is externally configurable or has a set_bdaddr hook
 * (the second half of that condition is on a line missing from this
 * extract — TODO confirm).
 */
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_BONDABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires at least Bluetooth 1.2 (interlaced scan). */
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
/* HCI_FORCE_SC is a debugfs override that fakes SC capability. */
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the bitmask of MGMT settings CURRENTLY active, by translating
 * the hdev->dev_flags bits (plus powered state) one-for-one into
 * MGMT_SETTING_* bits. Companion to get_supported_settings().
 * NOTE(review): the settings variable declaration/initialization and the
 * final return are on lines missing from this extract.
 */
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
640 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the 16-bit service UUIDs to 'data'
 * (capacity 'len'); returns the advanced write pointer. The 16-bit value
 * is extracted from bytes 12-13 of the stored 128-bit UUID. The field
 * starts as EIR_UUID16_ALL and is downgraded to EIR_UUID16_SOME when the
 * buffer cannot hold every UUID. PnP Information (0x1200) is excluded
 * (per the visible comparison; the skip statement itself is on a line
 * missing from this extract, as is the lazy field-header setup).
 */
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
/* Emit the 16-bit UUID little-endian and grow the field length byte. */
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the 32-bit service UUIDs to 'data'
 * (capacity 'len'); returns the advanced write pointer. The 32-bit value
 * is copied from bytes 12-15 of the stored 128-bit UUID. Field type is
 * EIR_UUID32_ALL, downgraded to EIR_UUID32_SOME on truncation. Same
 * structure as create_uuid16_list(); the lazy field-header setup lines
 * are missing from this extract.
 */
686 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
688 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the full 128-bit service UUIDs to 'data'
 * (capacity 'len'); returns the advanced write pointer. Each UUID takes
 * 16 bytes verbatim. Field type is EIR_UUID128_ALL, downgraded to
 * EIR_UUID128_SOME on truncation. Same structure as the 16/32-bit
 * variants; the lazy field-header setup lines are missing from this
 * extract.
 */
719 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
720 u8 *ptr = data, *uuids_start = NULL;
721 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
/* Linear search of hdev->mgmt_pending for a pending command with the
 * given opcode; the "return cmd" / "return NULL" lines are missing from
 * this extract but the match condition is visible below.
 */
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
/* Like mgmt_pending_find(), but additionally requires cmd->user_data to
 * match 'data'; entries with different user_data are skipped before the
 * opcode comparison (the "continue"/"return" lines are missing from this
 * extract).
 */
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
/* Write the local device name into an LE scan-response buffer as an EIR
 * name field and return the total ad length used. If the name exceeds
 * the remaining space it is emitted as EIR_NAME_SHORT (truncated to
 * max_len — the truncation assignment is on a line missing from this
 * extract), otherwise as EIR_NAME_COMPLETE. ptr[0] is the EIR length
 * byte (type byte + name), ptr[1] the type, name bytes follow at ptr+2.
 */
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command onto 'req' carrying the
 * freshly built scan-response data. Bails out when LE is disabled or the
 * new data matches what the controller already has (cached in
 * hdev->scan_rsp_data); otherwise updates the cache first (the early
 * "return" lines for both bail-outs are missing from this extract, as is
 * the cp.length assignment).
 */
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0). A pending MGMT_OP_SET_DISCOVERABLE command takes
 * precedence over the dev_flags bits since those have not reached their
 * final values yet (the cp->val == 0x01 comparison and the trailing
 * "return 0" are on lines missing from this extract).
 */
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
/* Build LE advertising data into 'ptr': a flags field (discoverability
 * flags plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when the
 * controller reports a valid advertising TX power, an EIR_TX_POWER
 * field. Returns the ad length used (the flags-field emission, length
 * bookkeeping and return are on lines missing from this extract).
 */
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command onto 'req' carrying freshly
 * built advertising data. Structure mirrors update_scan_rsp_data():
 * bail out when LE is disabled or the data is unchanged versus the
 * hdev->adv_data cache, otherwise refresh the cache and queue the
 * command (the early "return" lines and cp.length assignment are
 * missing from this extract).
 */
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Public helper: build a standalone HCI request that refreshes the LE
 * advertising data and run it. Returns the hci_req_run() result.
 */
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
/* Build the BR/EDR Extended Inquiry Response buffer: local name
 * (complete or shortened — the truncation branch lines are missing from
 * this extract), inquiry TX power when valid, the Device ID record when
 * a devid_source is configured, and finally the 16/32/128-bit service
 * UUID lists, each bounded by the space remaining in the
 * HCI_MAX_EIR_LENGTH buffer.
 */
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
/* Device ID record: source, vendor, product, version — all LE16. */
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command onto 'req' when the EIR actually
 * needs updating. Skipped when the controller is off, lacks extended
 * inquiry support, SSP is disabled, the service cache is still active,
 * or the rebuilt data matches the hdev->eir cache (the early "return"
 * lines for each guard are missing from this extract).
 */
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of every registered UUID to
 * form the service-class byte of the Class of Device (the val
 * declaration and return are on lines missing from this extract).
 */
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command onto 'req' with the 3-byte
 * Class of Device (minor, major, service classes). Skipped when powered
 * off, BR/EDR disabled, the service cache is active, or the value is
 * unchanged. When limited-discoverable is set, a bit is presumably ORed
 * into cod — the body of that if is on a line missing from this extract;
 * TODO confirm against upstream.
 */
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the (possibly still-pending) connectable state: a pending
 * MGMT_OP_SET_CONNECTABLE command's value wins over the
 * HCI_CONNECTABLE dev_flag (the "return cp->val" line inside the
 * pending-command branch is missing from this extract).
 */
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue an LE Set Advertise Enable command with enable = 0x00 (the local
 * "enable" variable declaration is on a line missing from this extract).
 */
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the full LE advertising (re)start sequence onto 'req':
 * optionally disable current advertising, pick an own-address type
 * (possibly generating a new random/RPA address), then set advertising
 * parameters and enable. Advertising type is connectable undirected
 * (ADV_IND) when connectable, otherwise non-connectable
 * (ADV_NONCONN_IND). Bails out when an LE connection exists (the return
 * for that guard is on a line missing from this extract).
 */
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req)
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work callback that expires the service cache: if
 * HCI_SERVICE_CACHE was set, clear it and run an HCI request to push the
 * now-uncached state to the controller. The request body (presumably
 * update_eir()/update_class() under hci_dev_lock — only the unlock is
 * visible) is on lines missing from this extract; TODO confirm upstream.
 */
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
/* Delayed-work callback fired when the Resolvable Private Address
 * lifetime elapses: mark the RPA expired, and if advertising is active,
 * restart it so enable_advertising() programs a fresh RPA into the
 * controller (the early return when HCI_ADVERTISING is clear is on a
 * line missing from this extract).
 */
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialization, triggered on first mgmt
 * use: set HCI_MGMT (returning early if it was already set), wire up the
 * service-cache and RPA-expiry delayed works, and clear HCI_BONDABLE so
 * userspace must enable bonding explicitly.
 */
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fill mgmt_rp_read_info with the
 * controller's address, HCI version, manufacturer, supported/current
 * settings, class of device and (short) name, then reply with
 * cmd_complete. NOTE(review): only hci_dev_unlock() is visible — the
 * matching hci_dev_lock() line appears to be missing from this extract.
 */
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd tracking an in-flight mgmt command: record
 * opcode and controller index, duplicate the command parameters, and
 * link it onto hdev->mgmt_pending.
 * NOTE(review): the allocation NULL checks, the sk reference handling
 * and the return are on lines missing from this extract.
 */
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke 'cb' for each pending command matching 'opcode' (opcode 0 acts
 * as a wildcard, per the "opcode > 0" filter). Uses the _safe iterator
 * so callbacks may remove entries. The cb(cmd, data) invocation itself
 * is on a line missing from this extract.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and release it. */
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the controller's current
 * settings bitmask as the command-complete payload.
 */
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for the power-off cleanup request: once no
 * connections remain, cancel the delayed power_off and run it
 * immediately on the request workqueue.
 */
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue whatever HCI commands are needed to stop the current discovery
 * activity, depending on discovery state: cancel an ongoing inquiry or
 * LE scan while FINDING, cancel the in-progress remote-name request
 * while RESOLVING, and stop LE passive scanning in the default case.
 * Returns whether any stop command was queued (the "return true/false"
 * lines, break statements and the RESOLVING lookup's NAME_PENDING
 * argument are on lines missing from this extract — TODO confirm
 * upstream).
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Build and run a single HCI request that quiesces the controller before
 * powering off: disable page/inquiry scan, stop advertising, stop any
 * discovery, then for every connection either disconnect it, cancel its
 * in-progress creation (LE or ACL), or reject the incoming request (ACL
 * vs SCO) — reason 0x15, "Terminated due to Power Off", in each case.
 * If commands were queued to stop discovery, move discovery state to
 * STOPPING once the request is running.
 * NOTE(review): the switch case labels (BT_CONNECTED etc.), the "scan"
 * local, hci_dev_lock/unlock and the final return are on lines missing
 * from this extract.
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates val is 0x00/0x01, rejects the
 * command as busy if one is already pending, and short-circuits with a
 * settings response when the requested state already matches. Cancelling
 * the auto-power-off timer is treated as a power-on. Otherwise a pending
 * command is recorded and either power_on work is queued, or the
 * controller is quiesced via clean_up_hci_state() before the delayed
 * power_off runs (immediately, when -ENODATA indicates nothing had to be
 * queued). NOTE(review): hci_dev_lock, several goto-failed lines, the
 * mgmt_pending_add NULL check and the final return are on lines missing
 * from this extract.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
/* Auto-off cancelled: treat this as powering on via mgmt. */
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all control sockets except skip (the __le32 ev declaration is on a
 * line missing from this extract).
 */
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: broadcast the new-settings event to everyone. */
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
1447 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with a
 * settings response, remember (and hold a ref on) the first command's
 * socket in the cmd_lookup match so the caller can skip it when
 * broadcasting, then free the entry.
 */
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by 'data' and remove it (the "u8 *status = data"
 * line is missing from this extract).
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when set; otherwise fall back to a plain status response. */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 	if (cmd->cmd_complete) {
1480 		cmd->cmd_complete(cmd, *status);
1481 		mgmt_pending_remove(cmd);
1486 	cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameter
 * buffer back as the response payload. */
1489 static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
/* cmd_complete handler for address-based commands: respond with just
 * the leading mgmt_addr_info portion of the stored parameters. */
1495 static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 		     sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled by the
 * host, SUCCESS otherwise. */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 	if (!lmp_bredr_capable(hdev))
1504 		return MGMT_STATUS_NOT_SUPPORTED;
1505 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 		return MGMT_STATUS_REJECTED;
1508 	return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled by the host,
 * SUCCESS otherwise. */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 	if (!lmp_le_capable(hdev))
1514 		return MGMT_STATUS_NOT_SUPPORTED;
1515 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 		return MGMT_STATUS_REJECTED;
1518 	return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for SET_DISCOVERABLE: commit or roll
 * back HCI_DISCOVERABLE / HCI_LIMITED_DISCOVERABLE flags, arm the
 * discoverable timeout, notify the requester and broadcast NEW_SETTINGS.
 * NOTE(review): interior lines are elided; the branch structure around
 * 1545/1554 is partly invisible here. */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1523 	struct pending_cmd *cmd;
1524 	struct mgmt_mode *cp;
1525 	struct hci_request req;
1528 	BT_DBG("status 0x%02x", status);
1532 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	/* On HCI failure: report the error and undo the limited flag. */
1537 		u8 mgmt_err = mgmt_status(status);
1538 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1539 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1545 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
		/* A non-zero timeout (stored in ms... — actually seconds,
		 * multiplied to ms here) arms the discov_off delayed work. */
1548 		if (hdev->discov_timeout > 0) {
1549 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1550 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1554 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1558 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1561 		new_settings(hdev, cmd->sk);
1563 	/* When the discoverable mode gets changed, make sure
1564 	 * that class of device has the limited discoverable
1565 	 * bit correctly set. Also update page scan based on whitelist
1568 	hci_req_init(&req, hdev);
1569 	hci_update_page_scan(hdev, &req);
1571 	hci_req_run(&req, NULL);
1574 	mgmt_pending_remove(cmd);
1577 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.
 * val: 0x00 off, 0x01 general discoverable, 0x02 limited discoverable.
 * Validates the val/timeout combination, handles the powered-off case
 * by flipping flags only, and otherwise builds an HCI request (IAC LAP
 * + scan enable for BR/EDR, advertising data for LE-only) completed by
 * set_discoverable_complete().
 * NOTE(review): interior lines are elided; comments cover only visible
 * statements. */
1580 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1583 	struct mgmt_cp_set_discoverable *cp = data;
1584 	struct pending_cmd *cmd;
1585 	struct hci_request req;
1590 	BT_DBG("request for %s", hdev->name);
	/* Need at least one of BR/EDR or LE enabled. */
1592 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1593 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1594 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 				  MGMT_STATUS_REJECTED);
1597 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1598 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 				  MGMT_STATUS_INVALID_PARAMS);
1601 	timeout = __le16_to_cpu(cp->timeout);
1603 	/* Disabling discoverable requires that no timeout is set,
1604 	 * and enabling limited discoverable requires a timeout.
1606 	if ((cp->val == 0x00 && timeout > 0) ||
1607 	    (cp->val == 0x02 && timeout == 0))
1608 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 				  MGMT_STATUS_INVALID_PARAMS);
	/* A timeout needs a powered controller to arm the timer. */
1613 	if (!hdev_is_powered(hdev) && timeout > 0) {
1614 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1615 				  MGMT_STATUS_NOT_POWERED);
	/* Serialize against other in-flight discoverable/connectable ops. */
1619 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1620 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1621 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
	/* Discoverable requires connectable to be enabled first. */
1626 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1627 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1628 				  MGMT_STATUS_REJECTED);
	/* Powered off: just toggle the flag, no HCI traffic needed. */
1632 	if (!hdev_is_powered(hdev)) {
1633 		bool changed = false;
1635 		/* Setting limited discoverable when powered off is
1636 		 * not a valid operation since it requires a timeout
1637 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1639 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1640 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1644 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1649 		err = new_settings(hdev, sk);
1654 	/* If the current mode is the same, then just update the timeout
1655 	 * value with the new value. And if only the timeout gets updated,
1656 	 * then no need for any HCI transactions.
1658 	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1659 	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1660 					  &hdev->dev_flags)) {
1661 		cancel_delayed_work(&hdev->discov_off);
1662 		hdev->discov_timeout = timeout;
1664 		if (cp->val && hdev->discov_timeout > 0) {
1665 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1666 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1670 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1674 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1680 	/* Cancel any potential discoverable timeout that might be
1681 	 * still active and store new timeout value. The arming of
1682 	 * the timeout happens in the complete handler.
1684 	cancel_delayed_work(&hdev->discov_off);
1685 	hdev->discov_timeout = timeout;
1687 	/* Limited discoverable mode */
1688 	if (cp->val == 0x02)
1689 		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1691 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1693 	hci_req_init(&req, hdev);
1695 	/* The procedure for LE-only controllers is much simpler - just
1696 	 * update the advertising data.
1698 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1704 		struct hci_cp_write_current_iac_lap hci_cp;
1706 		if (cp->val == 0x02) {
1707 			/* Limited discoverable mode */
			/* LIAC = 0x9e8b00, GIAC = 0x9e8b33, little-endian
			 * byte order in the command payload. */
1708 			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1709 			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1710 			hci_cp.iac_lap[1] = 0x8b;
1711 			hci_cp.iac_lap[2] = 0x9e;
1712 			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1713 			hci_cp.iac_lap[4] = 0x8b;
1714 			hci_cp.iac_lap[5] = 0x9e;
1716 			/* General discoverable mode */
1718 			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1719 			hci_cp.iac_lap[1] = 0x8b;
1720 			hci_cp.iac_lap[2] = 0x9e;
		/* Payload length = 1 (num_iac) + 3 bytes per LAP. */
1723 		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1724 			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1726 		scan |= SCAN_INQUIRY;
1728 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1731 	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1734 	update_adv_data(&req);
1736 	err = hci_req_run(&req, set_discoverable_complete);
1738 		mgmt_pending_remove(cmd);
1741 	hci_dev_unlock(hdev);
/* Queue HCI commands that switch page-scan parameters between "fast
 * connectable" (interlaced scan, short interval) and the defaults.
 * Commands are added only when the requested values differ from the
 * controller's current ones. BR/EDR-disabled or pre-1.2 controllers
 * are skipped (elided early-return bodies at 1751/1754). */
1745 static void write_fast_connectable(struct hci_request *req, bool enable)
1747 	struct hci_dev *hdev = req->hdev;
1748 	struct hci_cp_write_page_scan_activity acp;
1751 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
	/* Page scan type/activity commands require Bluetooth 1.2+. */
1754 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1758 		type = PAGE_SCAN_TYPE_INTERLACED;
1760 		/* 160 msec page scan interval */
1761 		acp.interval = cpu_to_le16(0x0100);
1763 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1765 		/* default 1.28 sec page scan */
1766 		acp.interval = cpu_to_le16(0x0800);
	/* Window (0x0012 = 11.25 ms) is the same in both modes. */
1769 	acp.window = cpu_to_le16(0x0012);
1771 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1772 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1773 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1776 	if (hdev->page_scan_type != type)
1777 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for SET_CONNECTABLE: on success commit
 * HCI_CONNECTABLE (and clear HCI_DISCOVERABLE when disabling), answer
 * the requester, and refresh page scan / advertising / background scan
 * when anything changed. On HCI failure just report the error. */
1780 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1782 	struct pending_cmd *cmd;
1783 	struct mgmt_mode *cp;
1784 	bool conn_changed, discov_changed;
1786 	BT_DBG("status 0x%02x", status);
1790 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1795 		u8 mgmt_err = mgmt_status(status);
1796 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1802 		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1804 		discov_changed = false;
	/* Disabling connectable also implies not discoverable. */
1806 		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1808 		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1812 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1814 	if (conn_changed || discov_changed) {
1815 		new_settings(hdev, cmd->sk);
1816 		hci_update_page_scan(hdev, NULL);
1818 			mgmt_update_adv_data(hdev);
1819 		hci_update_background_scan(hdev);
1823 	mgmt_pending_remove(cmd);
1826 	hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when powered off or when no
 * HCI commands turned out to be needed): toggle HCI_CONNECTABLE, clear
 * HCI_DISCOVERABLE on disable, respond, and broadcast NEW_SETTINGS when
 * the state actually changed. */
1829 static int set_connectable_update_settings(struct hci_dev *hdev,
1830 					   struct sock *sk, u8 val)
1832 	bool changed = false;
1835 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1839 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1841 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1842 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1845 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1850 		hci_update_page_scan(hdev, NULL);
1851 		hci_update_background_scan(hdev);
1852 		return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validates input, takes the
 * flag-only path when powered off, serializes against pending
 * discoverable/connectable ops, and otherwise builds an HCI request
 * (scan enable, fast-connectable teardown, advertising update)
 * completed by set_connectable_complete(). An -ENODATA result from
 * hci_req_run means no HCI commands were needed, so fall back to the
 * settings-only helper.
 * NOTE(review): interior lines are elided in this excerpt. */
1858 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1861 	struct mgmt_mode *cp = data;
1862 	struct pending_cmd *cmd;
1863 	struct hci_request req;
1867 	BT_DBG("request for %s", hdev->name);
	/* Need at least one of BR/EDR or LE enabled. */
1869 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1870 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1871 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1872 				  MGMT_STATUS_REJECTED);
1874 	if (cp->val != 0x00 && cp->val != 0x01)
1875 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1876 				  MGMT_STATUS_INVALID_PARAMS);
	/* Powered off: no HCI traffic possible, just update flags. */
1880 	if (!hdev_is_powered(hdev)) {
1881 		err = set_connectable_update_settings(hdev, sk, cp->val);
1885 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1886 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1887 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1892 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1898 	hci_req_init(&req, hdev);
1900 	/* If BR/EDR is not enabled and we disable advertising as a
1901 	 * by-product of disabling connectable, we need to update the
1902 	 * advertising flags.
1904 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1906 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1907 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1909 		update_adv_data(&req);
1910 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1914 			/* If we don't have any whitelist entries just
1915 			 * disable all scanning. If there are entries
1916 			 * and we had both page and inquiry scanning
1917 			 * enabled then fall back to only page scanning.
1918 			 * Otherwise no changes are needed.
1920 			if (list_empty(&hdev->whitelist))
1921 				scan = SCAN_DISABLED;
1922 			else if (test_bit(HCI_ISCAN, &hdev->flags))
1925 				goto no_scan_update;
		/* Turning connectable off while discoverable with a timer
		 * running: stop the discoverable timeout. */
1927 			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1928 			    hdev->discov_timeout > 0)
1929 				cancel_delayed_work(&hdev->discov_off);
1932 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1936 	/* If we're going from non-connectable to connectable or
1937 	 * vice-versa when fast connectable is enabled ensure that fast
1938 	 * connectable gets disabled. write_fast_connectable won't do
1939 	 * anything if the page scan parameters are already what they
1942 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1943 		write_fast_connectable(&req, false);
1945 	/* Update the advertising parameters if necessary */
1946 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1947 		enable_advertising(&req);
1949 	err = hci_req_run(&req, set_connectable_complete);
1951 		mgmt_pending_remove(cmd);
	/* -ENODATA: request was empty, nothing to send — settle via flags. */
1952 		if (err == -ENODATA)
1953 			err = set_connectable_update_settings(hdev, sk,
1959 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, no HCI traffic.
 * Toggles HCI_BONDABLE, responds with the current settings, and
 * broadcasts NEW_SETTINGS when the flag actually changed. */
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1966 	struct mgmt_mode *cp = data;
1970 	BT_DBG("request for %s", hdev->name);
1972 	if (cp->val != 0x00 && cp->val != 0x01)
1973 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 				  MGMT_STATUS_INVALID_PARAMS);
1979 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1981 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1988 		err = new_settings(hdev, sk);
1991 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: BR/EDR authentication enable.
 * Powered off: flip the HCI_LINK_SECURITY flag only. Powered on: send
 * HCI_OP_WRITE_AUTH_ENABLE unless the controller already matches the
 * requested state. */
1995 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1998 	struct mgmt_mode *cp = data;
1999 	struct pending_cmd *cmd;
2003 	BT_DBG("request for %s", hdev->name);
	/* Link security is a BR/EDR-only feature. */
2005 	status = mgmt_bredr_support(hdev);
2007 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2010 	if (cp->val != 0x00 && cp->val != 0x01)
2011 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 				  MGMT_STATUS_INVALID_PARAMS);
2016 	if (!hdev_is_powered(hdev)) {
2017 		bool changed = false;
2019 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2020 					  &hdev->dev_flags)) {
2021 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2025 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2030 			err = new_settings(hdev, sk);
2035 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2036 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
	/* Controller auth state already matches — nothing to send. */
2043 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2044 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2048 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2054 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2056 		mgmt_pending_remove(cmd);
2061 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: Secure Simple Pairing enable/disable.
 * Disabling SSP also tears down High Speed (HS depends on SSP).
 * Powered off: flag changes only. Powered on: send
 * HCI_OP_WRITE_SSP_MODE (and first clear SSP debug mode when
 * disabling with debug keys in use). */
2065 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 	struct mgmt_mode *cp = data;
2068 	struct pending_cmd *cmd;
2072 	BT_DBG("request for %s", hdev->name);
2074 	status = mgmt_bredr_support(hdev);
2076 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2078 	if (!lmp_ssp_capable(hdev))
2079 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2080 				  MGMT_STATUS_NOT_SUPPORTED);
2082 	if (cp->val != 0x00 && cp->val != 0x01)
2083 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2084 				  MGMT_STATUS_INVALID_PARAMS);
2088 	if (!hdev_is_powered(hdev)) {
2092 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
2095 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
			/* SSP off implies HS off as well. */
2098 				changed = test_and_clear_bit(HCI_HS_ENABLED,
2101 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2104 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2109 			err = new_settings(hdev, sk);
2114 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2115 	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2116 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2121 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2122 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2126 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	/* cp->val is 0x00 here, reused as the debug-mode-off payload. */
2132 	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2133 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2134 			     sizeof(cp->val), &cp->val);
2136 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2138 		mgmt_pending_remove(cmd);
2143 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the host-side High Speed flag.
 * Requires BR/EDR, SSP capability and SSP enabled; disabling while
 * powered is rejected per the visible 2177-2179 branch. Pure flag
 * operation — no HCI commands are sent. */
2147 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2149 	struct mgmt_mode *cp = data;
2154 	BT_DBG("request for %s", hdev->name);
2156 	status = mgmt_bredr_support(hdev);
2158 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2160 	if (!lmp_ssp_capable(hdev))
2161 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2162 				  MGMT_STATUS_NOT_SUPPORTED);
	/* HS requires SSP to be enabled first. */
2164 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2165 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2166 				  MGMT_STATUS_REJECTED);
2168 	if (cp->val != 0x00 && cp->val != 0x01)
2169 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2170 				  MGMT_STATUS_INVALID_PARAMS);
2175 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2177 		if (hdev_is_powered(hdev)) {
2178 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2179 					 MGMT_STATUS_REJECTED);
2183 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2186 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2191 		err = new_settings(hdev, sk);
2194 	hci_dev_unlock(hdev);
/* HCI request completion handler for SET_LE: fail or answer all pending
 * SET_LE commands, broadcast NEW_SETTINGS, and — when LE ended up
 * enabled — refresh advertising/scan-response data and restart
 * background scanning. */
2198 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2200 	struct cmd_lookup match = { NULL, hdev };
2205 		u8 mgmt_err = mgmt_status(status);
2207 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2212 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2214 		new_settings(hdev, match.sk);
2219 	/* Make sure the controller has a good default for
2220 	 * advertising data. Restrict the update to when LE
2221 	 * has actually been enabled. During power on, the
2222 	 * update in powered_update_hci will take care of it.
2224 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2225 		struct hci_request req;
2227 		hci_req_init(&req, hdev);
2228 		update_adv_data(&req);
2229 		update_scan_rsp_data(&req);
2230 		hci_req_run(&req, NULL);
2232 		hci_update_background_scan(hdev);
2236 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE support on a dual-mode
 * controller (LE-only devices cannot toggle it). When the host LE
 * state already matches or the device is off, only flags change;
 * otherwise HCI_OP_WRITE_LE_HOST_SUPPORTED is sent, preceded by
 * disabling advertising when LE is being turned off while advertising.
 * NOTE(review): interior lines are elided in this excerpt. */
2239 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2241 	struct mgmt_mode *cp = data;
2242 	struct hci_cp_write_le_host_supported hci_cp;
2243 	struct pending_cmd *cmd;
2244 	struct hci_request req;
2248 	BT_DBG("request for %s", hdev->name);
2250 	if (!lmp_le_capable(hdev))
2251 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2252 				  MGMT_STATUS_NOT_SUPPORTED);
2254 	if (cp->val != 0x00 && cp->val != 0x01)
2255 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2256 				  MGMT_STATUS_INVALID_PARAMS);
2258 	/* LE-only devices do not allow toggling LE on/off */
2259 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2260 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2261 				  MGMT_STATUS_REJECTED);
2266 	enabled = lmp_host_le_capable(hdev);
	/* Flag-only path: powered off, or controller state already right. */
2268 	if (!hdev_is_powered(hdev) || val == enabled) {
2269 		bool changed = false;
2271 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2272 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* LE off implies advertising off. */
2276 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2277 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2281 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2286 			err = new_settings(hdev, sk);
2291 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2292 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2293 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2298 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2304 	hci_req_init(&req, hdev);
2306 	memset(&hci_cp, 0, sizeof(hci_cp));
	/* simul (simultaneous LE + BR/EDR) is always reported as off. */
2310 		hci_cp.simul = 0x00;
2312 		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2313 			disable_advertising(&req);
2316 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2319 	err = hci_req_run(&req, le_enable_complete);
2321 		mgmt_pending_remove(cmd);
2324 	hci_dev_unlock(hdev);
2328 /* This is a helper function to test for pending mgmt commands that can
2329  * cause CoD or EIR HCI commands. We can only allow one such pending
2330  * mgmt command at a time since otherwise we cannot easily track what
2331  * the current values are, will be, and based on that calculate if a new
2332  * HCI command needs to be sent and if yes with what value.
2334 static bool pending_eir_or_class(struct hci_dev *hdev)
2336 	struct pending_cmd *cmd;
	/* Scan the pending list for any opcode that touches EIR/CoD. */
2338 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2339 		switch (cmd->opcode) {
2340 		case MGMT_OP_ADD_UUID:
2341 		case MGMT_OP_REMOVE_UUID:
2342 		case MGMT_OP_SET_DEV_CLASS:
2343 		case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs. */
2351 static const u8 bluetooth_base_uuid[] = {
2352 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2353 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16, 32 or 128 bits wide by comparing its
 * low 12 bytes against the Bluetooth Base UUID and inspecting the value
 * encoded at offset 12. (Return statements are elided in this excerpt.) */
2356 static u8 get_uuid_size(const u8 *uuid)
	/* Not derived from the base UUID: it is a full 128-bit UUID. */
2360 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2363 	val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class operations: respond to the pending
 * command for @mgmt_op with the (3-byte) current device class and drop
 * it from the pending list. */
2370 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2372 	struct pending_cmd *cmd;
2376 	cmd = mgmt_pending_find(mgmt_op, hdev);
2380 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2381 		     hdev->dev_class, 3);
2383 	mgmt_pending_remove(cmd);
2386 	hci_dev_unlock(hdev);
/* HCI request completion for ADD_UUID — delegate to the shared helper. */
2389 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2391 	BT_DBG("status 0x%02x", status);
2393 	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append a UUID to hdev->uuids and kick off
 * the HCI request that refreshes CoD/EIR. -ENODATA from hci_req_run
 * means no HCI update was needed, so complete immediately with the
 * current device class. */
2396 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 	struct mgmt_cp_add_uuid *cp = data;
2399 	struct pending_cmd *cmd;
2400 	struct hci_request req;
2401 	struct bt_uuid *uuid;
2404 	BT_DBG("request for %s", hdev->name);
	/* Only one EIR/CoD-affecting command may be in flight at a time. */
2408 	if (pending_eir_or_class(hdev)) {
2409 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2414 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2420 	memcpy(uuid->uuid, cp->uuid, 16);
2421 	uuid->svc_hint = cp->svc_hint;
2422 	uuid->size = get_uuid_size(cp->uuid);
2424 	list_add_tail(&uuid->list, &hdev->uuids);
2426 	hci_req_init(&req, hdev);
2431 	err = hci_req_run(&req, add_uuid_complete);
2433 		if (err != -ENODATA)
	/* Nothing to send: answer now with the current class of device. */
2436 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2437 				   hdev->dev_class, 3);
2441 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2450 	hci_dev_unlock(hdev);
/* Arm the service-cache delayed work (only once, and only when the
 * device is powered). Return values are on elided lines — presumably
 * true when caching was (re)armed; confirm in full source. */
2454 static bool enable_service_cache(struct hci_dev *hdev)
2456 	if (!hdev_is_powered(hdev))
2459 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2460 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for REMOVE_UUID — delegate to the shared helper. */
2468 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2470 	BT_DBG("status 0x%02x", status);
2472 	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID from hdev->uuids, or —
 * when the all-zero wildcard UUID is given — clear the whole list and
 * re-enable the service cache. Then refresh CoD/EIR via an HCI request
 * (-ENODATA means nothing needed sending). */
2475 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2478 	struct mgmt_cp_remove_uuid *cp = data;
2479 	struct pending_cmd *cmd;
2480 	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard. */
2481 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2482 	struct hci_request req;
2485 	BT_DBG("request for %s", hdev->name);
2489 	if (pending_eir_or_class(hdev)) {
2490 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2495 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2496 		hci_uuids_clear(hdev);
2498 		if (enable_service_cache(hdev)) {
2499 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2500 					   0, hdev->dev_class, 3);
	/* Remove every list entry matching the requested UUID. */
2509 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2510 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2513 		list_del(&match->list);
	/* No entry matched: the UUID was never registered. */
2519 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2520 				 MGMT_STATUS_INVALID_PARAMS);
2525 	hci_req_init(&req, hdev);
2530 	err = hci_req_run(&req, remove_uuid_complete);
2532 		if (err != -ENODATA)
2535 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2536 				   hdev->dev_class, 3);
2540 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2549 	hci_dev_unlock(hdev);
/* HCI request completion for SET_DEV_CLASS — delegate to the shared helper. */
2553 static void set_class_complete(struct hci_dev *hdev, u8 status)
2555 	BT_DBG("status 0x%02x", status);
2557 	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store major/minor class of device and
 * push the updated CoD to the controller. The low two bits of minor and
 * the high three bits of major are format/reserved bits and must be
 * zero. Powered off: store only, respond with current class. */
2560 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2563 	struct mgmt_cp_set_dev_class *cp = data;
2564 	struct pending_cmd *cmd;
2565 	struct hci_request req;
2568 	BT_DBG("request for %s", hdev->name);
2570 	if (!lmp_bredr_capable(hdev))
2571 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2572 				  MGMT_STATUS_NOT_SUPPORTED);
2576 	if (pending_eir_or_class(hdev)) {
2577 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2582 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2583 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2584 				 MGMT_STATUS_INVALID_PARAMS);
2588 	hdev->major_class = cp->major;
2589 	hdev->minor_class = cp->minor;
2591 	if (!hdev_is_powered(hdev)) {
2592 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2593 				   hdev->dev_class, 3);
2597 	hci_req_init(&req, hdev);
	/* Flush the service cache synchronously before changing CoD; the
	 * lock is dropped because the work item may take it itself. */
2599 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2600 		hci_dev_unlock(hdev);
2601 		cancel_delayed_work_sync(&hdev->service_cache);
2608 	err = hci_req_run(&req, set_class_complete);
2610 		if (err != -ENODATA)
2613 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2614 				   hdev->dev_class, 3);
2618 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2627 	hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the BR/EDR link-key store
 * with the supplied list. Validates key_count against both the u16
 * overflow bound and the actual payload length before touching any
 * state, validates each key's address type and key type, then clears
 * and repopulates the store. Debug combination keys are deliberately
 * skipped. */
2631 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2634 	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps sizeof(*cp) + count*keysize within U16_MAX. */
2635 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2636 				   sizeof(struct mgmt_link_key_info));
2637 	u16 key_count, expected_len;
2641 	BT_DBG("request for %s", hdev->name);
2643 	if (!lmp_bredr_capable(hdev))
2644 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2645 				  MGMT_STATUS_NOT_SUPPORTED);
2647 	key_count = __le16_to_cpu(cp->key_count);
2648 	if (key_count > max_key_count) {
2649 		BT_ERR("load_link_keys: too big key_count value %u",
2651 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2652 				  MGMT_STATUS_INVALID_PARAMS);
	/* Declared count must exactly match the bytes actually received. */
2655 	expected_len = sizeof(*cp) + key_count *
2656 					sizeof(struct mgmt_link_key_info);
2657 	if (expected_len != len) {
2658 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2660 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2661 				  MGMT_STATUS_INVALID_PARAMS);
2664 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2665 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2666 				  MGMT_STATUS_INVALID_PARAMS);
2668 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	/* Validate every key before mutating the key store. */
2671 	for (i = 0; i < key_count; i++) {
2672 		struct mgmt_link_key_info *key = &cp->keys[i];
2674 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2675 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2676 					  MGMT_STATUS_INVALID_PARAMS);
2681 	hci_link_keys_clear(hdev);
2684 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2687 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2691 		new_settings(hdev, NULL);
2693 	for (i = 0; i < key_count; i++) {
2694 		struct mgmt_link_key_info *key = &cp->keys[i];
2696 		/* Always ignore debug keys and require a new pairing if
2697 		 * the user wants to use them.
2699 		if (key->type == HCI_LK_DEBUG_COMBINATION)
2702 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2703 				 key->type, key->pin_len, NULL);
2706 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2708 	hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to every mgmt
 * socket except @skip_sk. */
2713 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 			   u8 addr_type, struct sock *skip_sk)
2716 	struct mgmt_ev_device_unpaired ev;
2718 	bacpy(&ev.addr.bdaddr, bdaddr);
2719 	ev.addr.type = addr_type;
2721 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing material (link key
 * for BR/EDR; IRK + LTK for LE) for one remote device and, when
 * requested and the device is connected, terminate the link via
 * HCI_OP_DISCONNECT with reason "remote user terminated".
 * NOTE(review): interior lines are elided in this excerpt. */
2725 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2728 	struct mgmt_cp_unpair_device *cp = data;
2729 	struct mgmt_rp_unpair_device rp;
2730 	struct hci_cp_disconnect dc;
2731 	struct pending_cmd *cmd;
2732 	struct hci_conn *conn;
	/* The response always echoes the target address back. */
2735 	memset(&rp, 0, sizeof(rp));
2736 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2737 	rp.addr.type = cp->addr.type;
2739 	if (!bdaddr_type_is_valid(cp->addr.type))
2740 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2741 				    MGMT_STATUS_INVALID_PARAMS,
2744 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2745 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2746 				    MGMT_STATUS_INVALID_PARAMS,
2751 	if (!hdev_is_powered(hdev)) {
2752 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2753 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2757 	if (cp->addr.type == BDADDR_BREDR) {
2758 		/* If disconnection is requested, then look up the
2759 		 * connection. If the remote device is connected, it
2760 		 * will be later used to terminate the link.
2762 		 * Setting it to NULL explicitly will cause no
2763 		 * termination of the link.
2766 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2771 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2775 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2778 			/* Defer clearing up the connection parameters
2779 			 * until closing to give a chance of keeping
2780 			 * them if a repairing happens.
2782 			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2784 			/* If disconnection is not requested, then
2785 			 * clear the connection variable so that the
2786 			 * link is not terminated.
2788 			if (!cp->disconnect)
2792 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2793 			addr_type = ADDR_LE_DEV_PUBLIC;
2795 			addr_type = ADDR_LE_DEV_RANDOM;
	/* LE pairing material: remove the IRK, then the LTK. */
2797 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2799 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	/* Nothing was stored for this address. */
2803 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2804 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2808 	/* If the connection variable is set, then termination of the
2809 	 * link is requested.
2812 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2814 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2818 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2825 	cmd->cmd_complete = addr_cmd_complete;
2827 	dc.handle = cpu_to_le16(conn->handle);
2828 	dc.reason = 0x13; /* Remote User Terminated Connection */
2829 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2831 		mgmt_pending_remove(cmd);
2834 	hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: look up the ACL or LE connection for the
 * given address and initiate disconnection with reason "remote user
 * terminated". The pending command is completed asynchronously with
 * the echoed address (generic_cmd_complete). */
2838 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2841 	struct mgmt_cp_disconnect *cp = data;
2842 	struct mgmt_rp_disconnect rp;
2843 	struct pending_cmd *cmd;
2844 	struct hci_conn *conn;
2849 	memset(&rp, 0, sizeof(rp));
2850 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2851 	rp.addr.type = cp->addr.type;
2853 	if (!bdaddr_type_is_valid(cp->addr.type))
2854 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2855 				    MGMT_STATUS_INVALID_PARAMS,
2860 	if (!test_bit(HCI_UP, &hdev->flags)) {
2861 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2862 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
	/* One DISCONNECT at a time per adapter. */
2866 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2867 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2868 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2872 	if (cp->addr.type == BDADDR_BREDR)
2873 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2876 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
	/* Not connected (or connection not yet/no longer established). */
2878 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2879 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2880 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2884 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2890 	cmd->cmd_complete = generic_cmd_complete;
2892 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2894 		mgmt_pending_remove(cmd);
2897 	hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into a mgmt BDADDR_*
 * address type. LE public maps to BDADDR_LE_PUBLIC; other LE address
 * types fall back to LE random; non-LE links fall back to BR/EDR.
 * (The outer switch's case labels are on elided lines.) */
2901 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2903 	switch (link_type) {
2905 		switch (addr_type) {
2906 		case ADDR_LE_DEV_PUBLIC:
2907 			return BDADDR_LE_PUBLIC;
2910 			/* Fallback to LE Random address type */
2911 			return BDADDR_LE_RANDOM;
2915 		/* Fallback to BR/EDR type */
2916 		return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: two passes over the connection hash
 * — first to count mgmt-visible connections and size the response,
 * then to fill in the address list. SCO/eSCO links are filtered on the
 * second pass, so the length is recomputed before replying. */
2920 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2923 	struct mgmt_rp_get_connections *rp;
2933 	if (!hdev_is_powered(hdev)) {
2934 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2935 				 MGMT_STATUS_NOT_POWERED);
	/* Pass 1: count connections flagged as mgmt-connected. */
2940 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2941 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2945 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2946 	rp = kmalloc(rp_len, GFP_KERNEL);
	/* Pass 2: fill addresses, skipping SCO/eSCO links. */
2953 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2954 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2956 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2957 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2958 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2963 	rp->conn_count = cpu_to_le16(i);
2965 	/* Recalculate length in case of filtered SCO connections, etc */
2966 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2968 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2974 	hci_dev_unlock(hdev);
/* Queue a PIN_CODE_NEG_REPLY as a pending mgmt command and send the
 * corresponding HCI negative reply; on send failure the pending entry
 * is removed again. */
2978 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2979 				   struct mgmt_cp_pin_code_neg_reply *cp)
2981 	struct pending_cmd *cmd;
2984 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2989 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2990 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2992 		mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller via HCI_OP_PIN_CODE_REPLY. When the connection's pending
 * security level requires a 16-byte PIN and the supplied one is
 * shorter, the PIN is rejected with an automatic negative reply. */
2997 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3000 	struct hci_conn *conn;
3001 	struct mgmt_cp_pin_code_reply *cp = data;
3002 	struct hci_cp_pin_code_reply reply;
3003 	struct pending_cmd *cmd;
3010 	if (!hdev_is_powered(hdev)) {
3011 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3012 				 MGMT_STATUS_NOT_POWERED);
	/* PIN pairing only exists on a BR/EDR (ACL) connection. */
3016 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3018 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3019 				 MGMT_STATUS_NOT_CONNECTED);
	/* High security requires a full 16-byte PIN. */
3023 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3024 		struct mgmt_cp_pin_code_neg_reply ncp;
3026 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3028 		BT_ERR("PIN code is not 16 bytes long");
3030 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3032 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3033 					 MGMT_STATUS_INVALID_PARAMS);
3038 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3044 	cmd->cmd_complete = addr_cmd_complete;
3046 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3047 	reply.pin_len = cp->pin_len;
3048 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3050 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3052 		mgmt_pending_remove(cmd);
3055 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY.
 *
 * Validates the requested IO capability (must not exceed
 * SMP_IO_KEYBOARD_DISPLAY) and stores it in hdev->io_capability for use
 * during subsequent pairing attempts.  Always replies with a command
 * complete event; invalid values are reported as INVALID_PARAMS.
 */
3059 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3062 struct mgmt_cp_set_io_capability *cp = data;
3066 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3067 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3068 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3072 hdev->io_capability = cp->io_capability;
3074 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3075 hdev->io_capability);
3077 hci_dev_unlock(hdev);
3079 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at the given connection, or NULL if no such pairing is in progress.
 * Caller is expected to hold the appropriate hdev lock (not visible in
 * this excerpt).
 */
3083 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3085 struct hci_dev *hdev = conn->hdev;
3086 struct pending_cmd *cmd;
3088 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3089 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3092 if (cmd->user_data != conn)
/* Finalize a Pair Device operation: send the MGMT_OP_PAIR_DEVICE
 * command-complete with the peer address and the given status, detach
 * all pairing callbacks from the connection so no further events are
 * delivered, drop the connection reference taken when pairing started,
 * and remove the pending command.
 */
3101 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3103 struct mgmt_rp_pair_device rp;
3104 struct hci_conn *conn = cmd->user_data;
3106 bacpy(&rp.addr.bdaddr, &conn->dst);
3107 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3109 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3112 /* So we don't get further callbacks for this connection */
3113 conn->connect_cfm_cb = NULL;
3114 conn->security_cfm_cb = NULL;
3115 conn->disconn_cfm_cb = NULL;
3117 hci_conn_drop(conn);
3120 mgmt_pending_remove(cmd);
3122 /* The device is paired so there is no need to remove
3123 * its connection parameters anymore.
3125 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes.  Maps the
 * boolean outcome to a mgmt status and completes the matching pending
 * Pair Device command, if one exists for this connection.
 */
3128 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3130 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3131 struct pending_cmd *cmd;
3133 cmd = find_pairing(conn);
3135 cmd->cmd_complete(cmd, status);
/* Connection callback used for BR/EDR pairing: translate the HCI status
 * to a mgmt status and complete the pending Pair Device command.  Logs
 * a debug message when no matching pending command is found.
 */
3138 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3140 struct pending_cmd *cmd;
3142 BT_DBG("status %u", status);
3144 cmd = find_pairing(conn);
3146 BT_DBG("Unable to find a pending command");
3148 cmd->cmd_complete(cmd, mgmt_status(status));
/* Connection callback used for LE pairing.  Same flow as
 * pairing_complete_cb; the LE-specific filtering (e.g. ignoring
 * success-only notifications) sits on lines not visible in this
 * excerpt — confirm against the full file.
 */
3151 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3153 struct pending_cmd *cmd;
3155 BT_DBG("status %u", status);
3160 cmd = find_pairing(conn);
3162 BT_DBG("Unable to find a pending command");
3164 cmd->cmd_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE.
 *
 * Validates the address type and requested IO capability, requires a
 * powered adapter, then initiates either a BR/EDR ACL connection
 * (dedicated bonding) or an LE connection.  For LE, connection
 * parameters are pre-registered so the peer is remembered for future
 * connections.  A pending command tracks the operation; pairing
 * callbacks on the connection route completion back through
 * pairing_complete()/ *_cb().  If the link is already up and secure
 * enough, the pairing completes immediately.
 */
3167 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3170 struct mgmt_cp_pair_device *cp = data;
3171 struct mgmt_rp_pair_device rp;
3172 struct pending_cmd *cmd;
3173 u8 sec_level, auth_type;
3174 struct hci_conn *conn;
3179 memset(&rp, 0, sizeof(rp));
3180 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3181 rp.addr.type = cp->addr.type;
3183 if (!bdaddr_type_is_valid(cp->addr.type))
3184 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3185 MGMT_STATUS_INVALID_PARAMS,
3188 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3189 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3190 MGMT_STATUS_INVALID_PARAMS,
3195 if (!hdev_is_powered(hdev)) {
3196 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3197 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3201 sec_level = BT_SECURITY_MEDIUM;
3202 auth_type = HCI_AT_DEDICATED_BONDING;
3204 if (cp->addr.type == BDADDR_BREDR) {
3205 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3210 /* Convert from L2CAP channel address type to HCI address type
3212 if (cp->addr.type == BDADDR_LE_PUBLIC)
3213 addr_type = ADDR_LE_DEV_PUBLIC;
3215 addr_type = ADDR_LE_DEV_RANDOM;
3217 /* When pairing a new device, it is expected to remember
3218 * this device for future connections. Adding the connection
3219 * parameter information ahead of time allows tracking
3220 * of the slave preferred values and will speed up any
3221 * further connection establishment.
3223 * If connection parameters already exist, then they
3224 * will be kept and this function does nothing.
3226 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3228 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3229 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: distinguish "busy" from generic failure */
3236 if (PTR_ERR(conn) == -EBUSY)
3237 status = MGMT_STATUS_BUSY;
3239 status = MGMT_STATUS_CONNECT_FAILED;
3241 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A pairing is already in progress on this connection */
3247 if (conn->connect_cfm_cb) {
3248 hci_conn_drop(conn);
3249 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3250 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3254 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3257 hci_conn_drop(conn);
3261 cmd->cmd_complete = pairing_complete;
3263 /* For LE, just connecting isn't a proof that the pairing finished */
3264 if (cp->addr.type == BDADDR_BREDR) {
3265 conn->connect_cfm_cb = pairing_complete_cb;
3266 conn->security_cfm_cb = pairing_complete_cb;
3267 conn->disconn_cfm_cb = pairing_complete_cb;
3269 conn->connect_cfm_cb = le_pairing_complete_cb;
3270 conn->security_cfm_cb = le_pairing_complete_cb;
3271 conn->disconn_cfm_cb = le_pairing_complete_cb;
3274 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command */
3275 cmd->user_data = hci_conn_get(conn);
3277 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3278 hci_conn_security(conn, sec_level, auth_type, true))
3279 pairing_complete(cmd, 0);
3284 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE.
 *
 * Requires a powered adapter and an outstanding Pair Device command
 * whose target address matches the supplied one; the pairing is then
 * aborted with MGMT_STATUS_CANCELLED and the cancel itself is confirmed
 * back to the caller.
 */
3288 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3291 struct mgmt_addr_info *addr = data;
3292 struct pending_cmd *cmd;
3293 struct hci_conn *conn;
3300 if (!hdev_is_powered(hdev)) {
3301 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3302 MGMT_STATUS_NOT_POWERED);
3306 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3308 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3309 MGMT_STATUS_INVALID_PARAMS);
3313 conn = cmd->user_data;
/* The cancel must name the same device the pairing targets */
3315 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3316 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3317 MGMT_STATUS_INVALID_PARAMS);
3321 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3323 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3324 addr, sizeof(*addr));
3326 hci_dev_unlock(hdev);
/* Common implementation for user-interaction pairing replies
 * (PIN neg reply, user confirm reply/neg reply, passkey reply/neg
 * reply).
 *
 * Looks up the connection for the given address (ACL for BR/EDR, LE
 * otherwise).  LE responses are handled entirely by the SMP layer via
 * smp_user_confirm_reply(); BR/EDR responses are forwarded to the
 * controller as the given hci_op, with the passkey included only for
 * HCI_OP_USER_PASSKEY_REPLY.  The mgmt command is tracked as pending
 * until the controller confirms.
 */
3330 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3331 struct mgmt_addr_info *addr, u16 mgmt_op,
3332 u16 hci_op, __le32 passkey)
3334 struct pending_cmd *cmd;
3335 struct hci_conn *conn;
3340 if (!hdev_is_powered(hdev)) {
3341 err = cmd_complete(sk, hdev->id, mgmt_op,
3342 MGMT_STATUS_NOT_POWERED, addr,
3347 if (addr->type == BDADDR_BREDR)
3348 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3350 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3353 err = cmd_complete(sk, hdev->id, mgmt_op,
3354 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: the reply goes to the SMP state machine, not to HCI */
3359 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3360 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3362 err = cmd_complete(sk, hdev->id, mgmt_op,
3363 MGMT_STATUS_SUCCESS, addr,
3366 err = cmd_complete(sk, hdev->id, mgmt_op,
3367 MGMT_STATUS_FAILED, addr,
3373 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3379 cmd->cmd_complete = addr_cmd_complete;
3381 /* Continue with pairing via HCI */
3382 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3383 struct hci_cp_user_passkey_reply cp;
3385 bacpy(&cp.bdaddr, &addr->bdaddr);
3386 cp.passkey = passkey;
3387 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3389 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Sending failed: drop the pending entry again */
3393 mgmt_pending_remove(cmd);
3396 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY — thin wrapper delegating to
 * user_pairing_resp() with the PIN-code-negative HCI opcode and no
 * passkey.
 */
3400 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3401 void *data, u16 len)
3403 struct mgmt_cp_pin_code_neg_reply *cp = data;
3407 return user_pairing_resp(sk, hdev, &cp->addr,
3408 MGMT_OP_PIN_CODE_NEG_REPLY,
3409 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY.  Unlike the other wrappers
 * this one validates the exact command length before delegating to
 * user_pairing_resp().
 */
3412 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3415 struct mgmt_cp_user_confirm_reply *cp = data;
3419 if (len != sizeof(*cp))
3420 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3421 MGMT_STATUS_INVALID_PARAMS);
3423 return user_pairing_resp(sk, hdev, &cp->addr,
3424 MGMT_OP_USER_CONFIRM_REPLY,
3425 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY — wrapper around
 * user_pairing_resp() with the negative-confirmation HCI opcode.
 */
3428 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3429 void *data, u16 len)
3431 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3435 return user_pairing_resp(sk, hdev, &cp->addr,
3436 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3437 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY — the only wrapper that
 * forwards an actual passkey value to user_pairing_resp().
 */
3440 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3443 struct mgmt_cp_user_passkey_reply *cp = data;
3447 return user_pairing_resp(sk, hdev, &cp->addr,
3448 MGMT_OP_USER_PASSKEY_REPLY,
3449 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY — wrapper around
 * user_pairing_resp() with the negative-passkey HCI opcode.
 */
3452 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3453 void *data, u16 len)
3455 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3459 return user_pairing_resp(sk, hdev, &cp->addr,
3460 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3461 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
3464 static void update_name(struct hci_request *req)
3466 struct hci_dev *hdev = req->hdev;
3467 struct hci_cp_write_local_name cp;
3469 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3471 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: report the HCI
 * outcome to the pending MGMT_OP_SET_LOCAL_NAME command (cmd_status on
 * failure, cmd_complete on success) and remove the pending entry.
 */
3474 static void set_name_complete(struct hci_dev *hdev, u8 status)
3476 struct mgmt_cp_set_local_name *cp;
3477 struct pending_cmd *cmd;
3479 BT_DBG("status 0x%02x", status);
3483 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3490 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3491 mgmt_status(status));
3493 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3496 mgmt_pending_remove(cmd);
3499 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME.
 *
 * Short-circuits with an immediate command complete when neither the
 * name nor the short name changes.  When the adapter is powered off the
 * names are only stored and a Local Name Changed event is emitted.
 * Otherwise the new name is written to the controller (and, on LE
 * capable adapters, the scan response data is refreshed) through an
 * HCI request completed by set_name_complete().
 */
3502 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3505 struct mgmt_cp_set_local_name *cp = data;
3506 struct pending_cmd *cmd;
3507 struct hci_request req;
3514 /* If the old values are the same as the new ones just return a
3515 * direct command complete event.
3517 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3518 !memcmp(hdev->short_name, cp->short_name,
3519 sizeof(hdev->short_name))) {
3520 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3525 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3527 if (!hdev_is_powered(hdev)) {
3528 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3530 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3535 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3541 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3547 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3549 hci_req_init(&req, hdev);
3551 if (lmp_bredr_capable(hdev)) {
3556 /* The name is stored in the scan response data and so
3557 * no need to udpate the advertising data here.
3559 if (lmp_le_capable(hdev))
3560 update_scan_rsp_data(&req);
3562 err = hci_req_run(&req, set_name_complete);
3564 mgmt_pending_remove(cmd);
3567 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Preconditions: adapter powered, SSP capable, and no such command
 * already pending.  Issues either the extended (Secure Connections
 * enabled) or the legacy Read Local OOB Data HCI command; on send
 * failure the pending entry is removed.
 */
3571 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3572 void *data, u16 data_len)
3574 struct pending_cmd *cmd;
3577 BT_DBG("%s", hdev->name);
3581 if (!hdev_is_powered(hdev)) {
3582 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3583 MGMT_STATUS_NOT_POWERED);
3587 if (!lmp_ssp_capable(hdev)) {
3588 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3589 MGMT_STATUS_NOT_SUPPORTED);
3593 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3594 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3599 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3605 if (bredr_sc_enabled(hdev))
3606 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3609 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3612 mgmt_pending_remove(cmd);
3615 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Accepts two payload sizes: the legacy form (192-bit hash/randomizer
 * only) and the extended form that additionally carries 256-bit Secure
 * Connections values.  Both forms only accept BR/EDR addresses here;
 * the stored data is handed to hci_add_remote_oob_data().  Any other
 * length is rejected with INVALID_PARAMS.
 */
3619 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3620 void *data, u16 len)
3624 BT_DBG("%s ", hdev->name);
3628 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3629 struct mgmt_cp_add_remote_oob_data *cp = data;
3632 if (cp->addr.type != BDADDR_BREDR) {
3633 err = cmd_complete(sk, hdev->id,
3634 MGMT_OP_ADD_REMOTE_OOB_DATA,
3635 MGMT_STATUS_INVALID_PARAMS,
3636 &cp->addr, sizeof(cp->addr));
3640 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3641 cp->addr.type, cp->hash,
3642 cp->rand, NULL, NULL);
3644 status = MGMT_STATUS_FAILED;
3646 status = MGMT_STATUS_SUCCESS;
3648 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3649 status, &cp->addr, sizeof(cp->addr));
3650 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3651 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3652 u8 *rand192, *hash192;
3655 if (cp->addr.type != BDADDR_BREDR) {
3656 err = cmd_complete(sk, hdev->id,
3657 MGMT_OP_ADD_REMOTE_OOB_DATA,
3658 MGMT_STATUS_INVALID_PARAMS,
3659 &cp->addr, sizeof(cp->addr));
/* 192-bit values only apply to BR/EDR; cleared for LE (branch body
 * partly missing from this excerpt — verify against the full file) */
3663 if (bdaddr_type_is_le(cp->addr.type)) {
3667 rand192 = cp->rand192;
3668 hash192 = cp->hash192;
3671 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3672 cp->addr.type, hash192, rand192,
3673 cp->hash256, cp->rand256);
3675 status = MGMT_STATUS_FAILED;
3677 status = MGMT_STATUS_SUCCESS;
3679 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3680 status, &cp->addr, sizeof(cp->addr));
3682 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3683 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA.
 *
 * Only BR/EDR addresses are accepted.  BDADDR_ANY acts as a wildcard
 * and clears all stored remote OOB data; otherwise a single entry is
 * removed, with a failed lookup reported as INVALID_PARAMS.
 */
3692 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3693 void *data, u16 len)
3695 struct mgmt_cp_remove_remote_oob_data *cp = data;
3699 BT_DBG("%s", hdev->name);
3701 if (cp->addr.type != BDADDR_BREDR)
3702 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3703 MGMT_STATUS_INVALID_PARAMS,
3704 &cp->addr, sizeof(cp->addr));
3708 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3709 hci_remote_oob_data_clear(hdev);
3710 status = MGMT_STATUS_SUCCESS;
3714 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3716 status = MGMT_STATUS_INVALID_PARAMS;
3718 status = MGMT_STATUS_SUCCESS;
3721 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3722 status, &cp->addr, sizeof(cp->addr));
3724 hci_dev_unlock(hdev);
/* Build the HCI commands needed to start discovery of the configured
 * type onto the given request.
 *
 * BR/EDR: flush the inquiry cache and queue a general (GIAC) inquiry,
 * refusing if one is already running.  LE/interleaved: may first stop
 * advertising (unless a directed-advertising connect attempt is in
 * flight) and any background scan, then queue active-scan parameters
 * (with a resolvable or non-resolvable private own address) and scan
 * enable.  Returns false with *status set when discovery cannot be
 * triggered.
 */
3728 static bool trigger_discovery(struct hci_request *req, u8 *status)
3730 struct hci_dev *hdev = req->hdev;
3731 struct hci_cp_le_set_scan_param param_cp;
3732 struct hci_cp_le_set_scan_enable enable_cp;
3733 struct hci_cp_inquiry inq_cp;
3734 /* General inquiry access code (GIAC) */
3735 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3739 switch (hdev->discovery.type) {
3740 case DISCOV_TYPE_BREDR:
3741 *status = mgmt_bredr_support(hdev);
3745 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3746 *status = MGMT_STATUS_BUSY;
3750 hci_inquiry_cache_flush(hdev);
3752 memset(&inq_cp, 0, sizeof(inq_cp));
3753 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3754 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3755 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3758 case DISCOV_TYPE_LE:
3759 case DISCOV_TYPE_INTERLEAVED:
3760 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well */
3764 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3765 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3766 *status = MGMT_STATUS_NOT_SUPPORTED;
3770 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3771 /* Don't let discovery abort an outgoing
3772 * connection attempt that's using directed
3775 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3777 *status = MGMT_STATUS_REJECTED;
3781 disable_advertising(req);
3784 /* If controller is scanning, it means the background scanning
3785 * is running. Thus, we should temporarily stop it in order to
3786 * set the discovery scanning parameters.
3788 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3789 hci_req_add_le_scan_disable(req);
3791 memset(&param_cp, 0, sizeof(param_cp));
3793 /* All active scans will be done with either a resolvable
3794 * private address (when privacy feature has been enabled)
3795 * or non-resolvable private address.
3797 err = hci_update_random_address(req, true, &own_addr_type);
3799 *status = MGMT_STATUS_FAILED;
3803 param_cp.type = LE_SCAN_ACTIVE;
3804 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3805 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3806 param_cp.own_address_type = own_addr_type;
3807 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3810 memset(&enable_cp, 0, sizeof(enable_cp));
3811 enable_cp.enable = LE_SCAN_ENABLE;
3812 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3813 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3818 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-completion callback shared by Start Discovery and Start
 * Service Discovery.
 *
 * Completes whichever pending command is found, moves the discovery
 * state to STOPPED on failure or FINDING on success, and for LE-based
 * discovery schedules the delayed le_scan_disable work with a
 * type-specific timeout (BREDR discovery has no timeout here).
 */
3825 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3827 struct pending_cmd *cmd;
3828 unsigned long timeout;
3830 BT_DBG("status %d", status);
3834 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3836 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3839 cmd->cmd_complete(cmd, mgmt_status(status));
3840 mgmt_pending_remove(cmd);
3844 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3848 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3850 switch (hdev->discovery.type) {
3851 case DISCOV_TYPE_LE:
3852 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3854 case DISCOV_TYPE_INTERLEAVED:
3855 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3857 case DISCOV_TYPE_BREDR:
3861 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3867 queue_delayed_work(hdev->workqueue,
3868 &hdev->le_scan_disable, timeout);
3871 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY.
 *
 * Requires a powered adapter and no discovery (or periodic inquiry)
 * already in progress.  Resets the discovery filter, records the
 * requested type, builds the HCI commands via trigger_discovery() and
 * runs them with start_discovery_complete() as callback; the discovery
 * state is set to STARTING once the request is under way.
 */
3874 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3875 void *data, u16 len)
3877 struct mgmt_cp_start_discovery *cp = data;
3878 struct pending_cmd *cmd;
3879 struct hci_request req;
3883 BT_DBG("%s", hdev->name);
3887 if (!hdev_is_powered(hdev)) {
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3889 MGMT_STATUS_NOT_POWERED,
3890 &cp->type, sizeof(cp->type));
3894 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3895 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3896 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3897 MGMT_STATUS_BUSY, &cp->type,
3902 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3908 cmd->cmd_complete = generic_cmd_complete;
3910 /* Clear the discovery filter first to free any previously
3911 * allocated memory for the UUID list.
3913 hci_discovery_filter_clear(hdev);
3915 hdev->discovery.type = cp->type;
3916 hdev->discovery.report_invalid_rssi = false;
3918 hci_req_init(&req, hdev);
3920 if (!trigger_discovery(&req, &status)) {
3921 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3922 status, &cp->type, sizeof(cp->type));
3923 mgmt_pending_remove(cmd);
3927 err = hci_req_run(&req, start_discovery_complete);
3929 mgmt_pending_remove(cmd);
3933 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3936 hci_dev_unlock(hdev);
/* cmd_complete hook for Start Service Discovery: replies with only the
 * first parameter byte (the discovery type) from the stored command.
 */
3940 static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3942 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like start_discovery() but additionally validates and stores the
 * RSSI threshold and the variable-length UUID filter list.  uuid_count
 * is bounded by max_uuid_count (computed so the total payload fits in
 * u16) before expected_len is derived, which prevents the
 * uuid_count * 16 length check from overflowing.  The UUID list is
 * duplicated into hdev->discovery.uuids.
 */
3945 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3946 void *data, u16 len)
3948 struct mgmt_cp_start_service_discovery *cp = data;
3949 struct pending_cmd *cmd;
3950 struct hci_request req;
3951 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3952 u16 uuid_count, expected_len;
3956 BT_DBG("%s", hdev->name);
3960 if (!hdev_is_powered(hdev)) {
3961 err = cmd_complete(sk, hdev->id,
3962 MGMT_OP_START_SERVICE_DISCOVERY,
3963 MGMT_STATUS_NOT_POWERED,
3964 &cp->type, sizeof(cp->type));
3968 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3969 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3970 err = cmd_complete(sk, hdev->id,
3971 MGMT_OP_START_SERVICE_DISCOVERY,
3972 MGMT_STATUS_BUSY, &cp->type,
3977 uuid_count = __le16_to_cpu(cp->uuid_count);
3978 if (uuid_count > max_uuid_count) {
3979 BT_ERR("service_discovery: too big uuid_count value %u",
3981 err = cmd_complete(sk, hdev->id,
3982 MGMT_OP_START_SERVICE_DISCOVERY,
3983 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3988 expected_len = sizeof(*cp) + uuid_count * 16;
3989 if (expected_len != len) {
3990 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3992 err = cmd_complete(sk, hdev->id,
3993 MGMT_OP_START_SERVICE_DISCOVERY,
3994 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3999 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4006 cmd->cmd_complete = service_discovery_cmd_complete;
4008 /* Clear the discovery filter first to free any previously
4009 * allocated memory for the UUID list.
4011 hci_discovery_filter_clear(hdev);
4013 hdev->discovery.type = cp->type;
4014 hdev->discovery.rssi = cp->rssi;
4015 hdev->discovery.uuid_count = uuid_count;
4017 if (uuid_count > 0) {
4018 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4020 if (!hdev->discovery.uuids) {
4021 err = cmd_complete(sk, hdev->id,
4022 MGMT_OP_START_SERVICE_DISCOVERY,
4024 &cp->type, sizeof(cp->type));
4025 mgmt_pending_remove(cmd);
4030 hci_req_init(&req, hdev);
4032 if (!trigger_discovery(&req, &status)) {
4033 err = cmd_complete(sk, hdev->id,
4034 MGMT_OP_START_SERVICE_DISCOVERY,
4035 status, &cp->type, sizeof(cp->type));
4036 mgmt_pending_remove(cmd);
4040 err = hci_req_run(&req, start_discovery_complete);
4042 mgmt_pending_remove(cmd);
4046 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4049 hci_dev_unlock(hdev);
/* Request-completion callback for Stop Discovery: complete and remove
 * the pending command and, on success, mark discovery as STOPPED.
 */
4053 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4055 struct pending_cmd *cmd;
4057 BT_DBG("status %d", status);
4061 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4063 cmd->cmd_complete(cmd, mgmt_status(status));
4064 mgmt_pending_remove(cmd);
4068 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4070 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY.
 *
 * Rejects the request if no discovery is active or if the supplied
 * type doesn't match the running one.  Otherwise queues the stop
 * commands via hci_stop_discovery(); if the request runs, the state
 * becomes STOPPING and completion is handled asynchronously, while an
 * -ENODATA result (nothing to send) is treated as an immediate,
 * successful stop.
 */
4073 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4076 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4077 struct pending_cmd *cmd;
4078 struct hci_request req;
4081 BT_DBG("%s", hdev->name);
4085 if (!hci_discovery_active(hdev)) {
4086 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4087 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4088 sizeof(mgmt_cp->type));
4092 if (hdev->discovery.type != mgmt_cp->type) {
4093 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4094 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4095 sizeof(mgmt_cp->type));
4099 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4105 cmd->cmd_complete = generic_cmd_complete;
4107 hci_req_init(&req, hdev);
4109 hci_stop_discovery(&req);
4111 err = hci_req_run(&req, stop_discovery_complete);
4113 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4117 mgmt_pending_remove(cmd);
4119 /* If no HCI commands were sent we're done */
4120 if (err == -ENODATA) {
4121 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4122 &mgmt_cp->type, sizeof(mgmt_cp->type));
4123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4127 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME.
 *
 * Only valid while discovery is active.  Looks up the inquiry cache
 * entry with unknown name state for the given address and either marks
 * the name as known or flags it as needed (triggering a resolve update
 * in the cache).
 */
4131 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4134 struct mgmt_cp_confirm_name *cp = data;
4135 struct inquiry_entry *e;
4138 BT_DBG("%s", hdev->name);
4142 if (!hci_discovery_active(hdev)) {
4143 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4144 MGMT_STATUS_FAILED, &cp->addr,
4149 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4151 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4152 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4157 if (cp->name_known) {
4158 e->name_state = NAME_KNOWN;
4161 e->name_state = NAME_NEEDED;
4162 hci_inquiry_cache_update_resolve(hdev, e);
4165 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4169 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE.
 *
 * Validates the address type, adds the address to the blacklist, and
 * on success broadcasts MGMT_EV_DEVICE_BLOCKED.  A failed list-add is
 * reported as MGMT_STATUS_FAILED.
 */
4173 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4176 struct mgmt_cp_block_device *cp = data;
4180 BT_DBG("%s", hdev->name);
4182 if (!bdaddr_type_is_valid(cp->addr.type))
4183 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4184 MGMT_STATUS_INVALID_PARAMS,
4185 &cp->addr, sizeof(cp->addr));
4189 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4192 status = MGMT_STATUS_FAILED;
4196 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4198 status = MGMT_STATUS_SUCCESS;
4201 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4202 &cp->addr, sizeof(cp->addr));
4204 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE — mirror of block_device():
 * removes the address from the blacklist and broadcasts
 * MGMT_EV_DEVICE_UNBLOCKED.  A failed removal (entry not present) is
 * reported as INVALID_PARAMS.
 */
4209 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4212 struct mgmt_cp_unblock_device *cp = data;
4216 BT_DBG("%s", hdev->name);
4218 if (!bdaddr_type_is_valid(cp->addr.type))
4219 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4220 MGMT_STATUS_INVALID_PARAMS,
4221 &cp->addr, sizeof(cp->addr));
4225 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4228 status = MGMT_STATUS_INVALID_PARAMS;
4232 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4234 status = MGMT_STATUS_SUCCESS;
4237 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4238 &cp->addr, sizeof(cp->addr));
4240 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID.
 *
 * Validates the DI source (0x0000-0x0002: unset, SIG, USB-IF), stores
 * source/vendor/product/version on the hdev, replies immediately, and
 * runs an HCI request so dependent data (EIR, per the full file) can
 * be refreshed.
 */
4245 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4248 struct mgmt_cp_set_device_id *cp = data;
4249 struct hci_request req;
4253 BT_DBG("%s", hdev->name);
4255 source = __le16_to_cpu(cp->source);
4257 if (source > 0x0002)
4258 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4259 MGMT_STATUS_INVALID_PARAMS);
4263 hdev->devid_source = source;
4264 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4265 hdev->devid_product = __le16_to_cpu(cp->product);
4266 hdev->devid_version = __le16_to_cpu(cp->version);
4268 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4270 hci_req_init(&req, hdev);
4272 hci_req_run(&req, NULL);
4274 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising.
 *
 * On failure, answers all pending SET_ADVERTISING commands with the
 * mapped error status.  On success, synchronizes the HCI_ADVERTISING
 * setting flag with the actual controller state (HCI_LE_ADV), replies
 * to the pending commands with the new settings, and emits a New
 * Settings event.
 */
4279 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4281 struct cmd_lookup match = { NULL, hdev };
4286 u8 mgmt_err = mgmt_status(status);
4288 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4289 cmd_status_rsp, &mgmt_err);
4293 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4294 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4296 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4298 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4301 new_settings(hdev, match.sk);
4307 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * Requires LE support and a boolean value.  When no HCI interaction is
 * possible or needed (adapter off, value unchanged, LE connections
 * present, or an active LE scan running), only the HCI_ADVERTISING
 * flag is toggled and settings/new-settings responses are sent
 * directly.  Otherwise — unless a conflicting SET_ADVERTISING/SET_LE
 * command is already pending — an HCI request enabling or disabling
 * advertising is issued with set_advertising_complete() as callback.
 */
4310 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4313 struct mgmt_mode *cp = data;
4314 struct pending_cmd *cmd;
4315 struct hci_request req;
4316 u8 val, enabled, status;
4319 BT_DBG("request for %s", hdev->name);
4321 status = mgmt_le_support(hdev);
4323 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4326 if (cp->val != 0x00 && cp->val != 0x01)
4327 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4328 MGMT_STATUS_INVALID_PARAMS);
4333 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4335 /* The following conditions are ones which mean that we should
4336 * not do any HCI communication but directly send a mgmt
4337 * response to user space (after toggling the flag if
4340 if (!hdev_is_powered(hdev) || val == enabled ||
4341 hci_conn_num(hdev, LE_LINK) > 0 ||
4342 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4343 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4344 bool changed = false;
4346 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4347 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4351 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4356 err = new_settings(hdev, sk);
4361 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4362 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4363 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4368 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4374 hci_req_init(&req, hdev);
4377 enable_advertising(&req);
4379 disable_advertising(&req);
4381 err = hci_req_run(&req, set_advertising_complete);
4383 mgmt_pending_remove(cmd);
4386 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS.
 *
 * Only allowed on LE-capable adapters while powered off.  A non-ANY
 * address must not be BDADDR_NONE and, per the Core Specification,
 * must have its two most significant bits set to qualify as a static
 * random address.  The address is stored in hdev->static_addr.
 */
4390 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4391 void *data, u16 len)
4393 struct mgmt_cp_set_static_address *cp = data;
4396 BT_DBG("%s", hdev->name);
4398 if (!lmp_le_capable(hdev))
4399 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4400 MGMT_STATUS_NOT_SUPPORTED);
4402 if (hdev_is_powered(hdev))
4403 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4404 MGMT_STATUS_REJECTED);
4406 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4407 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4408 return cmd_status(sk, hdev->id,
4409 MGMT_OP_SET_STATIC_ADDRESS,
4410 MGMT_STATUS_INVALID_PARAMS);
4412 /* Two most significant bits shall be set */
4413 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4414 return cmd_status(sk, hdev->id,
4415 MGMT_OP_SET_STATIC_ADDRESS,
4416 MGMT_STATUS_INVALID_PARAMS);
4421 bacpy(&hdev->static_addr, &cp->bdaddr);
4423 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4425 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS.
 *
 * Validates that interval and window are each within the spec range
 * 0x0004-0x4000 and that the window does not exceed the interval, then
 * stores them on the hdev.  If a background (passive) scan is running
 * and no discovery is in progress, the scan is restarted so the new
 * parameters take effect immediately.
 */
4430 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4431 void *data, u16 len)
4433 struct mgmt_cp_set_scan_params *cp = data;
4434 __u16 interval, window;
4437 BT_DBG("%s", hdev->name);
4439 if (!lmp_le_capable(hdev))
4440 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4441 MGMT_STATUS_NOT_SUPPORTED);
4443 interval = __le16_to_cpu(cp->interval);
4445 if (interval < 0x0004 || interval > 0x4000)
4446 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4447 MGMT_STATUS_INVALID_PARAMS);
4449 window = __le16_to_cpu(cp->window);
4451 if (window < 0x0004 || window > 0x4000)
4452 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4453 MGMT_STATUS_INVALID_PARAMS);
4455 if (window > interval)
4456 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4457 MGMT_STATUS_INVALID_PARAMS);
4461 hdev->le_scan_interval = interval;
4462 hdev->le_scan_window = window;
4464 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4466 /* If background scan is running, restart it so new parameters are
4469 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4470 hdev->discovery.state == DISCOVERY_STOPPED) {
4471 struct hci_request req;
4473 hci_req_init(&req, hdev);
4475 hci_req_add_le_scan_disable(&req);
4476 hci_req_add_le_passive_scan(&req);
4478 hci_req_run(&req, NULL);
4481 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on failure
 * send a cmd_status to the pending command's socket; on success toggle
 * HCI_FAST_CONNECTABLE according to the requested mode, reply with the
 * settings, and emit New Settings.  The pending entry is removed in
 * all cases.
 */
4486 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4488 struct pending_cmd *cmd;
4490 BT_DBG("status 0x%02x", status);
4494 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4499 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4500 mgmt_status(status));
4502 struct mgmt_mode *cp = cmd->param;
4505 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4507 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4509 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4510 new_settings(hdev, cmd->sk);
4513 mgmt_pending_remove(cmd);
4516 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Preconditions: BR/EDR enabled and controller at least Bluetooth 1.2
 * (page-scan type support), boolean value, adapter powered, and the
 * Connectable setting on.  If no change is needed the current settings
 * are returned directly; otherwise a write_fast_connectable() HCI
 * request is issued, completed by fast_connectable_complete().
 */
4519 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4520 void *data, u16 len)
4522 struct mgmt_mode *cp = data;
4523 struct pending_cmd *cmd;
4524 struct hci_request req;
4527 BT_DBG("%s", hdev->name);
4529 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4530 hdev->hci_ver < BLUETOOTH_VER_1_2)
4531 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4532 MGMT_STATUS_NOT_SUPPORTED);
4534 if (cp->val != 0x00 && cp->val != 0x01)
4535 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4536 MGMT_STATUS_INVALID_PARAMS);
4538 if (!hdev_is_powered(hdev))
4539 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4540 MGMT_STATUS_NOT_POWERED);
4542 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4543 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4544 MGMT_STATUS_REJECTED);
4548 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4549 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: just echo back the settings */
4554 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4555 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4560 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4567 hci_req_init(&req, hdev);
4569 write_fast_connectable(&req, cp->val);
4571 err = hci_req_run(&req, fast_connectable_complete);
4573 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4574 MGMT_STATUS_FAILED);
4575 mgmt_pending_remove(cmd);
4579 hci_dev_unlock(hdev);
/* Completion callback for the HCI request built by set_bredr().
 * On failure the optimistically-set HCI_BREDR_ENABLED flag is rolled back
 * and an error status is returned; on success a settings response and a
 * New Settings event are sent to the requester.
 */
4584 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4586 struct pending_cmd *cmd;
4588 BT_DBG("status 0x%02x", status);
4592 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4597 u8 mgmt_err = mgmt_status(status);
4599 /* We need to restore the flag if related HCI commands failed. */
4602 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4604 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4606 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4607 new_settings(hdev, cmd->sk);
4610 mgmt_pending_remove(cmd);
4613 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE capable) controller. Disabling is only allowed while the
 * adapter is powered off; enabling on a powered adapter queues HCI
 * commands completed by set_bredr_complete().
 */
4616 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4618 struct mgmt_mode *cp = data;
4619 struct pending_cmd *cmd;
4620 struct hci_request req;
4623 BT_DBG("request for %s", hdev->name);
/* Toggling BR/EDR only makes sense on a dual-mode controller. */
4625 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4626 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4627 MGMT_STATUS_NOT_SUPPORTED);
4629 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4630 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4631 MGMT_STATUS_REJECTED);
4633 if (cp->val != 0x00 && cp->val != 0x01)
4634 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4635 MGMT_STATUS_INVALID_PARAMS);
/* Value already in effect: just reply with the current settings. */
4639 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4640 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4644 if (!hdev_is_powered(hdev)) {
/* Powered off: clear BR/EDR-dependent flags when disabling BR/EDR. */
4646 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4647 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4648 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4649 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4650 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4653 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4655 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4659 err = new_settings(hdev, sk);
4663 /* Reject disabling when powered on */
4665 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4666 MGMT_STATUS_REJECTED);
4670 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4671 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4676 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4682 /* We need to flip the bit already here so that update_adv_data
4683 * generates the correct flags.
 */
4685 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4687 hci_req_init(&req, hdev);
4689 write_fast_connectable(&req, false);
4690 hci_update_page_scan(hdev, &req);
4692 /* Since only the advertising data flags will change, there
4693 * is no need to update the scan response data.
 */
4695 update_adv_data(&req);
4697 err = hci_req_run(&req, set_bredr_complete);
4699 mgmt_pending_remove(cmd);
4702 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * support. val 0x00 = off, 0x01 = on, 0x02 = SC-only mode. When the
 * adapter is unpowered or BR/EDR SC is unavailable only the flags are
 * updated; otherwise HCI Write Secure Connections Host Support is sent.
 */
4706 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4707 void *data, u16 len)
4709 struct mgmt_mode *cp = data;
4710 struct pending_cmd *cmd;
4714 BT_DBG("request for %s", hdev->name);
/* Needs LE enabled, or SC-capable LMP, or the force-SC debugfs override. */
4716 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4717 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4718 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4719 MGMT_STATUS_NOT_SUPPORTED);
4721 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4722 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4723 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent. */
4727 if (!hdev_is_powered(hdev) ||
4728 (!lmp_sc_capable(hdev) &&
4729 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4730 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4734 changed = !test_and_set_bit(HCI_SC_ENABLED,
4736 if (cp->val == 0x02)
4737 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4739 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4741 changed = test_and_clear_bit(HCI_SC_ENABLED,
4743 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4746 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4751 err = new_settings(hdev, sk);
4756 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4757 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Both the enabled and SC-only state already match: nothing to do. */
4764 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4765 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4766 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4770 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4776 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4778 mgmt_pending_remove(cmd);
4782 if (cp->val == 0x02)
4783 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4785 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4788 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * val 0x00 = discard debug keys, 0x01 = keep them, 0x02 = keep and also
 * actively use SSP debug mode. When the use-mode changes on a powered,
 * SSP-enabled adapter, HCI Write SSP Debug Mode is sent accordingly.
 */
4792 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4793 void *data, u16 len)
4795 struct mgmt_mode *cp = data;
4796 bool changed, use_changed;
4799 BT_DBG("request for %s", hdev->name);
4801 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4802 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4803 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the keep-debug-keys setting actually changed. */
4808 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4811 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 additionally enables active use of debug keys. */
4814 if (cp->val == 0x02)
4815 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4818 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Propagate the use-mode to the controller when it can take effect. */
4821 if (hdev_is_powered(hdev) && use_changed &&
4822 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4823 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4824 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4825 sizeof(mode), &mode);
4828 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4833 err = new_settings(hdev, sk);
4836 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enable/disable LE privacy and install
 * the local IRK. Only allowed while the adapter is powered off; enabling
 * stores the IRK and marks the RPA as expired so a fresh one is generated.
 */
4840 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4843 struct mgmt_cp_set_privacy *cp = cp_data;
4847 BT_DBG("request for %s", hdev->name);
4849 if (!lmp_le_capable(hdev))
4850 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4851 MGMT_STATUS_NOT_SUPPORTED);
4853 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4854 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4855 MGMT_STATUS_INVALID_PARAMS);
/* Privacy may only be reconfigured while powered off. */
4857 if (hdev_is_powered(hdev))
4858 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4859 MGMT_STATUS_REJECTED);
4863 /* If user space supports this command it is also expected to
4864 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
 */
4866 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4869 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4870 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next use. */
4871 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4873 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4874 memset(hdev->irk, 0, sizeof(hdev->irk));
4875 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4878 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4883 err = new_settings(hdev, sk);
4886 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are accepted as-is,
 * random LE addresses must be static (top two bits of the MSB set).
 */
4890 static bool irk_is_valid(struct mgmt_irk_info *irk)
4892 switch (irk->addr.type) {
4893 case BDADDR_LE_PUBLIC:
4896 case BDADDR_LE_RANDOM:
4897 /* Two most significant bits shall be set */
4898 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: replace the IRK store with the list
 * supplied by user space. The count is bounded to prevent overflow, the
 * payload length must match exactly, and every entry is validated before
 * the existing list is cleared and the new keys are added.
 */
4906 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound on irk_count so expected_len below cannot overflow u16. */
4909 struct mgmt_cp_load_irks *cp = cp_data;
4910 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4911 sizeof(struct mgmt_irk_info));
4912 u16 irk_count, expected_len;
4915 BT_DBG("request for %s", hdev->name);
4917 if (!lmp_le_capable(hdev))
4918 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4919 MGMT_STATUS_NOT_SUPPORTED);
4921 irk_count = __le16_to_cpu(cp->irk_count);
4922 if (irk_count > max_irk_count) {
4923 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4924 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4925 MGMT_STATUS_INVALID_PARAMS);
/* The variable-length payload must match the declared count exactly. */
4928 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4929 if (expected_len != len) {
4930 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4932 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4933 MGMT_STATUS_INVALID_PARAMS);
4936 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries up front so the store is never half-replaced. */
4938 for (i = 0; i < irk_count; i++) {
4939 struct mgmt_irk_info *key = &cp->irks[i];
4941 if (!irk_is_valid(key))
4942 return cmd_status(sk, hdev->id,
4944 MGMT_STATUS_INVALID_PARAMS);
4949 hci_smp_irks_clear(hdev);
4951 for (i = 0; i < irk_count; i++) {
4952 struct mgmt_irk_info *irk = &cp->irks[i];
4955 if (irk->addr.type == BDADDR_LE_PUBLIC)
4956 addr_type = ADDR_LE_DEV_PUBLIC;
4958 addr_type = ADDR_LE_DEV_RANDOM;
4960 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space handed us IRKs, so it supports RPA resolving. */
4964 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4966 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4968 hci_dev_unlock(hdev);
/* Validate an LTK entry: the master field must be boolean and, for random
 * LE addresses, the address must be static (top two bits of the MSB set).
 */
4973 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4975 if (key->master != 0x00 && key->master != 0x01)
4978 switch (key->addr.type) {
4979 case BDADDR_LE_PUBLIC:
4982 case BDADDR_LE_RANDOM:
4983 /* Two most significant bits shall be set */
4984 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the SMP LTK store with
 * the keys supplied by user space. Count and payload length are validated
 * first; each mgmt key type is then mapped to the corresponding SMP key
 * type and authentication level before being added.
 */
4992 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4993 void *cp_data, u16 len)
/* Upper bound on key_count so expected_len below cannot overflow u16. */
4995 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4996 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4997 sizeof(struct mgmt_ltk_info));
4998 u16 key_count, expected_len;
5001 BT_DBG("request for %s", hdev->name);
5003 if (!lmp_le_capable(hdev))
5004 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5005 MGMT_STATUS_NOT_SUPPORTED);
5007 key_count = __le16_to_cpu(cp->key_count);
5008 if (key_count > max_key_count) {
5009 BT_ERR("load_ltks: too big key_count value %u", key_count);
5010 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5011 MGMT_STATUS_INVALID_PARAMS);
/* The variable-length payload must match the declared count exactly. */
5014 expected_len = sizeof(*cp) + key_count *
5015 sizeof(struct mgmt_ltk_info);
5016 if (expected_len != len) {
5017 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5019 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5020 MGMT_STATUS_INVALID_PARAMS);
5023 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries up front so the store is never half-replaced. */
5025 for (i = 0; i < key_count; i++) {
5026 struct mgmt_ltk_info *key = &cp->keys[i];
5028 if (!ltk_is_valid(key))
5029 return cmd_status(sk, hdev->id,
5030 MGMT_OP_LOAD_LONG_TERM_KEYS,
5031 MGMT_STATUS_INVALID_PARAMS);
5036 hci_smp_ltks_clear(hdev);
5038 for (i = 0; i < key_count; i++) {
5039 struct mgmt_ltk_info *key = &cp->keys[i];
5040 u8 type, addr_type, authenticated;
5042 if (key->addr.type == BDADDR_LE_PUBLIC)
5043 addr_type = ADDR_LE_DEV_PUBLIC;
5045 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type onto SMP key type + authentication level. */
5047 switch (key->type) {
5048 case MGMT_LTK_UNAUTHENTICATED:
5049 authenticated = 0x00;
5050 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5052 case MGMT_LTK_AUTHENTICATED:
5053 authenticated = 0x01;
5054 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5056 case MGMT_LTK_P256_UNAUTH:
5057 authenticated = 0x00;
5058 type = SMP_LTK_P256;
5060 case MGMT_LTK_P256_AUTH:
5061 authenticated = 0x01;
5062 type = SMP_LTK_P256;
5064 case MGMT_LTK_P256_DEBUG:
5065 authenticated = 0x00;
5066 type = SMP_LTK_P256_DEBUG;
5071 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5072 authenticated, key->val, key->enc_size, key->ediv,
5076 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5079 hci_dev_unlock(hdev);
/* Reply to a pending Get Conn Info command. On success the RSSI and TX
 * power values cached in the hci_conn are returned; otherwise the invalid
 * sentinel values are used. Drops the connection reference taken when the
 * command was queued.
 */
5084 static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5086 struct hci_conn *conn = cmd->user_data;
5087 struct mgmt_rp_get_conn_info rp;
/* The queued command parameters start with the peer address. */
5089 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5091 if (status == MGMT_STATUS_SUCCESS) {
5092 rp.rssi = conn->rssi;
5093 rp.tx_power = conn->tx_power;
5094 rp.max_tx_power = conn->max_tx_power;
5096 rp.rssi = HCI_RSSI_INVALID;
5097 rp.tx_power = HCI_TX_POWER_INVALID;
5098 rp.max_tx_power = HCI_TX_POWER_INVALID;
5101 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5104 hci_conn_drop(conn);
/* HCI request callback for the Read RSSI / Read TX Power sequence issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command and completes the matching pending mgmt command.
 */
5108 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
5110 struct hci_cp_read_rssi *cp;
5111 struct pending_cmd *cmd;
5112 struct hci_conn *conn;
5116 BT_DBG("status 0x%02x", hci_status);
5120 /* Commands sent in request are either Read RSSI or Read Transmit Power
5121 * Level so we check which one was last sent to retrieve connection
5122 * handle. Both commands have handle as first parameter so it's safe to
5123 * cast data on the same command struct.
5125 * First command sent is always Read RSSI and we fail only if it fails.
5126 * In other case we simply override error to indicate success as we
5127 * already remembered if TX power value is actually valid.
 */
5129 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5131 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5132 status = MGMT_STATUS_SUCCESS;
5134 status = mgmt_status(hci_status);
5138 BT_ERR("invalid sent_cmd in conn_info response");
5142 handle = __le16_to_cpu(cp->handle);
5143 conn = hci_conn_hash_lookup_handle(hdev, handle);
5145 BT_ERR("unknown handle (%d) in conn_info response", handle);
5149 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5153 cmd->cmd_complete(cmd, status);
5154 mgmt_pending_remove(cmd);
5157 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI/TX power for an active
 * connection. Cached values are returned when still fresh; otherwise a
 * Read RSSI (+ Read TX Power as needed) request is queued and the reply
 * is deferred to conn_info_refresh_complete().
 */
5160 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5163 struct mgmt_cp_get_conn_info *cp = data;
5164 struct mgmt_rp_get_conn_info rp;
5165 struct hci_conn *conn;
5166 unsigned long conn_info_age;
5169 BT_DBG("%s", hdev->name);
/* Echo the queried address back in the response. */
5171 memset(&rp, 0, sizeof(rp));
5172 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5173 rp.addr.type = cp->addr.type;
5175 if (!bdaddr_type_is_valid(cp->addr.type))
5176 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5177 MGMT_STATUS_INVALID_PARAMS,
5182 if (!hdev_is_powered(hdev)) {
5183 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5184 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look up the connection by link type matching the address type. */
5188 if (cp->addr.type == BDADDR_BREDR)
5189 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5192 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5194 if (!conn || conn->state != BT_CONNECTED) {
5195 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5196 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one Get Conn Info per connection may be in flight. */
5200 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5201 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5202 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5206 /* To avoid client trying to guess when to poll again for information we
5207 * calculate conn info age as random value between min/max set in hdev.
 */
5209 conn_info_age = hdev->conn_info_min_age +
5210 prandom_u32_max(hdev->conn_info_max_age -
5211 hdev->conn_info_min_age);
5213 /* Query controller to refresh cached values if they are too old or were
 * never read.
 */
5216 if (time_after(jiffies, conn->conn_info_timestamp +
5217 msecs_to_jiffies(conn_info_age)) ||
5218 !conn->conn_info_timestamp) {
5219 struct hci_request req;
5220 struct hci_cp_read_tx_power req_txp_cp;
5221 struct hci_cp_read_rssi req_rssi_cp;
5222 struct pending_cmd *cmd;
5224 hci_req_init(&req, hdev);
5225 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5226 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5229 /* For LE links TX power does not change thus we don't need to
5230 * query for it once value is known.
 */
5232 if (!bdaddr_type_is_le(cp->addr.type) ||
5233 conn->tx_power == HCI_TX_POWER_INVALID) {
5234 req_txp_cp.handle = cpu_to_le16(conn->handle);
5235 req_txp_cp.type = 0x00;
5236 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5237 sizeof(req_txp_cp), &req_txp_cp);
5240 /* Max TX power needs to be read only once per connection */
5241 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5242 req_txp_cp.handle = cpu_to_le16(conn->handle);
5243 req_txp_cp.type = 0x01;
5244 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5245 sizeof(req_txp_cp), &req_txp_cp);
5248 err = hci_req_run(&req, conn_info_refresh_complete);
5252 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the deferred completion runs. */
5259 hci_conn_hold(conn);
5260 cmd->user_data = hci_conn_get(conn);
5261 cmd->cmd_complete = conn_info_cmd_complete;
5263 conn->conn_info_timestamp = jiffies;
5265 /* Cache is valid, just reply with values cached in hci_conn */
5266 rp.rssi = conn->rssi;
5267 rp.tx_power = conn->tx_power;
5268 rp.max_tx_power = conn->max_tx_power;
5270 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5271 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5275 hci_dev_unlock(hdev);
/* Reply to a pending Get Clock Info command with the local clock and, if
 * a connection was involved, the piconet clock and its accuracy. Drops
 * the connection reference taken when the command was queued.
 */
5279 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5281 struct hci_conn *conn = cmd->user_data;
5282 struct mgmt_rp_get_clock_info rp;
5283 struct hci_dev *hdev;
5285 memset(&rp, 0, sizeof(rp));
5286 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5291 hdev = hci_dev_get(cmd->index);
5293 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock values only exist when a connection was queried. */
5298 rp.piconet_clock = cpu_to_le32(conn->clock);
5299 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5303 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5306 hci_conn_drop(conn);
/* HCI request callback for Read Clock. Recovers the queried connection
 * (if any) from the sent command parameters and completes the matching
 * pending Get Clock Info command.
 */
5311 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5313 struct hci_cp_read_clock *hci_cp;
5314 struct pending_cmd *cmd;
5315 struct hci_conn *conn;
5317 BT_DBG("%s status %u", hdev->name, status);
5321 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
5325 if (hci_cp->which) {
5326 u16 handle = __le16_to_cpu(hci_cp->handle);
5327 conn = hci_conn_hash_lookup_handle(hdev, handle);
5332 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5336 cmd->cmd_complete(cmd, mgmt_status(status));
5337 mgmt_pending_remove(cmd);
5340 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * peer BR/EDR address is given, that connection's piconet clock. The HCI
 * Read Clock request(s) complete via get_clock_info_complete().
 */
5343 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5346 struct mgmt_cp_get_clock_info *cp = data;
5347 struct mgmt_rp_get_clock_info rp;
5348 struct hci_cp_read_clock hci_cp;
5349 struct pending_cmd *cmd;
5350 struct hci_request req;
5351 struct hci_conn *conn;
5354 BT_DBG("%s", hdev->name);
/* Echo the queried address back in the response. */
5356 memset(&rp, 0, sizeof(rp));
5357 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5358 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5360 if (cp->addr.type != BDADDR_BREDR)
5361 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5362 MGMT_STATUS_INVALID_PARAMS,
5367 if (!hdev_is_powered(hdev)) {
5368 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5369 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must be currently connected. */
5373 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5374 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5376 if (!conn || conn->state != BT_CONNECTED) {
5377 err = cmd_complete(sk, hdev->id,
5378 MGMT_OP_GET_CLOCK_INFO,
5379 MGMT_STATUS_NOT_CONNECTED,
5387 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5393 cmd->cmd_complete = clock_info_cmd_complete;
5395 hci_req_init(&req, hdev);
/* First read: local clock (which = 0x00 from the memset). */
5397 memset(&hci_cp, 0, sizeof(hci_cp));
5398 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the deferred completion runs. */
5401 hci_conn_hold(conn);
5402 cmd->user_data = hci_conn_get(conn);
5404 hci_cp.handle = cpu_to_le16(conn->handle);
5405 hci_cp.which = 0x01; /* Piconet clock */
5406 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5409 err = hci_req_run(&req, get_clock_info_complete);
5411 mgmt_pending_remove(cmd);
5414 hci_dev_unlock(hdev);
/* Emit a Device Added mgmt event for the given address, skipping the
 * socket that issued the originating Add Device command.
 */
5418 static void device_added(struct sock *sk, struct hci_dev *hdev,
5419 bdaddr_t *bdaddr, u8 type, u8 action)
5421 struct mgmt_ev_device_added ev;
5423 bacpy(&ev.addr.bdaddr, bdaddr);
5424 ev.addr.type = type;
5427 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for incoming
 * connections (action 0x01 only) or configure an LE device's auto-connect
 * policy (0x00 report, 0x01 direct, 0x02 always).
 */
5430 static int add_device(struct sock *sk, struct hci_dev *hdev,
5431 void *data, u16 len)
5433 struct mgmt_cp_add_device *cp = data;
5434 u8 auto_conn, addr_type;
5437 BT_DBG("%s", hdev->name);
5439 if (!bdaddr_type_is_valid(cp->addr.type) ||
5440 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5441 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5442 MGMT_STATUS_INVALID_PARAMS,
5443 &cp->addr, sizeof(cp->addr));
5445 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5446 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5447 MGMT_STATUS_INVALID_PARAMS,
5448 &cp->addr, sizeof(cp->addr));
5452 if (cp->addr.type == BDADDR_BREDR) {
5453 /* Only incoming connections action is supported for now */
5454 if (cp->action != 0x01) {
5455 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5456 MGMT_STATUS_INVALID_PARAMS,
5457 &cp->addr, sizeof(cp->addr));
5461 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* A new whitelist entry may require enabling page scan. */
5466 hci_update_page_scan(hdev, NULL);
/* LE path: translate mgmt address type and action. */
5471 if (cp->addr.type == BDADDR_LE_PUBLIC)
5472 addr_type = ADDR_LE_DEV_PUBLIC;
5474 addr_type = ADDR_LE_DEV_RANDOM;
5476 if (cp->action == 0x02)
5477 auto_conn = HCI_AUTO_CONN_ALWAYS;
5478 else if (cp->action == 0x01)
5479 auto_conn = HCI_AUTO_CONN_DIRECT;
5481 auto_conn = HCI_AUTO_CONN_REPORT;
5483 /* If the connection parameters don't exist for this device,
5484 * they will be created and configured with defaults.
 */
5486 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5488 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5490 &cp->addr, sizeof(cp->addr));
5495 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5497 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5498 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5501 hci_dev_unlock(hdev);
/* Emit a Device Removed mgmt event for the given address, skipping the
 * socket that issued the originating Remove Device command.
 */
5505 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5506 bdaddr_t *bdaddr, u8 type)
5508 struct mgmt_ev_device_removed ev;
5510 bacpy(&ev.addr.bdaddr, bdaddr);
5511 ev.addr.type = type;
5513 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE: undo add_device() for one address,
 * or — when BDADDR_ANY is given — clear the whole BR/EDR whitelist and
 * all non-disabled LE connection parameters.
 */
5516 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5517 void *data, u16 len)
5519 struct mgmt_cp_remove_device *cp = data;
5522 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove that single device. */
5526 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5527 struct hci_conn_params *params;
5530 if (!bdaddr_type_is_valid(cp->addr.type)) {
5531 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5532 MGMT_STATUS_INVALID_PARAMS,
5533 &cp->addr, sizeof(cp->addr));
5537 if (cp->addr.type == BDADDR_BREDR) {
5538 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Not on the whitelist: nothing to remove. */
5542 err = cmd_complete(sk, hdev->id,
5543 MGMT_OP_REMOVE_DEVICE,
5544 MGMT_STATUS_INVALID_PARAMS,
5545 &cp->addr, sizeof(cp->addr));
5549 hci_update_page_scan(hdev, NULL);
5551 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: look up and drop the connection parameters. */
5556 if (cp->addr.type == BDADDR_LE_PUBLIC)
5557 addr_type = ADDR_LE_DEV_PUBLIC;
5559 addr_type = ADDR_LE_DEV_RANDOM;
5561 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5564 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5565 MGMT_STATUS_INVALID_PARAMS,
5566 &cp->addr, sizeof(cp->addr));
/* Disabled entries were not added via Add Device; reject. */
5570 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5571 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5572 MGMT_STATUS_INVALID_PARAMS,
5573 &cp->addr, sizeof(cp->addr));
5577 list_del(&params->action);
5578 list_del(&params->list);
5580 hci_update_background_scan(hdev);
5582 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything; type must be 0 in this case. */
5584 struct hci_conn_params *p, *tmp;
5585 struct bdaddr_list *b, *btmp;
5587 if (cp->addr.type) {
5588 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5589 MGMT_STATUS_INVALID_PARAMS,
5590 &cp->addr, sizeof(cp->addr));
5594 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5595 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5600 hci_update_page_scan(hdev, NULL);
5602 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* Keep disabled entries; they were not added via Add Device. */
5603 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5605 device_removed(sk, hdev, &p->addr, p->addr_type);
5606 list_del(&p->action);
5611 BT_DBG("All LE connection parameters were removed");
5613 hci_update_background_scan(hdev);
5617 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5618 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5621 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace stored LE connection
 * parameters with the supplied list. Count and payload length are checked
 * strictly; individual invalid entries are logged and skipped rather than
 * failing the whole command.
 */
5625 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
/* Upper bound on param_count so expected_len below cannot overflow u16. */
5628 struct mgmt_cp_load_conn_param *cp = data;
5629 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5630 sizeof(struct mgmt_conn_param));
5631 u16 param_count, expected_len;
5634 if (!lmp_le_capable(hdev))
5635 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5636 MGMT_STATUS_NOT_SUPPORTED);
5638 param_count = __le16_to_cpu(cp->param_count);
5639 if (param_count > max_param_count) {
5640 BT_ERR("load_conn_param: too big param_count value %u",
5642 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5643 MGMT_STATUS_INVALID_PARAMS);
/* The variable-length payload must match the declared count exactly. */
5646 expected_len = sizeof(*cp) + param_count *
5647 sizeof(struct mgmt_conn_param);
5648 if (expected_len != len) {
5649 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5651 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5652 MGMT_STATUS_INVALID_PARAMS);
5655 BT_DBG("%s param_count %u", hdev->name, param_count);
5659 hci_conn_params_clear_disabled(hdev);
5661 for (i = 0; i < param_count; i++) {
5662 struct mgmt_conn_param *param = &cp->params[i];
5663 struct hci_conn_params *hci_param;
5664 u16 min, max, latency, timeout;
5667 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5670 if (param->addr.type == BDADDR_LE_PUBLIC) {
5671 addr_type = ADDR_LE_DEV_PUBLIC;
5672 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5673 addr_type = ADDR_LE_DEV_RANDOM;
/* Bad entries are skipped, not fatal to the whole load. */
5675 BT_ERR("Ignoring invalid connection parameters");
5679 min = le16_to_cpu(param->min_interval);
5680 max = le16_to_cpu(param->max_interval);
5681 latency = le16_to_cpu(param->latency);
5682 timeout = le16_to_cpu(param->timeout);
5684 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5685 min, max, latency, timeout);
5687 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5688 BT_ERR("Ignoring invalid connection parameters");
5692 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5695 BT_ERR("Failed to add connection parameters");
5699 hci_param->conn_min_interval = min;
5700 hci_param->conn_max_interval = max;
5701 hci_param->conn_latency = latency;
5702 hci_param->supervision_timeout = timeout;
5705 hci_dev_unlock(hdev);
5707 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller
 * has been configured by external means. Only valid while powered off and
 * on controllers with the external-config quirk. A change may move the
 * device between the configured and unconfigured index lists.
 */
5710 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5711 void *data, u16 len)
5713 struct mgmt_cp_set_external_config *cp = data;
5717 BT_DBG("%s", hdev->name);
5719 if (hdev_is_powered(hdev))
5720 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5721 MGMT_STATUS_REJECTED);
5723 if (cp->config != 0x00 && cp->config != 0x01)
5724 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5725 MGMT_STATUS_INVALID_PARAMS);
5727 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5728 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5729 MGMT_STATUS_NOT_SUPPORTED);
5734 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5737 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5740 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5747 err = new_options(hdev, sk);
/* Configured state flipped: remove the index from its current list and
 * re-announce it under the new (configured/unconfigured) identity.
 */
5749 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5750 mgmt_index_removed(hdev);
5752 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5753 set_bit(HCI_CONFIG, &hdev->dev_flags);
5754 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5756 queue_work(hdev->req_workqueue, &hdev->power_on);
5758 set_bit(HCI_RAW, &hdev->flags);
5759 mgmt_index_added(hdev);
5764 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: program the controller's public
 * address via the driver's set_bdaddr hook. Only valid while powered off;
 * setting the address on an unconfigured controller may complete its
 * configuration and trigger a power-on cycle.
 */
5768 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5769 void *data, u16 len)
5771 struct mgmt_cp_set_public_address *cp = data;
5775 BT_DBG("%s", hdev->name);
5777 if (hdev_is_powered(hdev))
5778 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5779 MGMT_STATUS_REJECTED);
5781 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5782 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5783 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
5785 if (!hdev->set_bdaddr)
5786 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5787 MGMT_STATUS_NOT_SUPPORTED);
5791 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5792 bacpy(&hdev->public_addr, &cp->bdaddr);
5794 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5801 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5802 err = new_options(hdev, sk);
/* If this was the missing piece of configuration, transition the device
 * to the configured state and power it on for setup.
 */
5804 if (is_configured(hdev)) {
5805 mgmt_index_removed(hdev);
5807 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5809 set_bit(HCI_CONFIG, &hdev->dev_flags);
5810 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5812 queue_work(hdev->req_workqueue, &hdev->power_on);
5816 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. Each entry gives
 * the handler, whether the command payload is variable-length (true means
 * data_len is a minimum, false an exact size), and the expected size.
 * Used by mgmt_control() below.
 */
5820 static const struct mgmt_handler {
5821 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5825 } mgmt_handlers[] = {
5826 { NULL }, /* 0x0000 (no command) */
5827 { read_version, false, MGMT_READ_VERSION_SIZE },
5828 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5829 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5830 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5831 { set_powered, false, MGMT_SETTING_SIZE },
5832 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5833 { set_connectable, false, MGMT_SETTING_SIZE },
5834 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5835 { set_bondable, false, MGMT_SETTING_SIZE },
5836 { set_link_security, false, MGMT_SETTING_SIZE },
5837 { set_ssp, false, MGMT_SETTING_SIZE },
5838 { set_hs, false, MGMT_SETTING_SIZE },
5839 { set_le, false, MGMT_SETTING_SIZE },
5840 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5841 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5842 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5843 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5844 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5845 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5846 { disconnect, false, MGMT_DISCONNECT_SIZE },
5847 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5848 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5849 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5850 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5851 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5852 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5853 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5854 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5855 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5856 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5857 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5858 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5859 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5860 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5861 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5862 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5863 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5864 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5865 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5866 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5867 { set_advertising, false, MGMT_SETTING_SIZE },
5868 { set_bredr, false, MGMT_SETTING_SIZE },
5869 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5870 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5871 { set_secure_conn, false, MGMT_SETTING_SIZE },
5872 { set_debug_keys, false, MGMT_SETTING_SIZE },
5873 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5874 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5875 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5876 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5877 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5878 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5879 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5880 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5881 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5882 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5883 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5884 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Copies the message from user space, validates the header, resolves the
 * controller index, enforces per-state and per-opcode restrictions, checks
 * the payload length against mgmt_handlers[], and dispatches the command.
 */
5887 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5891 struct mgmt_hdr *hdr;
5892 u16 opcode, index, len;
5893 struct hci_dev *hdev = NULL;
5894 const struct mgmt_handler *handler;
5897 BT_DBG("got %zu bytes", msglen);
5899 if (msglen < sizeof(*hdr))
5902 buf = kmalloc(msglen, GFP_KERNEL);
5906 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5912 opcode = __le16_to_cpu(hdr->opcode);
5913 index = __le16_to_cpu(hdr->index);
5914 len = __le16_to_cpu(hdr->len);
/* Header-declared payload length must match what was received. */
5916 if (len != msglen - sizeof(*hdr)) {
5921 if (index != MGMT_INDEX_NONE) {
5922 hdev = hci_dev_get(index);
5924 err = cmd_status(sk, index, opcode,
5925 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup/config or claimed by a user channel are not
 * addressable through the management interface.
 */
5929 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5930 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5931 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5932 err = cmd_status(sk, index, opcode,
5933 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured devices only accept the configuration commands. */
5937 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5938 opcode != MGMT_OP_READ_CONFIG_INFO &&
5939 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5940 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5941 err = cmd_status(sk, index, opcode,
5942 MGMT_STATUS_INVALID_INDEX);
5947 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5948 mgmt_handlers[opcode].func == NULL) {
5949 BT_DBG("Unknown op %u", opcode);
5950 err = cmd_status(sk, index, opcode,
5951 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands must not carry an index; per-device commands must. */
5955 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5956 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5957 err = cmd_status(sk, index, opcode,
5958 MGMT_STATUS_INVALID_INDEX);
5962 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5963 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5964 err = cmd_status(sk, index, opcode,
5965 MGMT_STATUS_INVALID_INDEX);
5969 handler = &mgmt_handlers[opcode];
/* var_len commands declare a minimum size, others an exact size. */
5971 if ((handler->var_len && len < handler->data_len) ||
5972 (!handler->var_len && len != handler->data_len)) {
5973 err = cmd_status(sk, index, opcode,
5974 MGMT_STATUS_INVALID_PARAMS);
5979 mgmt_init_hdev(sk, hdev);
5981 cp = buf + sizeof(*hdr);
5983 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt listeners.
 * Raw-only and non-BR/EDR transports are not exposed; unconfigured
 * controllers get the UNCONF variant of the event.
 */
5997 void mgmt_index_added(struct hci_dev *hdev)
5999 if (hdev->dev_type != HCI_BREDR)
6002 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6005 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6006 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6008 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal.  All still-pending mgmt commands for
 * this controller are completed with INVALID_INDEX first (opcode 0
 * matches every pending command).
 */
6011 void mgmt_index_removed(struct hci_dev *hdev)
6013 u8 status = MGMT_STATUS_INVALID_INDEX;
6015 if (hdev->dev_type != HCI_BREDR)
6018 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6021 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6023 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6024 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6026 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6029 /* This function requires the caller holds hdev->lock */
/* Rebuild the pending LE connection/report action lists from the
 * stored per-device auto_connect policy, then kick background
 * scanning so the new lists take effect.
 */
6030 static void restart_le_actions(struct hci_dev *hdev)
6032 struct hci_conn_params *p;
6034 list_for_each_entry(p, &hdev->le_conn_params, list) {
6035 /* Needed for AUTO_OFF case where might not "really"
6036 * have been powered off.
/* Detach from whichever action list the entry is on before
 * re-adding it according to its policy below.
 */
6038 list_del_init(&p->action);
6040 switch (p->auto_connect) {
6041 case HCI_AUTO_CONN_DIRECT:
6042 case HCI_AUTO_CONN_ALWAYS:
6043 list_add(&p->action, &hdev->pend_le_conns);
6045 case HCI_AUTO_CONN_REPORT:
6046 list_add(&p->action, &hdev->pend_le_reports);
6053 hci_update_background_scan(hdev);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci().  Restores LE auto-connect actions, answers
 * any pending SET_POWERED commands and emits New Settings.
 */
6056 static void powered_complete(struct hci_dev *hdev, u8 status)
6058 struct cmd_lookup match = { NULL, hdev };
6060 BT_DBG("status 0x%02x", status);
6064 restart_le_actions(hdev);
6066 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6068 new_settings(hdev, match.sk);
6070 hci_dev_unlock(hdev);
/* Build and run one HCI request that brings the controller's state
 * (SSP, LE host support, advertising data, auth, page scan) in line
 * with the mgmt-level settings after a power-on.  Returns the result
 * of hci_req_run(); powered_complete() runs when the request ends.
 */
6076 static int powered_update_hci(struct hci_dev *hdev)
6078 struct hci_request req;
6081 hci_req_init(&req, hdev);
/* Enable SSP in the controller if mgmt wants it but the host
 * feature bit is not yet set.
 */
6083 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6084 !lmp_host_ssp_capable(hdev)) {
6087 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
6090 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6091 lmp_bredr_capable(hdev)) {
6092 struct hci_cp_write_le_host_supported cp;
6097 /* Check first if we already have the right
6098 * host state (host features set)
6100 if (cp.le != lmp_host_le_capable(hdev) ||
6101 cp.simul != lmp_host_le_br_capable(hdev))
6102 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6106 if (lmp_le_capable(hdev)) {
6107 /* Make sure the controller has a good default for
6108 * advertising data. This also applies to the case
6109 * where BR/EDR was toggled during the AUTO_OFF phase.
6111 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6112 update_adv_data(&req);
6113 update_scan_rsp_data(&req);
6116 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6117 enable_advertising(&req);
/* Sync the controller's auth-enable with the mgmt link
 * security setting only when they differ.
 */
6120 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6121 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6122 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6123 sizeof(link_sec), &link_sec);
6125 if (lmp_bredr_capable(hdev)) {
6126 write_fast_connectable(&req, false);
6127 hci_update_page_scan(hdev, &req);
6133 return hci_req_run(&req, powered_complete);
/* Called from HCI core when the controller's power state changes.
 * On power-on, defer responses to powered_complete() if the update
 * request was queued; on power-off, fail all pending commands with
 * NOT_POWERED and, if needed, signal a cleared class of device.
 */
6136 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6138 struct cmd_lookup match = { NULL, hdev };
6139 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
6140 u8 zero_cod[] = { 0, 0, 0 };
6143 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* 0 means the request was queued; completion callback will
 * send the responses, so nothing more to do here.
 */
6147 if (powered_update_hci(hdev) == 0)
6150 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path: answer SET_POWERED, then abort everything
 * else that is still pending.
 */
6155 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6156 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);
6158 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6159 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6160 zero_cod, sizeof(zero_cod), NULL);
6163 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command.  -ERFKILL is reported as
 * RFKILLED so userspace can distinguish a soft block from a generic
 * failure.
 */
6171 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6173 struct pending_cmd *cmd;
6176 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6180 if (err == -ERFKILL)
6181 status = MGMT_STATUS_RFKILLED;
6183 status = MGMT_STATUS_FAILED;
6185 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6187 mgmt_pending_remove(cmd);
/* Timer handler for the discoverable timeout: clear both
 * discoverable flags, restore page-scan-only on BR/EDR, refresh the
 * LE advertising data and notify listeners via New Settings.
 */
6190 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6192 struct hci_request req;
6196 /* When discoverable timeout triggers, then just make sure
6197 * the limited discoverable flag is cleared. Even in the case
6198 * of a timeout triggered from general discoverable, it is
6199 * safe to unconditionally clear the flag.
6201 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6202 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6204 hci_req_init(&req, hdev);
6205 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Drop inquiry scan, keep page scan so the device stays
 * connectable.
 */
6206 u8 scan = SCAN_PAGE;
6207 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6208 sizeof(scan), &scan);
6211 update_adv_data(&req);
6212 hci_req_run(&req, NULL);
6214 hdev->discov_timeout = 0;
6216 new_settings(hdev, NULL);
6218 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether to persist the key.
 */
6221 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6224 struct mgmt_ev_new_link_key ev;
6226 memset(&ev, 0, sizeof(ev));
6228 ev.store_hint = persistent;
6229 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6230 ev.key.addr.type = BDADDR_BREDR;
6231 ev.key.type = key->type;
6232 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6233 ev.key.pin_len = key->pin_len;
6235 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an internal SMP LTK type + authentication flag to the LTK type
 * value used in the mgmt New Long Term Key event.
 */
6238 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6240 switch (ltk->type) {
6243 if (ltk->authenticated)
6244 return MGMT_LTK_AUTHENTICATED;
6245 return MGMT_LTK_UNAUTHENTICATED;
6247 if (ltk->authenticated)
6248 return MGMT_LTK_P256_AUTH;
6249 return MGMT_LTK_P256_UNAUTH;
6250 case SMP_LTK_P256_DEBUG:
6251 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
6254 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event.  Keys for non-identity random
 * addresses are flagged as not worth storing since the address will
 * change.
 */
6257 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6259 struct mgmt_ev_new_long_term_key ev;
6261 memset(&ev, 0, sizeof(ev));
6263 /* Devices using resolvable or non-resolvable random addresses
6264 * without providing an identity resolving key don't require
6265 * to store long term keys. Their addresses will change the
6268 * Only when a remote device provides an identity address
6269 * make sure the long term key is stored. If the remote
6270 * identity is known, the long term keys are internally
6271 * mapped to the identity address. So allow static random
6272 * and public addresses here.
/* Top two address bits 11 marks a static random (identity)
 * address; anything else random is resolvable/non-resolvable.
 */
6274 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6275 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6276 ev.store_hint = 0x00;
6278 ev.store_hint = persistent;
6280 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6281 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6282 ev.key.type = mgmt_ltk_type(key);
6283 ev.key.enc_size = key->enc_size;
6284 ev.key.ediv = key->ediv;
6285 ev.key.rand = key->rand;
6287 if (key->type == SMP_LTK)
6290 memcpy(ev.key.val, key->val, sizeof(key->val));
6292 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event.  Only keys tied to a resolvable private
 * address (rpa != BDADDR_ANY) are hinted for storage.
 */
6295 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6297 struct mgmt_ev_new_irk ev;
6299 memset(&ev, 0, sizeof(ev));
6301 /* For identity resolving keys from devices that are already
6302 * using a public address or static random address, do not
6303 * ask for storing this key. The identity resolving key really
6304 * is only mandatory for devices using resolvable random
6307 * Storing all identity resolving keys has the downside that
6308 * they will be also loaded on next boot of the system. More
6309 * identity resolving keys, means more time during scanning is
6310 * needed to actually resolve these addresses.
6312 if (bacmp(&irk->rpa, BDADDR_ANY))
6313 ev.store_hint = 0x01;
6315 ev.store_hint = 0x00;
6317 bacpy(&ev.rpa, &irk->rpa);
6318 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6319 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6320 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6322 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event.  Same identity
 * address rule as for LTKs: non-identity random addresses get
 * store_hint 0.
 */
6325 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6328 struct mgmt_ev_new_csrk ev;
6330 memset(&ev, 0, sizeof(ev));
6332 /* Devices using resolvable or non-resolvable random addresses
6333 * without providing an identity resolving key don't require
6334 * to store signature resolving keys. Their addresses will change
6335 * the next time around.
6337 * Only when a remote device provides an identity address
6338 * make sure the signature resolving key is stored. So allow
6339 * static random and public addresses here.
6341 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6342 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6343 ev.store_hint = 0x00;
6345 ev.store_hint = persistent;
6347 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6348 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6349 ev.key.master = csrk->master;
6350 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6352 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an identity address.
 * Non-identity addresses are ignored since the parameters could not
 * be reliably re-applied later.
 */
6355 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6356 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6357 u16 max_interval, u16 latency, u16 timeout)
6359 struct mgmt_ev_new_conn_param ev;
6361 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6364 memset(&ev, 0, sizeof(ev));
6365 bacpy(&ev.addr.bdaddr, bdaddr);
6366 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6367 ev.store_hint = store_hint;
/* Event fields are little-endian on the wire. */
6368 ev.min_interval = cpu_to_le16(min_interval);
6369 ev.max_interval = cpu_to_le16(max_interval);
6370 ev.latency = cpu_to_le16(latency);
6371 ev.timeout = cpu_to_le16(timeout);
6373 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len in buffer eir and return the new length.  The caller must
 * guarantee the buffer has room for data_len + 2 more bytes.
 */
6376 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* Field length counts the type byte plus the payload. */
6379 eir[eir_len++] = sizeof(type) + data_len;
6380 eir[eir_len++] = type;
6381 memcpy(&eir[eir_len], data, data_len);
6382 eir_len += data_len;
/* Emit a Device Connected event.  For LE connections that supplied
 * advertising data, that data is forwarded verbatim; otherwise the
 * remote name and (non-zero) class of device are packed as EIR.
 */
6387 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6388 u32 flags, u8 *name, u8 name_len)
6391 struct mgmt_ev_device_connected *ev = (void *) buf;
6394 bacpy(&ev->addr.bdaddr, &conn->dst);
6395 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6397 ev->flags = __cpu_to_le32(flags);
6399 /* We must ensure that the EIR Data fields are ordered and
6400 * unique. Keep it simple for now and avoid the problem by not
6401 * adding any BR/EDR data to the LE adv.
6403 if (conn->le_adv_data_len > 0) {
6404 memcpy(&ev->eir[eir_len],
6405 conn->le_adv_data, conn->le_adv_data_len);
6406 eir_len = conn->le_adv_data_len;
6409 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device when it is non-zero. */
6412 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6413 eir_len = eir_append_data(ev->eir, eir_len,
6415 conn->dev_class, 3);
6418 ev->eir_len = cpu_to_le16(eir_len);
6420 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6421 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT
 * command and hand its socket back to the caller via *data so the
 * subsequent Device Disconnected event can skip that socket.
 */
6424 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6426 struct sock **sk = data;
6428 cmd->cmd_complete(cmd, 0);
6433 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending UNPAIR_DEVICE
 * command, sending the Device Unpaired event before completing.
 */
6436 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6438 struct hci_dev *hdev = data;
6439 struct mgmt_cp_unpair_device *cp = cmd->param;
6441 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6443 cmd->cmd_complete(cmd, 0);
6444 mgmt_pending_remove(cmd);
/* Return true when a SET_POWERED(off) command is in flight, i.e. the
 * controller is in the process of powering down.
 * NOTE(review): the check of cp->val is elided in this listing.
 */
6447 bool mgmt_powering_down(struct hci_dev *hdev)
6449 struct pending_cmd *cmd;
6450 struct mgmt_mode *cp;
6452 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event.  If we are powering down and
 * this was the last connection, expedite the queued power-off work.
 * Pending DISCONNECT commands are completed first; their socket is
 * excluded from the broadcast.
 */
6463 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6464 u8 link_type, u8 addr_type, u8 reason,
6465 bool mgmt_connected)
6467 struct mgmt_ev_device_disconnected ev;
6468 struct sock *sk = NULL;
6470 /* The connection is still in hci_conn_hash so test for 1
6471 * instead of 0 to know if this is the last one.
6473 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6474 cancel_delayed_work(&hdev->power_off);
6475 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6478 if (!mgmt_connected)
6481 if (link_type != ACL_LINK && link_type != LE_LINK)
6484 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6486 bacpy(&ev.addr.bdaddr, bdaddr);
6487 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6490 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6495 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with the (mapped) HCI status
 * after a failed disconnect.  Pending unpair commands are answered
 * too, since the unpair path also triggers disconnects.
 */
6499 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6500 u8 link_type, u8 addr_type, u8 status)
6502 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6503 struct mgmt_cp_disconnect *cp;
6504 struct pending_cmd *cmd;
6506 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6509 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Ignore if the pending command targets a different device. */
6515 if (bacmp(bdaddr, &cp->addr.bdaddr))
6518 if (cp->addr.type != bdaddr_type)
6521 cmd->cmd_complete(cmd, mgmt_status(status));
6522 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event.  Like the disconnect path, expedite
 * power-off if this was the last connection during a power-down.
 */
6525 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6526 u8 addr_type, u8 status)
6528 struct mgmt_ev_connect_failed ev;
6530 /* The connection is still in hci_conn_hash so test for 1
6531 * instead of 0 to know if this is the last one.
6533 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6534 cancel_delayed_work(&hdev->power_off);
6535 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6538 bacpy(&ev.addr.bdaddr, bdaddr);
6539 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6540 ev.status = mgmt_status(status);
6542 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event (BR/EDR only). */
6545 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6547 struct mgmt_ev_pin_code_request ev;
6549 bacpy(&ev.addr.bdaddr, bdaddr);
6550 ev.addr.type = BDADDR_BREDR;
6553 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY with the mapped HCI status. */
6556 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6559 struct pending_cmd *cmd;
6561 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6565 cmd->cmd_complete(cmd, mgmt_status(status));
6566 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY with the mapped HCI status. */
6569 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6572 struct pending_cmd *cmd;
6574 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6578 cmd->cmd_complete(cmd, mgmt_status(status));
6579 mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event carrying the numeric comparison
 * value and a hint whether user confirmation is actually required.
 */
6582 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6583 u8 link_type, u8 addr_type, u32 value,
6586 struct mgmt_ev_user_confirm_request ev;
6588 BT_DBG("%s", hdev->name);
6590 bacpy(&ev.addr.bdaddr, bdaddr);
6591 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6592 ev.confirm_hint = confirm_hint;
6593 ev.value = cpu_to_le32(value);
6595 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace for a passkey. */
6599 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6600 u8 link_type, u8 addr_type)
6602 struct mgmt_ev_user_passkey_request ev;
6604 BT_DBG("%s", hdev->name);
6606 bacpy(&ev.addr.bdaddr, bdaddr);
6607 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6609 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper: complete the pending command identified by opcode
 * with the mapped HCI status.  Used by all four user confirm/passkey
 * reply-complete wrappers below.
 */
6613 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6614 u8 link_type, u8 addr_type, u8 status,
6617 struct pending_cmd *cmd;
6619 cmd = mgmt_pending_find(opcode, hdev);
6623 cmd->cmd_complete(cmd, mgmt_status(status));
6624 mgmt_pending_remove(cmd);
/* Complete a pending USER_CONFIRM_REPLY. */
6629 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6630 u8 link_type, u8 addr_type, u8 status)
6632 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6633 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending USER_CONFIRM_NEG_REPLY. */
6636 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6637 u8 link_type, u8 addr_type, u8 status)
6639 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6641 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending USER_PASSKEY_REPLY. */
6644 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6645 u8 link_type, u8 addr_type, u8 status)
6647 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6648 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending USER_PASSKEY_NEG_REPLY. */
6651 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6652 u8 link_type, u8 addr_type, u8 status)
6654 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6656 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey
 * being entered on the remote side.
 */
6659 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6660 u8 link_type, u8 addr_type, u32 passkey,
6663 struct mgmt_ev_passkey_notify ev;
6665 BT_DBG("%s", hdev->name);
6667 bacpy(&ev.addr.bdaddr, bdaddr);
6668 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6669 ev.passkey = __cpu_to_le32(passkey);
6670 ev.entered = entered;
6672 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event for a connection.  If a
 * pairing command is pending for this connection, its socket is
 * excluded from the broadcast and the pairing is completed with the
 * same status.
 */
6675 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6677 struct mgmt_ev_auth_failed ev;
6678 struct pending_cmd *cmd;
6679 u8 status = mgmt_status(hci_status);
6681 bacpy(&ev.addr.bdaddr, &conn->dst);
6682 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6685 cmd = find_pairing(conn);
6687 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6688 cmd ? cmd->sk : NULL);
6691 pairing_complete(cmd, status);
/* HCI Write Auth Enable completed: on failure, fail all pending
 * SET_LINK_SECURITY commands; on success, sync the LINK_SECURITY
 * flag with the controller's AUTH flag and report New Settings.
 */
6694 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6696 struct cmd_lookup match = { NULL, hdev };
6700 u8 mgmt_err = mgmt_status(status);
6701 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6702 cmd_status_rsp, &mgmt_err);
6706 if (test_bit(HCI_AUTH, &hdev->flags))
6707 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6710 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6713 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6717 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that clears the controller's
 * extended inquiry response data (and the cached copy in hdev->eir).
 * No-op on controllers without extended inquiry support.
 */
6723 static void clear_eir(struct hci_request *req)
6725 struct hci_dev *hdev = req->hdev;
6726 struct hci_cp_write_eir cp;
6728 if (!lmp_ext_inq_capable(hdev))
6731 memset(hdev->eir, 0, sizeof(hdev->eir));
6733 memset(&cp, 0, sizeof(cp));
6735 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI Write SSP Mode completed: reconcile the SSP_ENABLED/HS_ENABLED
 * flags with the outcome, answer pending SET_SSP commands, emit New
 * Settings if anything changed, and follow up with an SSP debug-mode
 * write or an EIR update as needed.
 */
6738 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6740 struct cmd_lookup match = { NULL, hdev };
6741 struct hci_request req;
6742 bool changed = false;
6745 u8 mgmt_err = mgmt_status(status);
/* Failure path: roll back a speculative enable of the flag. */
6747 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6748 &hdev->dev_flags)) {
6749 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6750 new_settings(hdev, NULL);
6753 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6759 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6761 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed cannot stay enabled without SSP. */
6763 changed = test_and_clear_bit(HCI_HS_ENABLED,
6766 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6769 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6772 new_settings(hdev, match.sk);
6777 hci_req_init(&req, hdev);
6779 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6780 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6781 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6782 sizeof(enable), &enable);
6788 hci_req_run(&req, NULL);
/* HCI Secure Connections enable completed: reconcile SC_ENABLED and
 * SC_ONLY flags with the outcome, answer pending SET_SECURE_CONN
 * commands and emit New Settings if the flags changed.
 */
6791 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6793 struct cmd_lookup match = { NULL, hdev };
6794 bool changed = false;
6797 u8 mgmt_err = mgmt_status(status);
/* Failure path: undo the speculative flag set. */
6800 if (test_and_clear_bit(HCI_SC_ENABLED,
6802 new_settings(hdev, NULL);
6803 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6806 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6807 cmd_status_rsp, &mgmt_err);
6812 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6814 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode makes no sense with SC disabled. */
6815 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6818 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6819 settings_rsp, &match);
6822 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference on it.
 */
6828 static void sk_lookup(struct pending_cmd *cmd, void *data)
6830 struct cmd_lookup *match = data;
6832 if (match->sk == NULL) {
6833 match->sk = cmd->sk;
6834 sock_hold(match->sk);
/* Class-of-device write finished: find the socket that initiated any
 * related pending command (set class / add or remove UUID) so the
 * Class Of Dev Changed event can skip it, then broadcast the event.
 */
6838 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6841 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6843 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6844 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6845 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6848 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name write finished: cache the new name, and broadcast a
 * Local Name Changed event unless the change originates from the
 * power-on sequence.
 */
6855 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6857 struct mgmt_cp_set_local_name ev;
6858 struct pending_cmd *cmd;
6863 memset(&ev, 0, sizeof(ev));
6864 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6865 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6867 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6869 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6871 /* If this is a HCI command related to powering on the
6872 * HCI dev don't send any mgmt signals.
6874 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6878 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6879 cmd ? cmd->sk : NULL);
/* OOB data read finished: answer the pending READ_LOCAL_OOB_DATA
 * command.  When Secure Connections is enabled and P-256 values are
 * present, the extended reply (192- and 256-bit hashes/randomizers)
 * is used; otherwise only the P-192 values are returned.
 */
6882 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6883 u8 *rand192, u8 *hash256, u8 *rand256,
6886 struct pending_cmd *cmd;
6888 BT_DBG("%s status %u", hdev->name, status);
6890 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6895 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6896 mgmt_status(status));
6898 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
6899 struct mgmt_rp_read_local_oob_ext_data rp;
6901 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6902 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
6904 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6905 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
6907 cmd_complete(cmd->sk, hdev->id,
6908 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6911 struct mgmt_rp_read_local_oob_data rp;
6913 memcpy(rp.hash, hash192, sizeof(rp.hash));
6914 memcpy(rp.rand, rand192, sizeof(rp.rand));
6916 cmd_complete(cmd->sk, hdev->id,
6917 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6922 mgmt_pending_remove(cmd);
/* Return true if the 128-bit uuid appears in the uuids array of
 * uuid_count entries (byte-wise comparison).
 */
6925 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6929 for (i = 0; i < uuid_count; i++) {
6930 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data and return true if any 16-, 32- or
 * 128-bit UUID field contains a UUID from the filter list.  16- and
 * 32-bit UUIDs are expanded to 128 bit over the Bluetooth base UUID
 * before comparison.
 */
6937 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
6941 while (parsed < eir_len) {
6942 u8 field_len = eir[0];
/* Stop if the declared field length overruns the data. */
6949 if (eir_len - parsed < field_len + 1)
6953 case EIR_UUID16_ALL:
6954 case EIR_UUID16_SOME:
6955 for (i = 0; i + 3 <= field_len; i += 2) {
6956 memcpy(uuid, bluetooth_base_uuid, 16);
/* Bytes 12-13 carry the 16-bit UUID (little endian in EIR). */
6957 uuid[13] = eir[i + 3];
6958 uuid[12] = eir[i + 2];
6959 if (has_uuid(uuid, uuid_count, uuids))
6963 case EIR_UUID32_ALL:
6964 case EIR_UUID32_SOME:
6965 for (i = 0; i + 5 <= field_len; i += 4) {
6966 memcpy(uuid, bluetooth_base_uuid, 16);
/* Bytes 12-15 carry the 32-bit UUID. */
6967 uuid[15] = eir[i + 5];
6968 uuid[14] = eir[i + 4];
6969 uuid[13] = eir[i + 3];
6970 uuid[12] = eir[i + 2];
6971 if (has_uuid(uuid, uuid_count, uuids))
6975 case EIR_UUID128_ALL:
6976 case EIR_UUID128_SOME:
6977 for (i = 0; i + 17 <= field_len; i += 16) {
6978 memcpy(uuid, eir + i + 2, 16);
6979 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len bytes). */
6985 parsed += field_len + 1;
6986 eir += field_len + 1;
/* Build and emit a Device Found event from inquiry/advertising
 * results, applying the active discovery filters: RSSI threshold,
 * UUID list matching across EIR and scan response, and the
 * invalid-RSSI reporting rules for service discovery.
 */
6992 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6993 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6994 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6997 struct mgmt_ev_device_found *ev = (void *) buf;
7001 /* Don't send events for a non-kernel initiated discovery. With
7002 * LE one exception is if we have pend_le_reports > 0 in which
7003 * case we're doing passive scanning and want these events.
7005 if (!hci_discovery_active(hdev)) {
7006 if (link_type == ACL_LINK)
7008 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7012 /* When using service discovery with a RSSI threshold, then check
7013 * if such a RSSI threshold is specified. If a RSSI threshold has
7014 * been specified, then all results with a RSSI smaller than the
7015 * RSSI threshold will be dropped.
7017 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7018 * the results are also dropped.
7020 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7021 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7024 /* Make sure that the buffer is big enough. The 5 extra bytes
7025 * are for the potential CoD field.
7027 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7030 memset(buf, 0, sizeof(buf));
7032 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7033 * RSSI value was reported as 0 when not available. This behavior
7034 * is kept when using device discovery. This is required for full
7035 * backwards compatibility with the API.
7037 * However when using service discovery, the value 127 will be
7038 * returned when the RSSI is not available.
7040 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7043 bacpy(&ev->addr.bdaddr, bdaddr);
7044 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7046 ev->flags = cpu_to_le32(flags);
7049 /* When using service discovery and a list of UUID is
7050 * provided, results with no matching UUID should be
7051 * dropped. In case there is a match the result is
7052 * kept and checking possible scan response data
7055 if (hdev->discovery.uuid_count > 0) {
7056 match = eir_has_uuids(eir, eir_len,
7057 hdev->discovery.uuid_count,
7058 hdev->discovery.uuids);
7063 /* Copy EIR or advertising data into event */
7064 memcpy(ev->eir, eir, eir_len);
7066 /* When using service discovery and a list of UUID is
7067 * provided, results with empty EIR or advertising data
7068 * should be dropped since they do not match any UUID.
7070 if (hdev->discovery.uuid_count > 0)
/* Append the class of device only if the EIR data does not
 * already carry one.
 */
7074 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7075 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7078 if (scan_rsp_len > 0) {
7079 /* When using service discovery and a list of UUID is
7080 * provided, results with no matching UUID should be
7081 * dropped if there is no previous match from the
7084 if (hdev->discovery.uuid_count > 0) {
7085 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7086 hdev->discovery.uuid_count,
7087 hdev->discovery.uuids))
7091 /* Append scan response data to event */
7092 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7094 /* When using service discovery and a list of UUID is
7095 * provided, results with empty scan response and no
7096 * previous matched advertising data should be dropped.
7098 if (hdev->discovery.uuid_count > 0 && !match)
7102 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7103 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7105 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote name (used when
 * a name request completes during discovery).  The name is packed as
 * a single EIR_NAME_COMPLETE field.
 */
7108 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7109 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7111 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR length and type bytes for the name field. */
7112 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7115 ev = (struct mgmt_ev_device_found *) buf;
7117 memset(buf, 0, sizeof(buf));
7119 bacpy(&ev->addr.bdaddr, bdaddr);
7120 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7123 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7126 ev->eir_len = cpu_to_le16(eir_len);
7128 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a Discovering event announcing the start or stop of
 * discovery, including the discovery type in use.
 */
7131 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7133 struct mgmt_ev_discovering ev;
7135 BT_DBG("%s discovering %u", hdev->name, discovering);
7137 memset(&ev, 0, sizeof(ev));
7138 ev.type = hdev->discovery.type;
7139 ev.discovering = discovering;
7141 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising: log-only. */
7144 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
7146 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising (e.g. after a disconnection) if the
 * mgmt-level advertising setting is still on.
 */
7149 void mgmt_reenable_advertising(struct hci_dev *hdev)
7151 struct hci_request req;
7153 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7156 hci_req_init(&req, hdev);
7157 enable_advertising(&req);
7158 hci_req_run(&req, adv_enable_complete);