2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "mgmt_util.h"
/* Registry of HCI management channels (mgmt, mgmt-config, ...),
 * guarded by mgmt_chan_list_lock.
 */
37 static LIST_HEAD(mgmt_chan_list);
38 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Number of open monitor sockets; frame duplication to the monitor
 * channel is skipped entirely while this is zero.
 */
40 static atomic_t monitor_promisc = ATOMIC_INIT(0);
42 /* ----- HCI socket interface ----- */
/* Cast a struct sock to the HCI-specific per-socket data.
 * NOTE(review): this relies on struct hci_pinfo embedding the sock as
 * its first member -- confirm against the full struct definition,
 * which is elided in this view.
 */
45 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fields of struct hci_pinfo visible here: the per-socket HCI packet
 * filter and the channel the socket is bound to (raw/user/monitor/mgmt).
 */
50 struct hci_filter filter;
52 unsigned short channel;
56 void hci_sock_set_flag(struct sock *sk, int nr)
58 set_bit(nr, &hci_pi(sk)->flags);
61 void hci_sock_clear_flag(struct sock *sk, int nr)
63 clear_bit(nr, &hci_pi(sk)->flags);
66 int hci_sock_test_flag(struct sock *sk, int nr)
68 return test_bit(nr, &hci_pi(sk)->flags);
71 unsigned short hci_sock_get_channel(struct sock *sk)
73 return hci_pi(sk)->channel;
76 static inline int hci_test_bit(int nr, const void *addr)
78 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest command OGF covered by the security filter table below. */
82 #define HCI_SFLT_MAX_OGF 5
/* Security filter: bitmasks of packet types, events and per-OGF command
 * OCFs that unprivileged (non-CAP_NET_RAW) raw sockets may use.  The
 * type_mask/event_mask members are elided in this view.
 */
84 struct hci_sec_filter {
87 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Default security filter applied to raw sockets without CAP_NET_RAW. */
90 static const struct hci_sec_filter hci_sec_filter = {
/* Events */
94 { 0x1000d9fe, 0x0000b00c },
/* Commands, one OCF bitmask row per OGF.
 * NOTE(review): rows appear to follow OGF numbering (LINK_CTL=1 ...
 * STATUS_PARAM=5) given the two surviving labels -- confirm.
 */
/* OGF_LINK_CTL */
99 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
100 /* OGF_LINK_POLICY */
101 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL */
103 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM */
105 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
106 /* OGF_STATUS_PARAM */
107 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of open HCI sockets, protected by its embedded rwlock. */
111 static struct bt_sock_list hci_sk_list = {
112 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb must be withheld from raw socket @sk according
 * to the socket's hci_filter.  Returns true when the packet is
 * filtered out (i.e. must NOT be delivered).
 */
115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
117 struct hci_filter *flt;
118 int flt_type, flt_event;
121 flt = &hci_pi(sk)->filter;
/* Vendor packets map to filter type 0; the assignment for that branch
 * is elided in this view.
 */
123 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
126 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
/* Packet type not enabled in the socket's type mask -> filter it. */
128 if (!test_bit(flt_type, &flt->type_mask))
131 /* Extra filter for event packets only */
132 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First byte of an HCI event packet is the event code. */
135 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
137 if (!hci_test_bit(flt_event, &flt->event_mask))
140 /* Check filter only when opcode is set */
/* Cmd Complete carries the opcode at offset 3, Cmd Status at 4;
 * mismatching opcodes are filtered.
 */
144 if (flt_event == HCI_EV_CMD_COMPLETE &&
145 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
148 if (flt_event == HCI_EV_CMD_STATUS &&
149 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
155 /* Send frame to RAW socket */
/* Deliver @skb from @hdev to every bound raw/user-channel socket.
 * A private copy with the packet-type byte pushed in front is created
 * lazily on first delivery and cloned per socket.
 */
156 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
159 struct sk_buff *skb_copy = NULL;
161 BT_DBG("hdev %p len %d", hdev, skb->len);
163 read_lock(&hci_sk_list.lock);
165 sk_for_each(sk, &hci_sk_list.head) {
166 struct sk_buff *nskb;
/* Only sockets bound to this controller are eligible. */
168 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
171 /* Don't send frame to the socket it came from */
/* Raw channel: honor the per-socket filter.  User channel: only
 * incoming event/ACL/SCO frames are passed up.
 */
175 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
176 if (is_filtered_packet(sk, skb))
178 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
179 if (!bt_cb(skb)->incoming)
181 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
182 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
183 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
186 /* Don't send frame to other channel types */
191 /* Create a private copy with headroom */
192 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
196 /* Put type byte before the data */
197 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
/* Clone per receiver; the clone is freed if queueing fails. */
200 nskb = skb_clone(skb_copy, GFP_ATOMIC);
204 if (sock_queue_rcv_skb(sk, nskb))
208 read_unlock(&hci_sk_list.lock);
213 /* Send frame to sockets with specific channel */
/* Broadcast @skb to every bound socket on @channel that has socket
 * flag @flag set, skipping @skip_sk (the originator, may be NULL).
 */
214 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
215 int flag, struct sock *skip_sk)
219 BT_DBG("channel %u len %d", channel, skb->len);
221 read_lock(&hci_sk_list.lock);
223 sk_for_each(sk, &hci_sk_list.head) {
224 struct sk_buff *nskb;
226 /* Ignore socket without the flag set */
227 if (!hci_sock_test_flag(sk, flag))
230 /* Skip the original socket */
234 if (sk->sk_state != BT_BOUND)
237 if (hci_pi(sk)->channel != channel)
/* Per-socket clone; freed if the receive queue rejects it. */
240 nskb = skb_clone(skb, GFP_ATOMIC);
244 if (sock_queue_rcv_skb(sk, nskb))
248 read_unlock(&hci_sk_list.lock);
251 /* Send frame to monitor socket */
/* Mirror @skb to the monitor channel, prefixed with a hci_mon_hdr
 * whose opcode encodes packet type and direction.  Cheap early exit
 * when no monitor socket is open.
 */
252 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
254 struct sk_buff *skb_copy = NULL;
255 struct hci_mon_hdr *hdr;
258 if (!atomic_read(&monitor_promisc))
261 BT_DBG("hdev %p len %d", hdev, skb->len);
/* Map HCI packet type (+ direction for data) to a monitor opcode. */
263 switch (bt_cb(skb)->pkt_type) {
264 case HCI_COMMAND_PKT:
265 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
268 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
270 case HCI_ACLDATA_PKT:
271 if (bt_cb(skb)->incoming)
272 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
274 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
276 case HCI_SCODATA_PKT:
277 if (bt_cb(skb)->incoming)
278 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
280 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
286 /* Create a private copy with headroom */
287 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
291 /* Put header before the data */
292 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
293 hdr->opcode = opcode;
294 hdr->index = cpu_to_le16(hdev->id);
295 hdr->len = cpu_to_le16(skb->len);
/* Only trusted monitor sockets receive traffic. */
297 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
298 HCI_SOCK_TRUSTED, NULL);
/* Build a monitor-channel control frame for device lifecycle @event
 * (register/unregister/open/close/up), including the hci_mon_hdr and
 * a timestamp.  Returns NULL on unknown event or allocation failure
 * (those paths are elided in this view).
 */
302 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
304 struct hci_mon_hdr *hdr;
305 struct hci_mon_new_index *ni;
306 struct hci_mon_index_info *ii;
/* HCI_DEV_REG: announce a new controller index with type/bus,
 * address and name.
 */
312 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
316 ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
317 ni->type = hdev->dev_type;
319 bacpy(&ni->bdaddr, &hdev->bdaddr);
320 memcpy(ni->name, hdev->name, 8);
322 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* HCI_DEV_UNREG: bare deletion notification, no payload. */
326 skb = bt_skb_alloc(0, GFP_ATOMIC);
330 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
/* HCI_DEV_UP: index info with current address and manufacturer. */
334 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
338 ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
339 bacpy(&ii->bdaddr, &hdev->bdaddr);
340 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
342 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
/* HCI_DEV_OPEN / HCI_DEV_CLOSE: payload-less notifications. */
346 skb = bt_skb_alloc(0, GFP_ATOMIC);
350 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
354 skb = bt_skb_alloc(0, GFP_ATOMIC);
358 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
/* Timestamp the frame and prepend the monitor header. */
365 __net_timestamp(skb);
367 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
368 hdr->opcode = opcode;
369 hdr->index = cpu_to_le16(hdev->id);
370 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay the current controller state (register, open, up events) to
 * a freshly bound monitor socket so it starts with a consistent view.
 */
375 static void send_monitor_replay(struct sock *sk)
377 struct hci_dev *hdev;
379 read_lock(&hci_dev_list_lock);
381 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Every registered controller gets a DEV_REG event. */
384 skb = create_monitor_event(hdev, HCI_DEV_REG);
388 if (sock_queue_rcv_skb(sk, skb))
/* Only running controllers additionally get DEV_OPEN ... */
391 if (!test_bit(HCI_RUNNING, &hdev->flags))
394 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
398 if (sock_queue_rcv_skb(sk, skb))
/* ... and only powered-up ones get DEV_UP. */
401 if (!test_bit(HCI_UP, &hdev->flags))
404 skb = create_monitor_event(hdev, HCI_DEV_UP);
408 if (sock_queue_rcv_skb(sk, skb))
412 read_unlock(&hci_dev_list_lock);
415 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and deliver it to raw sockets via hci_send_to_sock().
 * @hdev may be NULL for events not tied to a controller.
 */
416 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
418 struct hci_event_hdr *hdr;
419 struct hci_ev_stack_internal *ev;
422 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
/* Standard HCI event header wrapping the internal event. */
426 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
427 hdr->evt = HCI_EV_STACK_INTERNAL;
428 hdr->plen = sizeof(*ev) + dlen;
430 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
432 memcpy(ev->data, data, dlen);
/* Mark as incoming so user-channel style filtering treats it right. */
434 bt_cb(skb)->incoming = 1;
435 __net_timestamp(skb);
437 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
438 hci_send_to_sock(hdev, skb);
/* Propagate controller lifecycle @event: mirror it to the monitor
 * channel, raise a stack-internal event for raw sockets, and on
 * unregister detach every socket still bound to @hdev.
 */
442 void hci_sock_dev_event(struct hci_dev *hdev, int event)
444 BT_DBG("hdev %s event %d", hdev->name, event);
446 if (atomic_read(&monitor_promisc)) {
449 /* Send event to monitor */
450 skb = create_monitor_event(hdev, event);
452 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
453 HCI_SOCK_TRUSTED, NULL);
/* Register/up/down events are also surfaced as HCI_EV_SI_DEVICE. */
458 if (event <= HCI_DEV_DOWN) {
459 struct hci_ev_si_device ev;
461 /* Send event to sockets */
463 ev.dev_id = hdev->id;
464 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
467 if (event == HCI_DEV_UNREG) {
470 /* Detach sockets from device */
471 read_lock(&hci_sk_list.lock);
472 sk_for_each(sk, &hci_sk_list.head) {
473 bh_lock_sock_nested(sk);
474 if (hci_pi(sk)->hdev == hdev) {
/* NOTE(review): the matching hci_dev_put() and bh_unlock_sock()
 * are elided in this view -- confirm the reference on hdev taken
 * at bind time is dropped here.
 */
475 hci_pi(sk)->hdev = NULL;
477 sk->sk_state = BT_OPEN;
478 sk->sk_state_change(sk);
484 read_unlock(&hci_sk_list.lock);
488 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
490 struct hci_mgmt_chan *c;
492 list_for_each_entry(c, &mgmt_chan_list, list) {
493 if (c->channel == channel)
500 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
502 struct hci_mgmt_chan *c;
504 mutex_lock(&mgmt_chan_list_lock);
505 c = __hci_mgmt_chan_find(channel);
506 mutex_unlock(&mgmt_chan_list_lock);
511 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
513 if (c->channel < HCI_CHANNEL_CONTROL)
516 mutex_lock(&mgmt_chan_list_lock);
517 if (__hci_mgmt_chan_find(c->channel)) {
518 mutex_unlock(&mgmt_chan_list_lock);
522 list_add_tail(&c->list, &mgmt_chan_list);
524 mutex_unlock(&mgmt_chan_list_lock);
528 EXPORT_SYMBOL(hci_mgmt_chan_register);
530 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
532 mutex_lock(&mgmt_chan_list_lock);
534 mutex_unlock(&mgmt_chan_list_lock);
536 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: drop monitor promiscuity, unlink from the
 * global socket list, tear down exclusive user-channel access, drop
 * the controller reference and purge pending queues.
 */
538 static int hci_sock_release(struct socket *sock)
540 struct sock *sk = sock->sk;
541 struct hci_dev *hdev;
543 BT_DBG("sock %p sk %p", sock, sk);
548 hdev = hci_pi(sk)->hdev;
550 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
551 atomic_dec(&monitor_promisc);
553 bt_sock_unlink(&hci_sk_list, sk);
556 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
557 /* When releasing an user channel exclusive access,
558 * call hci_dev_do_close directly instead of calling
559 * hci_dev_close to ensure the exclusive access will
560 * be released and the controller brought back down.
562 * The checking of HCI_AUTO_OFF is not needed in this
563 * case since it will have been cleared already when
564 * opening the user channel.
566 hci_dev_do_close(hdev);
567 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
/* Re-announce the controller to mgmt now that it is free again. */
568 mgmt_index_added(hdev);
571 atomic_dec(&hdev->promisc);
577 skb_queue_purge(&sk->sk_receive_queue);
578 skb_queue_purge(&sk->sk_write_queue);
584 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
589 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
594 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
596 hci_dev_unlock(hdev);
601 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
606 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
611 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
613 hci_dev_unlock(hdev);
618 /* Ioctls that require bound socket */
/* Dispatch device-bound ioctls.  Rejects user-channel and
 * unconfigured controllers up front; privileged commands additionally
 * require CAP_NET_ADMIN.
 */
619 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
622 struct hci_dev *hdev = hci_pi(sk)->hdev;
627 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
630 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
/* Legacy ioctls only make sense for BR/EDR controllers. */
633 if (hdev->dev_type != HCI_BREDR)
638 if (!capable(CAP_NET_ADMIN))
643 return hci_get_conn_info(hdev, (void __user *) arg);
646 return hci_get_auth_info(hdev, (void __user *) arg);
649 if (!capable(CAP_NET_ADMIN))
651 return hci_sock_blacklist_add(hdev, (void __user *) arg);
654 if (!capable(CAP_NET_ADMIN))
656 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level HCI socket ioctl handler.  Global queries and device
 * power commands are handled here; anything else falls through to
 * hci_sock_bound_ioctl() on the bound device.
 */
662 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
665 void __user *argp = (void __user *) arg;
666 struct sock *sk = sock->sk;
669 BT_DBG("cmd %x arg %lx", cmd, arg);
/* ioctls are only valid on the raw channel. */
673 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
682 return hci_get_dev_list(argp);
685 return hci_get_dev_info(argp);
688 return hci_get_conn_list(argp);
/* Device up/down/reset/stat-reset require CAP_NET_ADMIN. */
691 if (!capable(CAP_NET_ADMIN))
693 return hci_dev_open(arg);
696 if (!capable(CAP_NET_ADMIN))
698 return hci_dev_close(arg);
701 if (!capable(CAP_NET_ADMIN))
703 return hci_dev_reset(arg);
706 if (!capable(CAP_NET_ADMIN))
708 return hci_dev_reset_stat(arg);
718 if (!capable(CAP_NET_ADMIN))
720 return hci_dev_cmd(cmd, argp);
723 return hci_inquiry(argp);
/* Remaining ioctls need a socket bound to a device. */
728 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (raw, user, monitor, or a
 * registered management channel) and optionally a controller.
 * User-channel binding takes exclusive ownership of the device;
 * monitor binding requires CAP_NET_RAW.
 */
735 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
738 struct sockaddr_hci haddr;
739 struct sock *sk = sock->sk;
740 struct hci_dev *hdev = NULL;
743 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); shorter addresses are zero-padded. */
748 memset(&haddr, 0, sizeof(haddr));
749 len = min_t(unsigned int, sizeof(haddr), addr_len);
750 memcpy(&haddr, addr, len);
752 if (haddr.hci_family != AF_BLUETOOTH)
/* Re-binding an already bound socket is not allowed. */
757 if (sk->sk_state == BT_BOUND) {
762 switch (haddr.hci_channel) {
763 case HCI_CHANNEL_RAW:
764 if (hci_pi(sk)->hdev) {
/* Raw channel may bind to a specific device or to none. */
769 if (haddr.hci_dev != HCI_DEV_NONE) {
770 hdev = hci_dev_get(haddr.hci_dev);
776 atomic_inc(&hdev->promisc);
779 hci_pi(sk)->hdev = hdev;
782 case HCI_CHANNEL_USER:
783 if (hci_pi(sk)->hdev) {
/* User channel requires a concrete device and CAP_NET_ADMIN. */
788 if (haddr.hci_dev == HCI_DEV_NONE) {
793 if (!capable(CAP_NET_ADMIN)) {
798 hdev = hci_dev_get(haddr.hci_dev);
/* Refuse exclusive access while the device is initializing,
 * in setup/config, or already up (unless only auto-off kept it up).
 */
804 if (test_bit(HCI_INIT, &hdev->flags) ||
805 hci_dev_test_flag(hdev, HCI_SETUP) ||
806 hci_dev_test_flag(hdev, HCI_CONFIG) ||
807 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
808 test_bit(HCI_UP, &hdev->flags))) {
/* Atomically claim exclusive user-channel ownership. */
814 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
/* Hide the controller from mgmt while user channel owns it. */
820 mgmt_index_removed(hdev);
822 err = hci_dev_open(hdev->id);
824 if (err == -EALREADY) {
825 /* In case the transport is already up and
826 * running, clear the error here.
828 * This can happen when opening an user
829 * channel and HCI_AUTO_OFF grace period
/* On real open failure, roll back the exclusive claim. */
834 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
835 mgmt_index_added(hdev);
841 atomic_inc(&hdev->promisc);
843 hci_pi(sk)->hdev = hdev;
846 case HCI_CHANNEL_MONITOR:
/* Monitor channel binds to no specific device. */
847 if (haddr.hci_dev != HCI_DEV_NONE) {
852 if (!capable(CAP_NET_RAW)) {
857 /* The monitor interface is restricted to CAP_NET_RAW
858 * capabilities and with that implicitly trusted.
860 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
/* Replay current controller state so the monitor is consistent. */
862 send_monitor_replay(sk);
864 atomic_inc(&monitor_promisc);
/* Default case: must be a registered management channel. */
868 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
873 if (haddr.hci_dev != HCI_DEV_NONE) {
878 /* Users with CAP_NET_ADMIN capabilities are allowed
879 * access to all management commands and events. For
880 * untrusted users the interface is restricted and
881 * also only untrusted events are sent.
883 if (capable(CAP_NET_ADMIN))
884 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
886 /* At the moment the index and unconfigured index events
887 * are enabled unconditionally. Setting them on each
888 * socket when binding keeps this functionality. They
889 * however might be cleared later and then sending of these
890 * events will be disabled, but that is then intentional.
892 * This also enables generic events that are safe to be
893 * received by untrusted users. Example for such events
894 * are changes to settings, class of device, name etc.
896 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
897 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
898 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
899 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
/* Record the channel and mark the socket bound. */
905 hci_pi(sk)->channel = haddr.hci_channel;
906 sk->sk_state = BT_BOUND;
/* Fill in @addr with the socket's family, bound device id and channel.
 * NOTE(review): the error path for an unbound socket (hdev == NULL) is
 * elided in this view -- confirm it returns -EBADFD before the
 * dereferences below.
 */
913 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
914 int *addr_len, int peer)
916 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
917 struct sock *sk = sock->sk;
918 struct hci_dev *hdev;
921 BT_DBG("sock %p sk %p", sock, sk);
928 hdev = hci_pi(sk)->hdev;
934 *addr_len = sizeof(*haddr);
935 haddr->hci_family = AF_BLUETOOTH;
936 haddr->hci_dev = hdev->id;
937 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data (direction and/or timestamp) to a received
 * message according to the socket's cmsg mask set via setsockopt.
 */
944 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
947 __u32 mask = hci_pi(sk)->cmsg_mask;
949 if (mask & HCI_CMSG_DIR) {
950 int incoming = bt_cb(skb)->incoming;
951 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
955 if (mask & HCI_CMSG_TSTAMP) {
957 struct compat_timeval ctv;
963 skb_get_timestamp(skb, &tv);
/* Compat (32-bit) userspace gets a compat_timeval instead. */
968 if (!COMPAT_USE_64BIT_TIME &&
969 (msg->msg_flags & MSG_CMSG_COMPAT)) {
970 ctv.tv_sec = tv.tv_sec;
971 ctv.tv_usec = tv.tv_usec;
977 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one queued frame.  Copies up to @len bytes, sets MSG_TRUNC
 * when the frame is larger, and adds per-channel ancillary data.
 * Returns the number of bytes copied or a negative error.
 */
981 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
984 int noblock = flags & MSG_DONTWAIT;
985 struct sock *sk = sock->sk;
989 BT_DBG("sock %p, sk %p", sock, sk);
/* Out-of-band data is not supported on HCI sockets. */
991 if (flags & (MSG_OOB))
994 if (sk->sk_state == BT_CLOSED)
997 skb = skb_recv_datagram(sk, flags, noblock, &err);
1003 msg->msg_flags |= MSG_TRUNC;
1007 skb_reset_transport_header(skb);
1008 err = skb_copy_datagram_msg(skb, 0, msg, copied);
/* Ancillary data depends on the channel: raw gets dir/timestamp
 * cmsgs, user/monitor/mgmt get the socket timestamp.
 */
1010 switch (hci_pi(sk)->channel) {
1011 case HCI_CHANNEL_RAW:
1012 hci_sock_cmsg(sk, msg, skb);
1014 case HCI_CHANNEL_USER:
1015 case HCI_CHANNEL_MONITOR:
1016 sock_recv_timestamp(msg, sk, skb);
1019 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1020 sock_recv_timestamp(msg, sk, skb);
1024 skb_free_datagram(sk, skb);
1026 return err ? : copied;
/* Parse and dispatch one management command from @msg on channel
 * @chan: validate the mgmt_hdr, look up the handler, enforce trust
 * and device-state restrictions, then invoke the handler.
 * Returns bytes consumed or a negative error; protocol-level failures
 * are reported to the sender via mgmt_cmd_status().
 */
1029 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1030 struct msghdr *msg, size_t msglen)
1034 struct mgmt_hdr *hdr;
1035 u16 opcode, index, len;
1036 struct hci_dev *hdev = NULL;
1037 const struct hci_mgmt_handler *handler;
1038 bool var_len, no_hdev;
1041 BT_DBG("got %zu bytes", msglen);
/* Must at least contain a complete mgmt_hdr. */
1043 if (msglen < sizeof(*hdr))
1046 buf = kmalloc(msglen, GFP_KERNEL);
1050 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
1056 opcode = __le16_to_cpu(hdr->opcode);
1057 index = __le16_to_cpu(hdr->index);
1058 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match the actual payload. */
1060 if (len != msglen - sizeof(*hdr)) {
1065 if (opcode >= chan->handler_count ||
1066 chan->handlers[opcode].func == NULL) {
1067 BT_DBG("Unknown op %u", opcode);
1068 err = mgmt_cmd_status(sk, index, opcode,
1069 MGMT_STATUS_UNKNOWN_COMMAND);
1073 handler = &chan->handlers[opcode];
/* Untrusted sockets may only use handlers flagged UNTRUSTED. */
1075 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1076 !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1077 err = mgmt_cmd_status(sk, index, opcode,
1078 MGMT_STATUS_PERMISSION_DENIED);
1082 if (index != MGMT_INDEX_NONE) {
1083 hdev = hci_dev_get(index);
1085 err = mgmt_cmd_status(sk, index, opcode,
1086 MGMT_STATUS_INVALID_INDEX);
/* Devices in setup/config or owned by a user channel are off-limits. */
1090 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1091 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1092 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1093 err = mgmt_cmd_status(sk, index, opcode,
1094 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured devices only accept handlers flagged UNCONFIGURED. */
1098 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1099 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1100 err = mgmt_cmd_status(sk, index, opcode,
1101 MGMT_STATUS_INVALID_INDEX);
/* Handler's no-device expectation must match whether we got one. */
1106 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1107 if (no_hdev != !hdev) {
1108 err = mgmt_cmd_status(sk, index, opcode,
1109 MGMT_STATUS_INVALID_INDEX);
/* Fixed-length handlers need an exact match, variable-length
 * handlers a minimum.
 */
1113 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1114 if ((var_len && len < handler->data_len) ||
1115 (!var_len && len != handler->data_len)) {
1116 err = mgmt_cmd_status(sk, index, opcode,
1117 MGMT_STATUS_INVALID_PARAMS);
1121 if (hdev && chan->hdev_init)
1122 chan->hdev_init(sk, hdev);
1124 cp = buf + sizeof(*hdr);
1126 err = handler->func(sk, hdev, cp, len);
/* Send a frame from userspace.  Management channels are dispatched to
 * hci_mgmt_cmd(); raw/user channels copy the frame into an skb (first
 * byte is the packet type) and queue it on the device, applying the
 * security filter to commands from unprivileged raw sockets.
 */
1140 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1143 struct sock *sk = sock->sk;
1144 struct hci_mgmt_chan *chan;
1145 struct hci_dev *hdev;
1146 struct sk_buff *skb;
1149 BT_DBG("sock %p sk %p", sock, sk);
1151 if (msg->msg_flags & MSG_OOB)
1154 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum 4 bytes: type byte + smallest header; bounded above by
 * the maximum HCI frame size.
 */
1157 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1162 switch (hci_pi(sk)->channel) {
1163 case HCI_CHANNEL_RAW:
1164 case HCI_CHANNEL_USER:
/* Monitor channel is receive-only. */
1166 case HCI_CHANNEL_MONITOR:
/* Anything else must be a registered management channel. */
1170 mutex_lock(&mgmt_chan_list_lock);
1171 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1173 err = hci_mgmt_cmd(chan, sk, msg, len);
1177 mutex_unlock(&mgmt_chan_list_lock);
1181 hdev = hci_pi(sk)->hdev;
1187 if (!test_bit(HCI_UP, &hdev->flags)) {
1192 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1196 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First byte of the payload selects the HCI packet type. */
1201 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
1204 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1205 /* No permission check is needed for user channel
1206 * since that gets enforced when binding the socket.
1208 * However check that the packet type is valid.
1210 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1211 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1212 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1217 skb_queue_tail(&hdev->raw_q, skb);
1218 queue_work(hdev->workqueue, &hdev->tx_work);
1219 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1220 u16 opcode = get_unaligned_le16(skb->data);
1221 u16 ogf = hci_opcode_ogf(opcode);
1222 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged raw sockets may only send commands allowed by
 * the security filter table.
 */
1224 if (((ogf > HCI_SFLT_MAX_OGF) ||
1225 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1226 &hci_sec_filter.ocf_mask[ogf])) &&
1227 !capable(CAP_NET_RAW)) {
/* Vendor OGF 0x3f commands bypass the command queue. */
1233 skb_queue_tail(&hdev->raw_q, skb);
1234 queue_work(hdev->workqueue, &hdev->tx_work);
1236 /* Stand-alone HCI commands must be flagged as
1237 * single-command requests.
1239 bt_cb(skb)->req.start = true;
1241 skb_queue_tail(&hdev->cmd_q, skb);
1242 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command raw traffic (ACL/SCO) requires CAP_NET_RAW. */
1245 if (!capable(CAP_NET_RAW)) {
1250 skb_queue_tail(&hdev->raw_q, skb);
1251 queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI setsockopt: toggle direction/timestamp cmsgs and install
 * the packet filter.  Unprivileged sockets have their requested
 * filter intersected with the security filter.
 */
1265 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1266 char __user *optval, unsigned int len)
1268 struct hci_ufilter uf = { .opcode = 0 };
1269 struct sock *sk = sock->sk;
1270 int err = 0, opt = 0;
1272 BT_DBG("sk %p, opt %d", sk, optname);
/* Socket options only apply to the raw channel. */
1276 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1283 if (get_user(opt, (int __user *)optval)) {
1289 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1291 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1294 case HCI_TIME_STAMP:
1295 if (get_user(opt, (int __user *)optval)) {
1301 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1303 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER: seed uf with the current filter so a short
 * copy_from_user leaves unspecified fields unchanged.
 */
1308 struct hci_filter *f = &hci_pi(sk)->filter;
1310 uf.type_mask = f->type_mask;
1311 uf.opcode = f->opcode;
1312 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1313 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1316 len = min_t(unsigned int, len, sizeof(uf));
1317 if (copy_from_user(&uf, optval, len)) {
/* Clamp unprivileged filters to the security filter. */
1322 if (!capable(CAP_NET_RAW)) {
1323 uf.type_mask &= hci_sec_filter.type_mask;
1324 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1325 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
/* Commit the validated filter to the socket. */
1329 struct hci_filter *f = &hci_pi(sk)->filter;
1331 f->type_mask = uf.type_mask;
1332 f->opcode = uf.opcode;
1333 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1334 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* SOL_HCI getsockopt: report the cmsg toggles and the current packet
 * filter back to userspace.  Raw channel only.
 */
1348 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1349 char __user *optval, int __user *optlen)
1351 struct hci_ufilter uf;
1352 struct sock *sk = sock->sk;
1353 int len, opt, err = 0;
1355 BT_DBG("sk %p, opt %d", sk, optname);
1357 if (get_user(len, optlen))
1362 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
/* HCI_DATA_DIR: 1 when direction cmsgs are enabled. */
1369 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1374 if (put_user(opt, optval))
1378 case HCI_TIME_STAMP:
1379 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1384 if (put_user(opt, optval))
/* HCI_FILTER: marshal the kernel filter into a hci_ufilter. */
1390 struct hci_filter *f = &hci_pi(sk)->filter;
1392 memset(&uf, 0, sizeof(uf));
1393 uf.type_mask = f->type_mask;
1394 uf.opcode = f->opcode;
1395 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1396 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1399 len = min_t(unsigned int, len, sizeof(uf));
1400 if (copy_to_user(optval, &uf, len))
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets; datagram
 * semantics, no listen/connect/accept.
 */
1414 static const struct proto_ops hci_sock_ops = {
1415 .family = PF_BLUETOOTH,
1416 .owner = THIS_MODULE,
1417 .release = hci_sock_release,
1418 .bind = hci_sock_bind,
1419 .getname = hci_sock_getname,
1420 .sendmsg = hci_sock_sendmsg,
1421 .recvmsg = hci_sock_recvmsg,
1422 .ioctl = hci_sock_ioctl,
1423 .poll = datagram_poll,
1424 .listen = sock_no_listen,
1425 .shutdown = sock_no_shutdown,
1426 .setsockopt = hci_sock_setsockopt,
1427 .getsockopt = hci_sock_getsockopt,
1428 .connect = sock_no_connect,
1429 .socketpair = sock_no_socketpair,
1430 .accept = sock_no_accept,
1431 .mmap = sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * hci_pinfo private data (see hci_pi()).
 */
1434 static struct proto hci_sk_proto = {
1436 .owner = THIS_MODULE,
1437 .obj_size = sizeof(struct hci_pinfo)
/* Create a new HCI socket (SOCK_RAW only), initialize it in the
 * BT_OPEN state and link it into the global socket list.
 */
1440 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1445 BT_DBG("sock %p", sock);
1447 if (sock->type != SOCK_RAW)
1448 return -ESOCKTNOSUPPORT;
1450 sock->ops = &hci_sock_ops;
1452 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1456 sock_init_data(sock, sk);
/* Clear ZAPPED so the socket is considered usable. */
1458 sock_reset_flag(sk, SOCK_ZAPPED);
1460 sk->sk_protocol = protocol;
1462 sock->state = SS_UNCONNECTED;
1463 sk->sk_state = BT_OPEN;
1465 bt_sock_link(&hci_sk_list, sk);
/* Family ops registered for BTPROTO_HCI under PF_BLUETOOTH. */
1469 static const struct net_proto_family hci_sock_family_ops = {
1470 .family = PF_BLUETOOTH,
1471 .owner = THIS_MODULE,
1472 .create = hci_sock_create,
/* Module init: register the protocol, the BTPROTO_HCI socket family
 * and the procfs entry, unwinding in reverse order on failure.
 */
1475 int __init hci_sock_init(void)
/* sockaddr_hci must fit in a generic sockaddr. */
1479 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1481 err = proto_register(&hci_sk_proto, 0);
1485 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1487 BT_ERR("HCI socket registration failed");
1491 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1493 BT_ERR("Failed to create HCI proc file");
1494 bt_sock_unregister(BTPROTO_HCI);
1498 BT_INFO("HCI socket layer initialized");
/* Error path: undo the protocol registration. */
1503 proto_unregister(&hci_sk_proto);
1507 void hci_sock_cleanup(void)
1509 bt_procfs_cleanup(&init_net, "hci");
1510 bt_sock_unregister(BTPROTO_HCI);
1511 proto_unregister(&hci_sk_proto);