2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Count of sockets bound to HCI_CHANNEL_MONITOR; non-zero enables
 * broadcasting of traffic/events to monitor sockets (see
 * hci_send_to_monitor() and hci_sock_dev_event()).
 */
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
/* Per-socket protocol info: cast the generic sock to hci_pinfo.
 * NOTE(review): the struct hci_pinfo definition appears elided here —
 * only the filter and channel members are visible; confirm full layout
 * against upstream net/bluetooth/hci_sock.c.
 */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Per-socket RAW-channel event/type filter (HCI_FILTER sockopt) */
44 struct hci_filter filter;
/* Bound HCI channel (RAW/USER/CONTROL/MONITOR) */
46 unsigned short channel;
/* Test bit 'nr' in a little-endian __u32 bitmap at 'addr'.
 * Used instead of test_bit() because the filter masks are fixed
 * __u32 arrays, not unsigned long bitmaps.
 */
49 static inline int hci_test_bit(int nr, const void *addr)
51 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest OGF (opcode group) covered by the security filter table */
55 #define HCI_SFLT_MAX_OGF 5
/* Security filter: limits which packet types, events and command
 * opcodes an unprivileged (no CAP_NET_RAW) raw socket may see/send.
 * Each ocf_mask row is a 128-bit bitmap of permitted OCFs per OGF.
 */
57 struct hci_sec_filter {
60 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
63 static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types / event masks — values taken as-is from upstream;
	 * NOTE(review): some initializer rows appear elided in this view.
	 */
67 	{ 0x1000d9fe, 0x0000b00c },
72 	{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
74 	{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
76 	{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
78 	{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 	/* OGF_STATUS_PARAM */
80 	{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its rwlock.
 * Walked under read_lock() by the various hci_send_to_*() fan-out
 * helpers and by hci_sock_dev_event().
 */
84 static struct bt_sock_list hci_sk_list = {
85 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether 'skb' must be withheld from RAW socket 'sk'
 * according to the socket's HCI filter (type mask, event mask and,
 * for command-complete/status events, the opcode).
 * NOTE(review): return statements are elided in this view — the
 * visible tests match upstream where a failed test returns true
 * (filtered); confirm against net/bluetooth/hci_sock.c.
 */
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
90 	struct hci_filter *flt;
91 	int flt_type, flt_event;
94 	flt = &hci_pi(sk)->filter;
	/* Vendor packets bypass the normal type/event filtering */
96 	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
99 	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
101 	if (!test_bit(flt_type, &flt->type_mask))
104 	/* Extra filter for event packets only */
105 	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
	/* First byte of an event packet is the event code */
108 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
110 	if (!hci_test_bit(flt_event, &flt->event_mask))
113 	/* Check filter only when opcode is set */
	/* Opcode sits at offset 3 (cmd complete) / 4 (cmd status) */
117 	if (flt_event == HCI_EV_CMD_COMPLETE &&
118 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
121 	if (flt_event == HCI_EV_CMD_STATUS &&
122 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
128 /* Send frame to RAW socket */
/* Fan a received/sent HCI frame out to every bound RAW or USER
 * channel socket attached to 'hdev'.  A single private copy with a
 * prepended packet-type byte is made lazily, then cloned per socket.
 * Caller keeps ownership of 'skb'.
 */
129 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
132 	struct sk_buff *skb_copy = NULL;
134 	BT_DBG("hdev %p len %d", hdev, skb->len);
136 	read_lock(&hci_sk_list.lock);
138 	sk_for_each(sk, &hci_sk_list.head) {
139 		struct sk_buff *nskb;
		/* Only sockets bound to this device are eligible */
141 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
144 		/* Don't send frame to the socket it came from */
148 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Honour the per-socket RAW filter */
149 			if (is_filtered_packet(sk, skb))
151 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only sees incoming EVT/ACL/SCO */
152 			if (!bt_cb(skb)->incoming)
154 			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
155 			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
156 			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
159 			/* Don't send frame to other channel types */
164 			/* Create a private copy with headroom */
165 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
169 			/* Put type byte before the data */
170 			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
173 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		/* On queue failure the clone is dropped (kfree in elided path) */
177 		if (sock_queue_rcv_skb(sk, nskb))
181 	read_unlock(&hci_sk_list.lock);
186 /* Send frame to sockets with specific channel */
/* Clone 'skb' to every bound socket on 'channel', optionally skipping
 * 'skip_sk' (the originator).  Caller keeps ownership of 'skb'.
 */
187 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
188 			 struct sock *skip_sk)
192 	BT_DBG("channel %u len %d", channel, skb->len);
194 	read_lock(&hci_sk_list.lock);
196 	sk_for_each(sk, &hci_sk_list.head) {
197 		struct sk_buff *nskb;
199 		/* Skip the original socket */
203 		if (sk->sk_state != BT_BOUND)
206 		if (hci_pi(sk)->channel != channel)
209 		nskb = skb_clone(skb, GFP_ATOMIC);
		/* Clone is freed on queue failure (kfree in elided path) */
213 		if (sock_queue_rcv_skb(sk, nskb))
217 	read_unlock(&hci_sk_list.lock);
220 /* Send frame to monitor socket */
/* Wrap 'skb' in an hci_mon_hdr (opcode chosen from packet type and
 * direction) and broadcast it on HCI_CHANNEL_MONITOR.  Cheap early
 * exit when no monitor socket is open.
 */
221 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
223 	struct sk_buff *skb_copy = NULL;
224 	struct hci_mon_hdr *hdr;
	/* No monitor listeners — nothing to do */
227 	if (!atomic_read(&monitor_promisc))
230 	BT_DBG("hdev %p len %d", hdev, skb->len);
	/* Map HCI packet type (+ direction for data) to monitor opcode */
232 	switch (bt_cb(skb)->pkt_type) {
233 	case HCI_COMMAND_PKT:
234 		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
237 		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
239 	case HCI_ACLDATA_PKT:
240 		if (bt_cb(skb)->incoming)
241 			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
243 			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
245 	case HCI_SCODATA_PKT:
246 		if (bt_cb(skb)->incoming)
247 			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
249 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
255 	/* Create a private copy with headroom */
256 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
260 	/* Put header before the data */
261 	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
262 	hdr->opcode = opcode;
263 	hdr->index = cpu_to_le16(hdev->id);
	/* Payload length = original frame length (header excluded) */
264 	hdr->len = cpu_to_le16(skb->len);
266 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
/* Build a monitor-channel control event (NEW_INDEX with device info,
 * or DEL_INDEX with empty payload) for 'hdev'.  Returns a timestamped
 * skb with an hci_mon_hdr prepended, or NULL on allocation failure
 * (error paths elided in this view).
 */
270 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
272 	struct hci_mon_hdr *hdr;
273 	struct hci_mon_new_index *ni;
279 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
283 		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
284 		ni->type = hdev->dev_type;
286 		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): fixed 8-byte copy of hdev->name — assumes
		 * ni->name is 8 bytes; confirm against hci_mon_new_index.
		 */
287 		memcpy(ni->name, hdev->name, 8);
289 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
	/* DEL_INDEX carries no payload */
293 		skb = bt_skb_alloc(0, GFP_ATOMIC);
297 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
304 	__net_timestamp(skb);
306 	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
307 	hdr->opcode = opcode;
308 	hdr->index = cpu_to_le16(hdev->id);
309 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a NEW_INDEX event for every registered controller to a
 * freshly bound monitor socket, so it learns the current device set.
 */
314 static void send_monitor_replay(struct sock *sk)
316 	struct hci_dev *hdev;
318 	read_lock(&hci_dev_list_lock);
320 	list_for_each_entry(hdev, &hci_dev_list, list) {
323 		skb = create_monitor_event(hdev, HCI_DEV_REG);
		/* Queue failure frees the skb (kfree in elided path) */
327 		if (sock_queue_rcv_skb(sk, skb))
331 	read_unlock(&hci_dev_list_lock);
334 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying 'dlen' bytes of
 * 'data' and deliver it through the normal raw-socket path, marked as
 * incoming so filters treat it like hardware traffic.
 */
335 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
337 	struct hci_event_hdr *hdr;
338 	struct hci_ev_stack_internal *ev;
341 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
345 	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
346 	hdr->evt = HCI_EV_STACK_INTERNAL;
347 	hdr->plen = sizeof(*ev) + dlen;
349 	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
351 	memcpy(ev->data, data, dlen);
	/* Present the synthetic event as if received from the controller */
353 	bt_cb(skb)->incoming = 1;
354 	__net_timestamp(skb);
356 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
357 	hci_send_to_sock(hdev, skb);
/* Notify userspace of a device lifecycle event: forward it to monitor
 * sockets, emit an internal stack event, and on HCI_DEV_UNREG detach
 * every socket still bound to the vanishing device.
 */
361 void hci_sock_dev_event(struct hci_dev *hdev, int event)
363 	struct hci_ev_si_device ev;
365 	BT_DBG("hdev %s event %d", hdev->name, event);
367 	/* Send event to monitor */
368 	if (atomic_read(&monitor_promisc)) {
371 		skb = create_monitor_event(hdev, event);
373 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
378 	/* Send event to sockets */
380 	ev.dev_id = hdev->id;
381 	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
383 	if (event == HCI_DEV_UNREG) {
386 		/* Detach sockets from device */
387 		read_lock(&hci_sk_list.lock);
388 		sk_for_each(sk, &hci_sk_list.head) {
			/* Nested lock class: we already hold the list lock */
389 			bh_lock_sock_nested(sk);
390 			if (hci_pi(sk)->hdev == hdev) {
391 				hci_pi(sk)->hdev = NULL;
				/* Wake readers/pollers of the orphaned socket */
393 				sk->sk_state = BT_OPEN;
394 				sk->sk_state_change(sk);
400 		read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop monitor promiscuity if applicable,
 * unlink from the global list, tear down USER-channel exclusivity
 * (re-announce the index to mgmt and close the device), and purge
 * pending queues.  Elided lines include NULL checks and hci_dev_put.
 */
404 static int hci_sock_release(struct socket *sock)
406 	struct sock *sk = sock->sk;
407 	struct hci_dev *hdev;
409 	BT_DBG("sock %p sk %p", sock, sk);
414 	hdev = hci_pi(sk)->hdev;
416 	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
417 		atomic_dec(&monitor_promisc);
419 	bt_sock_unlink(&hci_sk_list, sk);
422 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Give the index back to the management interface */
423 			mgmt_index_added(hdev);
424 			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
425 			hci_dev_close(hdev->id);
428 		atomic_dec(&hdev->promisc);
434 	skb_queue_purge(&sk->sk_receive_queue);
435 	skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR ioctl helper: copy a bdaddr from userspace and add it
 * to the device blacklist as a BR/EDR address.  Returns -EFAULT on a
 * bad user pointer, otherwise the hci_bdaddr_list_add() result.
 */
441 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
446 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
	/* hci_dev_lock() presumably taken in an elided line — confirm */
451 	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
453 	hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr from userspace and
 * remove it from the device blacklist (BR/EDR).  Mirrors
 * hci_sock_blacklist_add().
 */
458 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
463 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
	/* hci_dev_lock() presumably taken in an elided line — confirm */
468 	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
470 	hci_dev_unlock(hdev);
475 /* Ioctls that require bound socket */
/* Dispatch ioctls valid only on a device-bound socket.  Rejects
 * devices grabbed by the user channel, unconfigured devices, and
 * non-BR/EDR transports; privileged commands need CAP_NET_ADMIN.
 * (Switch/case labels are elided in this view.)
 */
476 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
479 	struct hci_dev *hdev = hci_pi(sk)->hdev;
484 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
487 	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
490 	if (hdev->dev_type != HCI_BREDR)
495 		if (!capable(CAP_NET_ADMIN))
500 		return hci_get_conn_info(hdev, (void __user *) arg);
503 		return hci_get_auth_info(hdev, (void __user *) arg);
506 		if (!capable(CAP_NET_ADMIN))
508 		return hci_sock_blacklist_add(hdev, (void __user *) arg);
511 		if (!capable(CAP_NET_ADMIN))
513 		return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl entry for HCI sockets.  Device-independent commands
 * (dev list/info, open/close/reset, inquiry) are handled here — most
 * requiring CAP_NET_ADMIN — while anything else falls through to
 * hci_sock_bound_ioctl().  Only RAW-channel sockets may issue ioctls.
 * (Case labels elided in this view.)
 */
519 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
522 	void __user *argp = (void __user *) arg;
523 	struct sock *sk = sock->sk;
526 	BT_DBG("cmd %x arg %lx", cmd, arg);
	/* ioctls are a RAW-channel-only interface */
530 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
539 		return hci_get_dev_list(argp);
542 		return hci_get_dev_info(argp);
545 		return hci_get_conn_list(argp);
548 		if (!capable(CAP_NET_ADMIN))
550 		return hci_dev_open(arg);
553 		if (!capable(CAP_NET_ADMIN))
555 		return hci_dev_close(arg);
558 		if (!capable(CAP_NET_ADMIN))
560 		return hci_dev_reset(arg);
563 		if (!capable(CAP_NET_ADMIN))
565 		return hci_dev_reset_stat(arg);
575 		if (!capable(CAP_NET_ADMIN))
577 		return hci_dev_cmd(cmd, argp);
580 		return hci_inquiry(argp);
	/* Everything else needs a bound device */
585 		err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and optionally a device):
 *  - RAW: optional device, bumps promisc so traffic is mirrored;
 *  - USER: exclusive device ownership (CAP_NET_ADMIN), device must be
 *    down/unmanaged; index is hidden from mgmt and the device opened;
 *  - CONTROL: mgmt interface, CAP_NET_ADMIN, no device;
 *  - MONITOR: CAP_NET_RAW, no device; replays current indexes and
 *    enables monitor broadcasting.
 * Error paths and the socket lock/unlock are elided in this view.
 */
592 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
595 	struct sockaddr_hci haddr;
596 	struct sock *sk = sock->sk;
597 	struct hci_dev *hdev = NULL;
600 	BT_DBG("sock %p sk %p", sock, sk);
	/* Copy a bounded, zero-padded sockaddr from the caller */
605 	memset(&haddr, 0, sizeof(haddr));
606 	len = min_t(unsigned int, sizeof(haddr), addr_len);
607 	memcpy(&haddr, addr, len);
609 	if (haddr.hci_family != AF_BLUETOOTH)
	/* Rebinding an already-bound socket is not allowed */
614 	if (sk->sk_state == BT_BOUND) {
619 	switch (haddr.hci_channel) {
620 	case HCI_CHANNEL_RAW:
621 		if (hci_pi(sk)->hdev) {
626 		if (haddr.hci_dev != HCI_DEV_NONE) {
627 			hdev = hci_dev_get(haddr.hci_dev);
633 			atomic_inc(&hdev->promisc);
636 		hci_pi(sk)->hdev = hdev;
639 	case HCI_CHANNEL_USER:
640 		if (hci_pi(sk)->hdev) {
		/* USER channel requires a concrete device */
645 		if (haddr.hci_dev == HCI_DEV_NONE) {
650 		if (!capable(CAP_NET_ADMIN)) {
655 		hdev = hci_dev_get(haddr.hci_dev);
		/* Device must be fully idle before exclusive takeover */
661 		if (test_bit(HCI_UP, &hdev->flags) ||
662 		    test_bit(HCI_INIT, &hdev->flags) ||
663 		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
664 		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Atomically claim the user channel; fail if already owned */
670 		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
676 		mgmt_index_removed(hdev);
678 		err = hci_dev_open(hdev->id);
			/* Open failed: undo the takeover */
680 			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
681 			mgmt_index_added(hdev);
686 		atomic_inc(&hdev->promisc);
688 		hci_pi(sk)->hdev = hdev;
691 	case HCI_CHANNEL_CONTROL:
692 		if (haddr.hci_dev != HCI_DEV_NONE) {
697 		if (!capable(CAP_NET_ADMIN)) {
704 	case HCI_CHANNEL_MONITOR:
705 		if (haddr.hci_dev != HCI_DEV_NONE) {
710 		if (!capable(CAP_NET_RAW)) {
		/* Catch the new listener up on existing controllers */
715 		send_monitor_replay(sk);
717 		atomic_inc(&monitor_promisc);
726 	hci_pi(sk)->channel = haddr.hci_channel;
727 	sk->sk_state = BT_BOUND;
/* Report the bound address of an HCI socket: family, device id and
 * channel.  Error handling for an unbound socket is elided here.
 */
734 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
735 			    int *addr_len, int peer)
737 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
738 	struct sock *sk = sock->sk;
739 	struct hci_dev *hdev;
742 	BT_DBG("sock %p sk %p", sock, sk);
749 	hdev = hci_pi(sk)->hdev;
755 	*addr_len = sizeof(*haddr);
756 	haddr->hci_family = AF_BLUETOOTH;
757 	haddr->hci_dev    = hdev->id;
758 	haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data (cmsg) to a received message according to the
 * socket's cmsg mask: packet direction and/or receive timestamp, with
 * 32-bit compat timeval handling for COMPAT tasks.
 */
765 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
768 	__u32 mask = hci_pi(sk)->cmsg_mask;
770 	if (mask & HCI_CMSG_DIR) {
771 		int incoming = bt_cb(skb)->incoming;
772 		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
776 	if (mask & HCI_CMSG_TSTAMP) {
778 		struct compat_timeval ctv;
784 		skb_get_timestamp(skb, &tv);
		/* 32-bit userspace expects a compat_timeval layout */
789 		if (!COMPAT_USE_64BIT_TIME &&
790 		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
791 			ctv.tv_sec = tv.tv_sec;
792 			ctv.tv_usec = tv.tv_usec;
798 		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg for HCI sockets: dequeue one datagram, copy (possibly
 * truncated) into the user iovec, and attach per-channel ancillary
 * data — the RAW channel gets HCI cmsgs, the others a plain receive
 * timestamp.  MSG_OOB is not supported.
 */
802 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
805 	int noblock = flags & MSG_DONTWAIT;
806 	struct sock *sk = sock->sk;
810 	BT_DBG("sock %p, sk %p", sock, sk);
812 	if (flags & (MSG_OOB))
815 	if (sk->sk_state == BT_CLOSED)
818 	skb = skb_recv_datagram(sk, flags, noblock, &err);
	/* Caller's buffer too small: flag truncation (copy len elided) */
824 		msg->msg_flags |= MSG_TRUNC;
828 	skb_reset_transport_header(skb);
829 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
831 	switch (hci_pi(sk)->channel) {
832 	case HCI_CHANNEL_RAW:
833 		hci_sock_cmsg(sk, msg, skb);
835 	case HCI_CHANNEL_USER:
836 	case HCI_CHANNEL_CONTROL:
837 	case HCI_CHANNEL_MONITOR:
838 		sock_recv_timestamp(msg, sk, skb);
842 	skb_free_datagram(sk, skb);
	/* Return bytes copied unless the copy itself failed */
844 	return err ? : copied;
/* sendmsg for HCI sockets.  CONTROL goes to mgmt, MONITOR is
 * write-denied (elided), RAW/USER build an skb whose first byte is the
 * packet type.  USER channel accepts CMD/ACL/SCO straight to raw_q;
 * RAW commands pass the security filter (or need CAP_NET_RAW) and are
 * queued on cmd_q as single-command requests; other RAW packet types
 * need CAP_NET_RAW and go to raw_q.  Error/cleanup paths are elided.
 */
847 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
850 	struct sock *sk = sock->sk;
851 	struct hci_dev *hdev;
855 	BT_DBG("sock %p sk %p", sock, sk);
857 	if (msg->msg_flags & MSG_OOB)
860 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
	/* Minimum: 1 type byte + smallest header */
863 	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
868 	switch (hci_pi(sk)->channel) {
869 	case HCI_CHANNEL_RAW:
870 	case HCI_CHANNEL_USER:
872 	case HCI_CHANNEL_CONTROL:
873 		err = mgmt_control(sk, msg, len);
875 	case HCI_CHANNEL_MONITOR:
883 	hdev = hci_pi(sk)->hdev;
889 	if (!test_bit(HCI_UP, &hdev->flags)) {
894 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
898 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
	/* First byte of the payload selects the HCI packet type */
903 	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
906 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
907 		/* No permission check is needed for user channel
908 		 * since that gets enforced when binding the socket.
910 		 * However check that the packet type is valid.
912 		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
913 		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
914 		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
919 		skb_queue_tail(&hdev->raw_q, skb);
920 		queue_work(hdev->workqueue, &hdev->tx_work);
921 	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
922 		u16 opcode = get_unaligned_le16(skb->data);
923 		u16 ogf = hci_opcode_ogf(opcode);
924 		u16 ocf = hci_opcode_ocf(opcode);
		/* Unprivileged senders may only issue whitelisted opcodes */
926 		if (((ogf > HCI_SFLT_MAX_OGF) ||
927 		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
928 				   &hci_sec_filter.ocf_mask[ogf])) &&
929 		    !capable(CAP_NET_RAW)) {
			/* HCI_RAW devices bypass the command queue (elided) */
935 			skb_queue_tail(&hdev->raw_q, skb);
936 			queue_work(hdev->workqueue, &hdev->tx_work);
938 			/* Stand-alone HCI commands must be flagged as
939 			 * single-command requests.
941 			bt_cb(skb)->req_start = 1;
943 			skb_queue_tail(&hdev->cmd_q, skb);
944 			queue_work(hdev->workqueue, &hdev->cmd_work);
947 		if (!capable(CAP_NET_RAW)) {
952 		skb_queue_tail(&hdev->raw_q, skb);
953 		queue_work(hdev->workqueue, &hdev->tx_work);
/* setsockopt (SOL_HCI) for RAW-channel sockets: toggle direction /
 * timestamp cmsgs, or install an hci_ufilter.  Without CAP_NET_RAW the
 * requested filter is intersected with the security filter so an
 * unprivileged socket cannot widen its view.  Case labels and the
 * lock/unlock lines are elided in this view.
 */
967 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
968 			       char __user *optval, unsigned int len)
970 	struct hci_ufilter uf = { .opcode = 0 };
971 	struct sock *sk = sock->sk;
972 	int err = 0, opt = 0;
974 	BT_DBG("sk %p, opt %d", sk, optname);
	/* Socket options only make sense on the RAW channel */
978 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
985 		if (get_user(opt, (int __user *)optval)) {
991 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
993 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
997 		if (get_user(opt, (int __user *)optval)) {
1003 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1005 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		/* Seed 'uf' with the current filter before partial copy */
1010 		struct hci_filter *f = &hci_pi(sk)->filter;
1012 		uf.type_mask = f->type_mask;
1013 		uf.opcode = f->opcode;
1014 		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1015 		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1018 		len = min_t(unsigned int, len, sizeof(uf));
1019 		if (copy_from_user(&uf, optval, len)) {
		/* Clamp unprivileged filters to the security filter */
1024 		if (!capable(CAP_NET_RAW)) {
1025 			uf.type_mask &= hci_sec_filter.type_mask;
1026 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1027 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1031 			struct hci_filter *f = &hci_pi(sk)->filter;
1033 			f->type_mask = uf.type_mask;
1034 			f->opcode = uf.opcode;
1035 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1036 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* getsockopt (SOL_HCI) for RAW-channel sockets: report the cmsg
 * direction/timestamp flags as 0/1 ints, or copy out the current
 * filter as an hci_ufilter (bounded by the caller's optlen).
 * Case labels and lock/unlock lines are elided in this view.
 */
1050 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1051 			       char __user *optval, int __user *optlen)
1053 	struct hci_ufilter uf;
1054 	struct sock *sk = sock->sk;
1055 	int len, opt, err = 0;
1057 	BT_DBG("sk %p, opt %d", sk, optname);
1059 	if (get_user(len, optlen))
1064 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1071 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1076 		if (put_user(opt, optval))
1080 	case HCI_TIME_STAMP:
1081 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1086 		if (put_user(opt, optval))
1092 		struct hci_filter *f = &hci_pi(sk)->filter;
		/* Zero first so a short copy never leaks stack bytes */
1094 		memset(&uf, 0, sizeof(uf));
1095 		uf.type_mask = f->type_mask;
1096 		uf.opcode = f->opcode;
1097 		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1098 		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1101 		len = min_t(unsigned int, len, sizeof(uf));
1102 		if (copy_to_user(optval, &uf, len))
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_HCI sockets.  Datagram
 * semantics; connection-oriented ops are stubbed with sock_no_*.
 */
1116 static const struct proto_ops hci_sock_ops = {
1117 	.family		= PF_BLUETOOTH,
1118 	.owner		= THIS_MODULE,
1119 	.release	= hci_sock_release,
1120 	.bind		= hci_sock_bind,
1121 	.getname	= hci_sock_getname,
1122 	.sendmsg	= hci_sock_sendmsg,
1123 	.recvmsg	= hci_sock_recvmsg,
1124 	.ioctl		= hci_sock_ioctl,
1125 	.poll		= datagram_poll,
1126 	.listen		= sock_no_listen,
1127 	.shutdown	= sock_no_shutdown,
1128 	.setsockopt	= hci_sock_setsockopt,
1129 	.getsockopt	= hci_sock_getsockopt,
1130 	.connect	= sock_no_connect,
1131 	.socketpair	= sock_no_socketpair,
1132 	.accept		= sock_no_accept,
1133 	.mmap		= sock_no_mmap
/* Protocol descriptor: sizes each sock allocation to hold the
 * trailing hci_pinfo (see the hci_pi() cast above).
 */
1136 static struct proto hci_sk_proto = {
1138 	.owner		= THIS_MODULE,
1139 	.obj_size	= sizeof(struct hci_pinfo)
/* socket(2) backend: only SOCK_RAW is supported.  Allocates the sock,
 * initializes it into BT_OPEN/SS_UNCONNECTED and links it on the
 * global HCI socket list.  Allocation-failure return is elided.
 */
1142 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1147 	BT_DBG("sock %p", sock);
1149 	if (sock->type != SOCK_RAW)
1150 		return -ESOCKTNOSUPPORT;
1152 	sock->ops = &hci_sock_ops;
1154 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1158 	sock_init_data(sock, sk);
	/* Mark the socket live (ZAPPED means dead in the BT stack) */
1160 	sock_reset_flag(sk, SOCK_ZAPPED);
1162 	sk->sk_protocol = protocol;
1164 	sock->state = SS_UNCONNECTED;
1165 	sk->sk_state = BT_OPEN;
1167 	bt_sock_link(&hci_sk_list, sk);
/* Family ops registered with bt_sock_register() for BTPROTO_HCI */
1171 static const struct net_proto_family hci_sock_family_ops = {
1172 	.family	= PF_BLUETOOTH,
1173 	.owner	= THIS_MODULE,
1174 	.create	= hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and
 * the procfs entry, unwinding in reverse order on failure (some error
 * labels are elided in this view).
 */
1177 int __init hci_sock_init(void)
	/* sockaddr_hci must fit in the generic sockaddr passed by bind() */
1181 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1183 	err = proto_register(&hci_sk_proto, 0);
1187 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1189 		BT_ERR("HCI socket registration failed");
1193 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1195 		BT_ERR("Failed to create HCI proc file");
1196 		bt_sock_unregister(BTPROTO_HCI);
1200 	BT_INFO("HCI socket layer initialized");
1205 	proto_unregister(&hci_sk_proto);
/* Module exit: tear down procfs, socket family and proto registration
 * in reverse order of hci_sock_init().
 */
1209 void hci_sock_cleanup(void)
1211 	bt_procfs_cleanup(&init_net, "hci");
1212 	bt_sock_unregister(BTPROTO_HCI);
1213 	proto_unregister(&hci_sk_proto);