net/bluetooth/hci_sock.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
        unsigned short    channel;
        unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
        set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
        clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
        return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
        return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
        return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

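/* Default security filter: bitmaps of the packet types, events and, per
 * OGF, the OCF values that sockets without CAP_NET_RAW may use. It is
 * applied in hci_sock_setsockopt() and hci_sock_sendmsg() below.
 */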
static const struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

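/* Check a received frame against the per-socket filter. Returns true
 * when the frame must not be delivered to this raw socket.
 */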
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
                         int flag, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("channel %u len %d", channel, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Ignore sockets without the flag set */
                if (!hci_sock_test_flag(sk, flag))
                        continue;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != channel)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        /* Create a private copy with headroom */
        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
        if (!skb_copy)
                return;

        /* Put header before the data */
        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);

        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
                            HCI_SOCK_TRUSTED, NULL);
        kfree_skb(skb_copy);
}

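/* Build the monitor control message that corresponds to a device event
 * (new/deleted index, open/close, up). Returns NULL when the event is
 * not reported on the monitor channel or allocation fails.
 */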
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct hci_mon_index_info *ii;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        case HCI_DEV_UP:
                skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
                bacpy(&ii->bdaddr, &hdev->bdaddr);
                ii->manufacturer = cpu_to_le16(hdev->manufacturer);

                opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
                break;

        case HCI_DEV_OPEN:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
                break;

        case HCI_DEV_CLOSE:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

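/* Replay the current controller state to a newly bound monitor socket so
 * that it starts with a consistent view of all registered devices.
 */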
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);

                if (!test_bit(HCI_RUNNING, &hdev->flags))
                        continue;

                skb = create_monitor_event(hdev, HCI_DEV_OPEN);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);

                if (!test_bit(HCI_UP, &hdev->flags))
                        continue;

                skb = create_monitor_event(hdev, HCI_DEV_UP);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt  = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

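/* Propagate a device event to the monitor channel and, for up/down style
 * events, to raw sockets as a stack internal event. On unregister, also
 * detach every socket that was bound to the disappearing device.
 */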
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        BT_DBG("hdev %s event %d", hdev->name, event);

        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                /* Send event to monitor */
                skb = create_monitor_event(hdev, event);
                if (skb) {
                        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
                                            HCI_SOCK_TRUSTED, NULL);
                        kfree_skb(skb);
                }
        }

        if (event <= HCI_DEV_DOWN) {
                struct hci_ev_si_device ev;

                /* Send event to sockets */
                ev.event  = event;
                ev.dev_id = hdev->id;
                hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
        }

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

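/* Look up a registered management channel; the caller must hold
 * mgmt_chan_list_lock.
 */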
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        list_for_each_entry(c, &mgmt_chan_list, list) {
                if (c->channel == channel)
                        return c;
        }

        return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        mutex_lock(&mgmt_chan_list_lock);
        c = __hci_mgmt_chan_find(channel);
        mutex_unlock(&mgmt_chan_list_lock);

        return c;
}

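/* Register a management channel so that sockets can bind to it. Channel
 * numbers below HCI_CHANNEL_CONTROL are reserved and rejected.
 */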
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
        if (c->channel < HCI_CHANNEL_CONTROL)
                return -EINVAL;

        mutex_lock(&mgmt_chan_list_lock);
        if (__hci_mgmt_chan_find(c->channel)) {
                mutex_unlock(&mgmt_chan_list_lock);
                return -EALREADY;
        }

        list_add_tail(&c->list, &mgmt_chan_list);

        mutex_unlock(&mgmt_chan_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
        mutex_lock(&mgmt_chan_list_lock);
        list_del(&c->list);
        mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

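/* Release an HCI socket: drop monitor and promiscuous references, give
 * back user channel exclusive access and purge any queued data.
 */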
static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        /* When releasing a user channel's exclusive access,
                         * call hci_dev_do_close directly instead of calling
                         * hci_dev_close to ensure the exclusive access will
                         * be released and the controller brought back down.
                         *
                         * Checking HCI_AUTO_OFF is not needed in this case
                         * since it will have been cleared already when
                         * opening the user channel.
                         */
                        hci_dev_do_close(hdev);
                        hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        mgmt_index_added(hdev);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

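/* Helpers for the HCIBLOCKADDR and HCIUNBLOCKADDR ioctls: add or remove
 * a BR/EDR address on the controller's blacklist.
 */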
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

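/* Main ioctl handler for raw HCI sockets; device-independent commands are
 * handled here, everything else is passed to hci_sock_bound_ioctl().
 */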
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

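/* Bind a socket to a channel (raw, user, monitor or a registered
 * management channel) and, where applicable, to a specific controller.
 */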
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_INIT, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_SETUP) ||
                    hci_dev_test_flag(hdev, HCI_CONFIG) ||
                    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
                     test_bit(HCI_UP, &hdev->flags))) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        if (err == -EALREADY) {
                                /* In case the transport is already up and
                                 * running, clear the error here.
                                 *
                                 * This can happen when opening a user
                                 * channel and HCI_AUTO_OFF grace period
                                 * is still active.
                                 */
                                err = 0;
                        } else {
                                hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                                mgmt_index_added(hdev);
                                hci_dev_put(hdev);
                                goto done;
                        }
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                /* The monitor interface is restricted to CAP_NET_RAW and
                 * is therefore implicitly trusted.
                 */
                hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                if (!hci_mgmt_chan_find(haddr.hci_channel)) {
                        err = -EINVAL;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                /* Users with CAP_NET_ADMIN capabilities are allowed
                 * access to all management commands and events. For
                 * untrusted users the interface is restricted and
                 * only events marked as untrusted are sent.
                 */
                if (capable(CAP_NET_ADMIN))
                        hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                /* At the moment the index and unconfigured index events
                 * are enabled unconditionally. Setting them on each
                 * socket when binding keeps this functionality. However,
                 * they might be cleared later and then sending of these
                 * events will be disabled, but that is then intentional.
                 *
                 * This also enables generic events that are safe to be
                 * received by untrusted users. Examples of such events
                 * are changes to settings, class of device, name etc.
                 */
                if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
                        hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
                }
                break;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

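/* Report the bound device and channel; peer names are not supported on
 * HCI sockets.
 */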
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family  = AF_BLUETOOTH;
        haddr->hci_dev     = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

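/* Attach the requested ancillary data (packet direction and/or receive
 * timestamp) to a message delivered on a raw socket.
 */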
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

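/* Receive one queued frame and add channel specific metadata before
 * handing it to user space.
 */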
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                            int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_msg(skb, 0, msg, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        default:
                if (hci_mgmt_chan_find(hci_pi(sk)->channel))
                        sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

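/* Parse and dispatch one management command: validate the header, check
 * trust level, index and parameter length, then call the channel handler.
 */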
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
                        struct msghdr *msg, size_t msglen)
{
        void *buf;
        u8 *cp;
        struct mgmt_hdr *hdr;
        u16 opcode, index, len;
        struct hci_dev *hdev = NULL;
        const struct hci_mgmt_handler *handler;
        bool var_len, no_hdev;
        int err;

        BT_DBG("got %zu bytes", msglen);

        if (msglen < sizeof(*hdr))
                return -EINVAL;

        buf = kmalloc(msglen, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (memcpy_from_msg(buf, msg, msglen)) {
                err = -EFAULT;
                goto done;
        }

        hdr = buf;
        opcode = __le16_to_cpu(hdr->opcode);
        index = __le16_to_cpu(hdr->index);
        len = __le16_to_cpu(hdr->len);

        if (len != msglen - sizeof(*hdr)) {
                err = -EINVAL;
                goto done;
        }

        if (opcode >= chan->handler_count ||
            chan->handlers[opcode].func == NULL) {
                BT_DBG("Unknown op %u", opcode);
                err = mgmt_cmd_status(sk, index, opcode,
                                      MGMT_STATUS_UNKNOWN_COMMAND);
                goto done;
        }

        handler = &chan->handlers[opcode];

        if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
            !(handler->flags & HCI_MGMT_UNTRUSTED)) {
                err = mgmt_cmd_status(sk, index, opcode,
                                      MGMT_STATUS_PERMISSION_DENIED);
                goto done;
        }

        if (index != MGMT_INDEX_NONE) {
                hdev = hci_dev_get(index);
                if (!hdev) {
                        err = mgmt_cmd_status(sk, index, opcode,
                                              MGMT_STATUS_INVALID_INDEX);
                        goto done;
                }

                if (hci_dev_test_flag(hdev, HCI_SETUP) ||
                    hci_dev_test_flag(hdev, HCI_CONFIG) ||
                    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        err = mgmt_cmd_status(sk, index, opcode,
                                              MGMT_STATUS_INVALID_INDEX);
                        goto done;
                }

                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
                        err = mgmt_cmd_status(sk, index, opcode,
                                              MGMT_STATUS_INVALID_INDEX);
                        goto done;
                }
        }

        no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
        if (no_hdev != !hdev) {
                err = mgmt_cmd_status(sk, index, opcode,
                                      MGMT_STATUS_INVALID_INDEX);
                goto done;
        }

        var_len = (handler->flags & HCI_MGMT_VAR_LEN);
        if ((var_len && len < handler->data_len) ||
            (!var_len && len != handler->data_len)) {
                err = mgmt_cmd_status(sk, index, opcode,
                                      MGMT_STATUS_INVALID_PARAMS);
                goto done;
        }

        if (hdev && chan->hdev_init)
                chan->hdev_init(sk, hdev);

        cp = buf + sizeof(*hdr);

        err = handler->func(sk, hdev, cp, len);
        if (err < 0)
                goto done;

        err = msglen;

done:
        if (hdev)
                hci_dev_put(hdev);

        kfree(buf);
        return err;
}

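/* Transmit path for HCI sockets: management channels are dispatched to
 * their handlers, while raw and user channel frames are validated and
 * queued towards the controller.
 */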
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_mgmt_chan *chan;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                mutex_lock(&mgmt_chan_list_lock);
                chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
                if (chan)
                        err = hci_mgmt_cmd(chan, sk, msg, len);
                else
                        err = -EINVAL;

                mutex_unlock(&mgmt_chan_list_lock);
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for the user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However, check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

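/* Socket options are only valid on the raw channel; they control the
 * ancillary data flags and the receive filter.
 */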
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

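/* Read back the ancillary data flags and the currently installed filter. */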
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

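/* Create a new HCI socket; only SOCK_RAW is supported. */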
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

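/* Register the HCI socket protocol, address family and procfs entry. */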
int __init hci_sock_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}