/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
        unsigned short    channel;
};

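/* Test bit 'nr' in a bitmask laid out as an array of 32-bit words,
 * matching the layout of the event and OCF masks used below.
 */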
static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

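/* Return true if the socket's filter rejects this packet: the packet
 * type, the event code and, for Command Complete/Status events, the
 * command opcode are checked against the per-socket hci_filter.
 */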
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

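/* Queue a copy of the frame to every bound monitor channel socket */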
static void queue_monitor_skb(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        /* Create a private copy with headroom */
        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
        if (!skb_copy)
                return;

        /* Put header before the data */
        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);

        queue_monitor_skb(skb_copy);
        kfree_skb(skb_copy);
}

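/* Build a monitor frame (New Index or Delete Index) for a controller
 * registration event. Returns NULL for unknown events or when the
 * allocation fails.
 */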
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

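/* Replay a New Index event for every registered controller so that a
 * newly bound monitor socket learns about already present devices.
 */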
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt  = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

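/* Propagate a device event to monitor and raw sockets. On unregister,
 * detach any socket that is still bound to the disappearing controller.
 */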
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        queue_monitor_skb(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event  = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

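/* Release an HCI socket: drop the monitor and promiscuous references,
 * hand a user channel controller back to the management interface and
 * purge any queued frames.
 */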
static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

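/* Ioctls on raw HCI sockets. Device independent commands are handled
 * here directly; everything else goes through hci_sock_bound_ioctl().
 */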
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

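/* Bind the socket to a channel (raw, user, control or monitor) and,
 * where applicable, to a specific controller.
 */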
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    test_bit(HCI_SETUP, &hdev->dev_flags) ||
                    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

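/* Report the bound controller index and channel of the socket */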
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

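/* Attach direction and timestamp ancillary data to a received message
 * according to the socket's cmsg mask.
 */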
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

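/* Receive one queued frame and add channel specific ancillary data */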
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_msg(skb, 0, msg, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

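/* Transmit a frame from user space: control channel messages go to the
 * management interface, user channel frames bypass the stack, and raw
 * channel commands are checked against the security filter before being
 * queued towards the controller.
 */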
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

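/* Socket options for the raw channel: data direction and timestamp
 * cmsg flags plus the packet/event filter, which is capped by the
 * security filter for unprivileged callers.
 */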
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

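/* Read back the raw channel socket options set above */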
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

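/* Create a new HCI socket; only SOCK_RAW sockets are supported */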
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

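/* Register the HCI protocol, the socket family handler and the procfs
 * entry when the Bluetooth subsystem initializes.
 */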
int __init hci_sock_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}