2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on an L2CAP channel (paired with chan_put()).
 * NOTE(extract): function braces are missing from this dump. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a channel reference; when the count reaches zero the channel is
 * released (the free call itself is missing from this extract). */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Walk conn->chan_l looking up a channel by destination CID.
 * Double-underscore prefix: caller is expected to hold conn->chan_lock
 * (matches the locked wrappers below). The match test and return are
 * missing from this extract. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* Lookup a channel on this connection by source CID; lock held by caller.
 * The comparison/return lines are missing from this extract. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115  * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * for reading around the list walk. The lock-sock-on-hit step implied by
 * the comment above is missing from this extract. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
/* Lookup a channel by pending signalling-command identifier.
 * Caller holds conn->chan_lock. */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
/* Search the global channel list for one bound to this PSM (stored in
 * c->sport) and source address. Caller holds chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. If a PSM is given, it must not already be bound
 * to the same source address; if no PSM is given, auto-allocate an odd
 * value from the dynamic range 0x1001..0x10ff (PSMs are odd per spec —
 * hence the += 2 step). Error paths are missing from this extract. */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
/* Bind a channel to a fixed source CID under chan_list_lock.
 * The assignment itself is missing from this extract. */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold
 * conn->chan_lock (uses the unlocked lookup). */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a per-channel timer for `timeout` milliseconds. mod_timer()
 * returning 0 means the timer was not previously pending — the branch
 * body (presumably a chan_hold()) is missing from this extract. */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending per-channel timer; the branch body (presumably the
 * matching chan_put()) is missing from this extract. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to a new BT_* state via the ops->state_change callback
 * (the chan->state assignment is missing from this extract). */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer callback (runs in timer context, hence the
 * sock_owned_by_user() retry). Picks a close reason from the channel
 * state, closes the channel and notifies the socket layer. The lock/
 * unlock of sk and the default `reason` value are missing here. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate a new channel bound to socket sk, link it onto the global
 * chan_list, arm the channel timer and initialise the refcount to 1.
 * GFP_ATOMIC: callable from non-sleeping context. The NULL-check on
 * kzalloc and the return are missing from this extract. */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
/* Unlink the channel from the global list; the final chan_put() is
 * missing from this extract. */
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection: assign CIDs/MTU per channel type
 * (connection-oriented gets a dynamic CID, or the LE data CID on LE
 * links; connectionless and raw use their fixed CIDs), seed the default
 * QoS/flow-spec parameters, and link onto conn->chan_l. Caller holds
 * conn->chan_lock. 0x13 = "remote user terminated" default disconnect
 * reason. Some else-branch lines and the hci_conn_hold/chan assignment
 * are missing from this extract. */
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
350 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear it down: unlink from
 * conn->chan_l, drop the hci_conn reference, mark the socket zapped and
 * closed, wake listeners, and flush all ERTM state (tx/srej queues,
 * retrans/monitor/ack timers, srej list). */
354  * Must be called on the locked socket. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
361 __clear_chan_timer(chan);
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
373 hci_conn_put(conn->hcon);
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
/* If there is a listening parent, unlink from its accept queue and wake
 * it; otherwise wake this socket directly (the if/else around these two
 * paths is missing from this extract). */
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
386 sk->sk_state_change(sk);
/* Skip queue teardown while a configure exchange is only half done. */
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 skb_queue_purge(&chan->tx_q);
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
401 skb_queue_purge(&chan->srej_q);
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket
 * with ECONNRESET (called when the listener itself shuts down). */
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
414 BT_DBG("parent %p", parent);
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
421 l2cap_chan_close(chan, ECONNRESET);
423 chan->ops->close(chan->data);
/* State-driven channel shutdown. Per state (the switch case labels are
 * missing from this extract, but the bodies map to): LISTEN — reap
 * pending children and zap; CONNECTED/CONFIG — send a Disconnect Request
 * on ACL links and wait (timer re-armed), else delete immediately;
 * CONNECT2 — reject the pending Connect Request with SEC_BLOCK (if setup
 * was deferred) or BAD_PSM, then delete; other connect states — delete;
 * default — just zap the socket. */
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
434 switch (chan->state) {
436 l2cap_chan_cleanup_listen(sk);
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
450 l2cap_chan_del(chan, reason);
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
/* Note the swap: our scid goes in the response's dcid and vice versa,
 * because the response is written from the remote's point of view. */
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
473 l2cap_chan_del(chan, reason);
478 l2cap_chan_del(chan, reason);
482 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type / security level to an HCI authentication requirement:
 * raw channels request dedicated bonding, PSM 0x0001 (SDP) never bonds
 * (and is downgraded from LOW to the special SDP level), everything else
 * requests general bonding. MITM variants for BT_SECURITY_HIGH. */
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
496 return HCI_AT_NO_BONDING;
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
505 return HCI_AT_NO_BONDING;
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
513 return HCI_AT_NO_BONDING;
518 /* Service level security */
/* Ask the HCI layer whether the ACL/LE link satisfies this channel's
 * security level; may kick off authentication/encryption. */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
521 struct l2cap_conn *conn = chan->conn;
524 auth_type = l2cap_get_auth_type(chan);
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier, cycling through
 * 1..128 under conn->lock (the wrap-back-to-1 line is missing from
 * this extract). Idents 129+ are reserved for userspace tools. */
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
533 /* Get next available identificator.
534  * 1 - 128 are used by kernel.
535  * 129 - 199 are reserved.
536  * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn->lock);
541 if (++conn->tx_ident > 128)
546 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and queue it on the ACL link at top
 * priority, forcing the link active. Signalling may not be flushed, so
 * ACL_START_NO_FLUSH is used when the controller supports it. The
 * NULL-check on the built skb is missing from this extract. */
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 BT_DBG("code 0x%2.2x", code);
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
567 skb->priority = HCI_PRIO_MAX;
569 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame). Header size depends
 * on whether extended control fields are negotiated; an FCS trailer is
 * appended when CRC16 is in use. Pending F-bit / P-bit requests
 * (CONN_SEND_FBIT / CONN_SEND_PBIT) are folded into the control field
 * and cleared atomically. The alloc-failure check is missing here. */
572 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
575 struct l2cap_hdr *lh;
576 struct l2cap_conn *conn = chan->conn;
580 if (chan->state != BT_CONNECTED)
583 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
584 hlen = L2CAP_EXT_HDR_SIZE;
586 hlen = L2CAP_ENH_HDR_SIZE;
588 if (chan->fcs == L2CAP_FCS_CRC16)
589 hlen += L2CAP_FCS_SIZE;
591 BT_DBG("chan %p, control 0x%8.8x", chan, control);
593 count = min_t(unsigned int, conn->mtu, hlen);
595 control |= __set_sframe(chan);
597 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
598 control |= __set_ctrl_final(chan);
600 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
601 control |= __set_ctrl_poll(chan);
603 skb = bt_skb_alloc(count, GFP_ATOMIC);
607 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
608 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
609 lh->cid = cpu_to_le16(chan->dcid);
611 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the header and control field but not itself. */
613 if (chan->fcs == L2CAP_FCS_CRC16) {
614 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
615 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
618 if (lmp_no_flush_capable(conn->hcon->hdev))
619 flags = ACL_START_NO_FLUSH;
623 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
625 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RNR if we are locally busy (remembering that we did, via
 * CONN_RNR_SENT), otherwise RR, acknowledging up to buffer_seq. */
628 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
630 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
631 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
632 set_bit(CONN_RNR_SENT, &chan->conn_state);
634 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
636 control |= __set_reqseq(chan, chan->buffer_seq);
638 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding on this channel. */
641 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
643 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Start channel establishment. If the remote's feature mask is already
 * known (or being fetched), send a Connect Request once security is
 * satisfied; otherwise first issue an Information Request for the
 * feature mask and arm the info timer. Some else/brace lines are
 * missing from this extract. */
646 static void l2cap_do_start(struct l2cap_chan *chan)
648 struct l2cap_conn *conn = chan->conn;
650 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
651 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
654 if (l2cap_check_security(chan) &&
655 __l2cap_no_conn_pending(chan)) {
656 struct l2cap_conn_req req;
657 req.scid = cpu_to_le16(chan->scid);
660 chan->ident = l2cap_get_ident(conn);
661 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
663 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
667 struct l2cap_info_req req;
668 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
670 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
671 conn->info_ident = l2cap_get_ident(conn);
673 mod_timer(&conn->info_timer, jiffies +
674 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
676 l2cap_send_cmd(conn, conn->info_ident,
677 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether an L2CAP mode is supported by both ends: ERTM and
 * streaming require the corresponding bit in both the remote feature
 * mask and our (locally extended) mask. The enable_hs/basic-mode guard
 * lines are missing from this extract. */
681 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
683 u32 local_feat_mask = l2cap_feat_mask;
685 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
688 case L2CAP_MODE_ERTM:
689 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
690 case L2CAP_MODE_STREAMING:
691 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for this channel, first quiescing ERTM
 * timers, then move to BT_DISCONN. The sk error assignment implied by
 * `err` is missing from this extract. */
697 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
700 struct l2cap_disconn_req req;
707 if (chan->mode == L2CAP_MODE_ERTM) {
708 __clear_retrans_timer(chan);
709 __clear_monitor_timer(chan);
710 __clear_ack_timer(chan);
713 req.dcid = cpu_to_le16(chan->dcid);
714 req.scid = cpu_to_le16(chan->scid);
715 l2cap_send_cmd(conn, l2cap_get_ident(conn),
716 L2CAP_DISCONN_REQ, sizeof(req), &req);
718 l2cap_state_change(chan, BT_DISCONN);
722 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on a link forward once the
 * remote feature mask is known: BT_CONNECT channels get a Connect
 * Request (or are closed if their mode is unsupported on a
 * state-2 device), BT_CONNECT2 channels get a Connect Response —
 * pending if authorization/authentication is still outstanding,
 * success otherwise — followed by our first Configure Request.
 * Per-channel bh_lock_sock/unlock and several continue/brace lines
 * are missing from this extract. */
723 static void l2cap_conn_start(struct l2cap_conn *conn)
725 struct l2cap_chan *chan, *tmp;
727 BT_DBG("conn %p", conn);
729 read_lock(&conn->chan_lock);
731 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
732 struct sock *sk = chan->sk;
736 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
741 if (chan->state == BT_CONNECT) {
742 struct l2cap_conn_req req;
744 if (!l2cap_check_security(chan) ||
745 !__l2cap_no_conn_pending(chan)) {
750 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
751 && test_bit(CONF_STATE2_DEVICE,
752 &chan->conf_state)) {
753 /* l2cap_chan_close() calls list_del(chan)
754  * so release the lock */
755 read_unlock(&conn->chan_lock);
756 l2cap_chan_close(chan, ECONNRESET);
757 read_lock(&conn->chan_lock);
762 req.scid = cpu_to_le16(chan->scid);
765 chan->ident = l2cap_get_ident(conn);
766 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
768 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
771 } else if (chan->state == BT_CONNECT2) {
772 struct l2cap_conn_rsp rsp;
/* scid/dcid are swapped in the response: written from the
 * remote's perspective. */
774 rsp.scid = cpu_to_le16(chan->dcid);
775 rsp.dcid = cpu_to_le16(chan->scid);
777 if (l2cap_check_security(chan)) {
778 if (bt_sk(sk)->defer_setup) {
779 struct sock *parent = bt_sk(sk)->parent;
780 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
781 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
783 parent->sk_data_ready(parent, 0);
786 l2cap_state_change(chan, BT_CONFIG);
787 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
788 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
791 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
792 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
795 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send our Configure Request once, and only on success. */
798 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
799 rsp.result != L2CAP_CR_SUCCESS) {
804 set_bit(CONF_REQ_SENT, &chan->conf_state);
805 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
806 l2cap_build_conf_req(chan, buf), buf);
807 chan->num_conf_req++;
813 read_unlock(&conn->chan_lock);
816 /* Find socket with cid and source bdaddr.
817  * Returns closest match, locked.
/* Global lookup by fixed CID + source address: an exact address match
 * returns immediately; a BDADDR_ANY binding is remembered in c1 as the
 * fallback (the `return c` / `return c1` lines are missing from this
 * extract). */
819 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
821 struct l2cap_chan *c, *c1 = NULL;
823 read_lock(&chan_list_lock);
825 list_for_each_entry(c, &chan_list, global_l) {
826 struct sock *sk = c->sk;
828 if (state && c->state != state)
831 if (c->scid == cid) {
833 if (!bacmp(&bt_sk(sk)->src, src)) {
834 read_unlock(&chan_list_lock);
839 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
844 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel via ops->new_connection, attach it to the connection,
 * queue it on the listener's accept queue and mark it connected.
 * NULL-checks on pchan/chan and the `parent = pchan->sk` / `sk =
 * chan->sk` assignments are missing from this extract. */
849 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
851 struct sock *parent, *sk;
852 struct l2cap_chan *chan, *pchan;
856 /* Check if we have socket listening on cid */
857 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
864 bh_lock_sock(parent);
866 /* Check for backlog size */
867 if (sk_acceptq_is_full(parent)) {
868 BT_DBG("backlog full %d", parent->sk_ack_backlog);
872 chan = pchan->ops->new_connection(pchan->data);
878 write_lock_bh(&conn->chan_lock);
880 hci_conn_hold(conn->hcon);
882 bacpy(&bt_sk(sk)->src, conn->src);
883 bacpy(&bt_sk(sk)->dst, conn->dst);
885 bt_accept_enqueue(parent, sk);
887 __l2cap_chan_add(conn, chan);
889 __set_chan_timer(chan, sk->sk_sndtimeo);
891 l2cap_state_change(chan, BT_CONNECTED);
892 parent->sk_data_ready(parent, 0);
894 write_unlock_bh(&conn->chan_lock);
897 bh_unlock_sock(parent);
/* Mark a channel fully established: clear configuration state and timer,
 * go to BT_CONNECTED, and wake the socket (plus the accepting parent,
 * if any — the `if (parent)` guard is missing from this extract). */
900 static void l2cap_chan_ready(struct sock *sk)
902 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
903 struct sock *parent = bt_sk(sk)->parent;
905 BT_DBG("sk %p, parent %p", sk, parent);
907 chan->conf_state = 0;
908 __clear_chan_timer(chan);
910 l2cap_state_change(chan, BT_CONNECTED);
911 sk->sk_state_change(sk);
914 parent->sk_data_ready(parent, 0);
/* Link-level connection completed. For LE: accept incoming (or kick off
 * SMP on outgoing), then per channel either wait for SMP, mark
 * non-connection-oriented channels connected immediately, or start the
 * L2CAP connect procedure. Per-channel bh_lock_sock/unlock lines are
 * missing from this extract. */
917 static void l2cap_conn_ready(struct l2cap_conn *conn)
919 struct l2cap_chan *chan;
921 BT_DBG("conn %p", conn);
923 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
924 l2cap_le_conn_ready(conn);
926 if (conn->hcon->out && conn->hcon->type == LE_LINK)
927 smp_conn_security(conn, conn->hcon->pending_sec_level);
929 read_lock(&conn->chan_lock);
931 list_for_each_entry(chan, &conn->chan_l, list) {
932 struct sock *sk = chan->sk;
936 if (conn->hcon->type == LE_LINK) {
937 if (smp_conn_security(conn, chan->sec_level))
938 l2cap_chan_ready(sk);
940 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
941 __clear_chan_timer(chan);
942 l2cap_state_change(chan, BT_CONNECTED);
943 sk->sk_state_change(sk);
945 } else if (chan->state == BT_CONNECT)
946 l2cap_do_start(chan);
951 read_unlock(&conn->chan_lock);
954 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate err to every channel flagged FORCE_RELIABLE (the sk_err
 * assignment inside the branch is missing from this extract). */
955 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
957 struct l2cap_chan *chan;
959 BT_DBG("conn %p", conn);
961 read_lock(&conn->chan_lock);
963 list_for_each_entry(chan, &conn->chan_l, list) {
964 struct sock *sk = chan->sk;
966 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
970 read_unlock(&conn->chan_lock);
/* Information Request timed out: give up on the feature mask (mark it
 * done with whatever we have) and proceed with channel setup anyway. */
973 static void l2cap_info_timeout(unsigned long arg)
975 struct l2cap_conn *conn = (void *) arg;
977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
978 conn->info_ident = 0;
980 l2cap_conn_start(conn);
/* Tear down an L2CAP connection when the underlying HCI link dies:
 * delete every channel (propagating err), stop the info/security
 * timers, destroy any in-flight SMP context, and free the conn (the
 * kfree and per-channel sock locking lines are missing from this
 * extract). */
983 static void l2cap_conn_del(struct hci_conn *hcon, int err)
985 struct l2cap_conn *conn = hcon->l2cap_data;
986 struct l2cap_chan *chan, *l;
992 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
994 kfree_skb(conn->rx_skb);
997 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1000 l2cap_chan_del(chan, err);
1002 chan->ops->close(chan->data);
1005 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1006 del_timer_sync(&conn->info_timer);
1008 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1009 del_timer(&conn->security_timer);
1010 smp_chan_destroy(conn);
1013 hcon->l2cap_data = NULL;
/* SMP security procedure timed out on an LE link: drop the whole
 * connection with ETIMEDOUT. */
1017 static void security_timeout(unsigned long arg)
1019 struct l2cap_conn *conn = (void *) arg;
1021 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the L2CAP connection object for an HCI link. MTU comes
 * from the controller's LE or ACL buffer size; LE links get a security
 * timer, BR/EDR links an info timer. 0x13 = default "remote user
 * terminated" disconnect reason. The early `if (conn) return conn;`,
 * the status check and final return are missing from this extract. */
1024 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1026 struct l2cap_conn *conn = hcon->l2cap_data;
1031 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1035 hcon->l2cap_data = conn;
1038 BT_DBG("hcon %p conn %p", hcon, conn);
1040 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1041 conn->mtu = hcon->hdev->le_mtu;
1043 conn->mtu = hcon->hdev->acl_mtu;
1045 conn->src = &hcon->hdev->bdaddr;
1046 conn->dst = &hcon->dst;
1048 conn->feat_mask = 0;
1050 spin_lock_init(&conn->lock);
1051 rwlock_init(&conn->chan_lock);
1053 INIT_LIST_HEAD(&conn->chan_l);
1055 if (hcon->type == LE_LINK)
1056 setup_timer(&conn->security_timer, security_timeout,
1057 (unsigned long) conn);
1059 setup_timer(&conn->info_timer, l2cap_info_timeout,
1060 (unsigned long) conn);
1062 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1067 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1069 write_lock_bh(&conn->chan_lock);
1070 __l2cap_chan_add(conn, chan);
1071 write_unlock_bh(&conn->chan_lock);
1074 /* ---- Socket interface ---- */
1076 /* Find socket with psm and source bdaddr.
1077  * Returns closest match.
/* Global lookup by PSM + source address; mirrors
 * l2cap_global_chan_by_scid(): exact address match wins, a BDADDR_ANY
 * binding is the fallback (the c1 assignment and returns are missing
 * from this extract). */
1079 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1081 struct l2cap_chan *c, *c1 = NULL;
1083 read_lock(&chan_list_lock);
1085 list_for_each_entry(c, &chan_list, global_l) {
1086 struct sock *sk = c->sk;
1088 if (state && c->state != state)
1091 if (c->psm == psm) {
1093 if (!bacmp(&bt_sk(sk)->src, src)) {
1094 read_unlock(&chan_list_lock);
1099 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1104 read_unlock(&chan_list_lock);
/* Outgoing channel connect: route to a local adapter, create the LE or
 * ACL link with the required security, attach the channel, and either
 * start the L2CAP connect procedure now (link already up) or let
 * l2cap_conn_ready() do it later. Error-path labels (hcon put /
 * hdev unlock) and the IS_ERR checks are missing from this extract. */
1109 int l2cap_chan_connect(struct l2cap_chan *chan)
1111 struct sock *sk = chan->sk;
1112 bdaddr_t *src = &bt_sk(sk)->src;
1113 bdaddr_t *dst = &bt_sk(sk)->dst;
1114 struct l2cap_conn *conn;
1115 struct hci_conn *hcon;
1116 struct hci_dev *hdev;
1120 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1123 hdev = hci_get_route(dst, src);
1125 return -EHOSTUNREACH;
1127 hci_dev_lock_bh(hdev);
1129 auth_type = l2cap_get_auth_type(chan);
/* A destination CID of LE_DATA means this is an LE (ATT) channel. */
1131 if (chan->dcid == L2CAP_CID_LE_DATA)
1132 hcon = hci_connect(hdev, LE_LINK, dst,
1133 chan->sec_level, auth_type);
1135 hcon = hci_connect(hdev, ACL_LINK, dst,
1136 chan->sec_level, auth_type);
1139 err = PTR_ERR(hcon);
1143 conn = l2cap_conn_add(hcon, 0);
1150 /* Update source addr of the socket */
1151 bacpy(src, conn->src);
1153 l2cap_chan_add(conn, chan);
1155 l2cap_state_change(chan, BT_CONNECT);
1156 __set_chan_timer(chan, sk->sk_sndtimeo);
1158 if (hcon->state == BT_CONNECTED) {
1159 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1160 __clear_chan_timer(chan);
1161 if (l2cap_check_security(chan))
1162 l2cap_state_change(chan, BT_CONNECTED);
1164 l2cap_do_start(chan);
1170 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until all ERTM frames have been acknowledged or
 * the connection goes away. Classic wait-queue loop: re-arm
 * TASK_INTERRUPTIBLE each iteration, bail on signal or socket error.
 * The timeo initialisation, release_sock/lock_sock pair around
 * schedule_timeout() and the timeout check are missing from this
 * extract. */
1175 int __l2cap_wait_ack(struct sock *sk)
1177 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1178 DECLARE_WAITQUEUE(wait, current);
1182 add_wait_queue(sk_sleep(sk), &wait);
1183 set_current_state(TASK_INTERRUPTIBLE);
1184 while (chan->unacked_frames > 0 && chan->conn) {
1188 if (signal_pending(current)) {
1189 err = sock_intr_errno(timeo);
1194 timeo = schedule_timeout(timeo);
1196 set_current_state(TASK_INTERRUPTIBLE);
1198 err = sock_error(sk);
1202 set_current_state(TASK_RUNNING);
1203 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: remote stopped responding to our poll. Give up
 * and disconnect after remote_max_tx retries, otherwise poll again
 * (RR/RNR with the P bit) and re-arm. Sock lock/unlock lines are
 * missing from this extract. */
1207 static void l2cap_monitor_timeout(unsigned long arg)
1209 struct l2cap_chan *chan = (void *) arg;
1210 struct sock *sk = chan->sk;
1212 BT_DBG("chan %p", chan);
1215 if (chan->retry_count >= chan->remote_max_tx) {
1216 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1221 chan->retry_count++;
1222 __set_monitor_timer(chan);
1224 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer fired: switch to the monitor/poll phase —
 * send a poll (P bit set), start the monitor timer and wait for the
 * F-bit response (CONN_WAIT_F). */
1228 static void l2cap_retrans_timeout(unsigned long arg)
1230 struct l2cap_chan *chan = (void *) arg;
1231 struct sock *sk = chan->sk;
1233 BT_DBG("chan %p", chan);
1236 chan->retry_count = 1;
1237 __set_monitor_timer(chan);
1239 set_bit(CONN_WAIT_F, &chan->conn_state);
1241 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop frames from the head of the tx queue that the peer has now
 * acknowledged (everything before expected_ack_seq), decrementing
 * unacked_frames; stop the retransmission timer once nothing is
 * outstanding. The `break` on reaching expected_ack_seq and the
 * kfree_skb are missing from this extract. */
1245 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1247 struct sk_buff *skb;
1249 while ((skb = skb_peek(&chan->tx_q)) &&
1250 chan->unacked_frames) {
1251 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1254 skb = skb_dequeue(&chan->tx_q);
1257 chan->unacked_frames--;
1260 if (!chan->unacked_frames)
1261 __clear_retrans_timer(chan);
/* Hand a fully-built L2CAP data frame to the HCI layer. Non-flushable
 * send is used when the channel is not flagged flushable and the
 * controller supports it; FORCE_ACTIVE controls link power policy. */
1264 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1266 struct hci_conn *hcon = chan->conn->hcon;
1269 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
1272 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1273 lmp_no_flush_capable(hcon->hdev))
1274 flags = ACL_START_NO_FLUSH;
1278 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1279 hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain the tx queue, stamping each I-frame's
 * control field with the next tx sequence number and recomputing the
 * FCS over everything but the FCS field itself. No retransmission —
 * frames are sent once and forgotten. */
1282 static void l2cap_streaming_send(struct l2cap_chan *chan)
1284 struct sk_buff *skb;
1288 while ((skb = skb_dequeue(&chan->tx_q))) {
1289 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1290 control |= __set_txseq(chan, chan->next_tx_seq);
1291 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1293 if (chan->fcs == L2CAP_FCS_CRC16) {
1294 fcs = crc16(0, (u8 *)skb->data,
1295 skb->len - L2CAP_FCS_SIZE);
1296 put_unaligned_le16(fcs,
1297 skb->data + skb->len - L2CAP_FCS_SIZE);
1300 l2cap_do_send(chan, skb);
1302 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number tx_seq (SREJ path):
 * locate it in the tx queue, give up and disconnect if it already hit
 * remote_max_tx retries, else clone it, rebuild its control field
 * (preserving only the SAR bits, adding current reqseq/txseq and a
 * pending F bit), refresh the FCS and send. The clone shares the data
 * buffer, so retries are counted on the original skb. */
1306 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1308 struct sk_buff *skb, *tx_skb;
1312 skb = skb_peek(&chan->tx_q);
1317 if (bt_cb(skb)->tx_seq == tx_seq)
1320 if (skb_queue_is_last(&chan->tx_q, skb))
1323 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1325 if (chan->remote_max_tx &&
1326 bt_cb(skb)->retries == chan->remote_max_tx) {
1327 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1331 tx_skb = skb_clone(skb, GFP_ATOMIC);
1332 bt_cb(skb)->retries++;
1334 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1335 control &= __get_sar_mask(chan);
1337 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1338 control |= __set_ctrl_final(chan);
1340 control |= __set_reqseq(chan, chan->buffer_seq);
1341 control |= __set_txseq(chan, tx_seq);
1343 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)tx_skb->data,
1347 tx_skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1352 l2cap_do_send(chan, tx_skb);
/* ERTM transmit engine: send frames from tx_send_head while the transmit
 * window has room. Each frame is cloned (the original stays queued for
 * retransmission), its control field rebuilt with current reqseq/txseq
 * and any pending F bit, FCS refreshed, then sent with the retransmission
 * timer armed. Bookkeeping: first-time sends bump unacked_frames, and
 * tx_send_head advances (NULL at end of queue).
 * NOTE(review): the FCS block reads/writes via skb->data while sizing
 * with tx_skb->len — safe here only because skb_clone() shares the data
 * buffer, so skb->data == tx_skb->data. The remote-busy/WAIT_F guard
 * and the return of frames_sent are missing from this extract. */
1355 static int l2cap_ertm_send(struct l2cap_chan *chan)
1357 struct sk_buff *skb, *tx_skb;
1362 if (chan->state != BT_CONNECTED)
1365 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1367 if (chan->remote_max_tx &&
1368 bt_cb(skb)->retries == chan->remote_max_tx) {
1369 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1373 tx_skb = skb_clone(skb, GFP_ATOMIC);
1375 bt_cb(skb)->retries++;
1377 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1378 control &= __get_sar_mask(chan);
1380 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1381 control |= __set_ctrl_final(chan);
1383 control |= __set_reqseq(chan, chan->buffer_seq);
1384 control |= __set_txseq(chan, chan->next_tx_seq);
1386 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1388 if (chan->fcs == L2CAP_FCS_CRC16) {
1389 fcs = crc16(0, (u8 *)skb->data,
1390 tx_skb->len - L2CAP_FCS_SIZE);
1391 put_unaligned_le16(fcs, skb->data +
1392 tx_skb->len - L2CAP_FCS_SIZE);
1395 l2cap_do_send(chan, tx_skb);
1397 __set_retrans_timer(chan);
1399 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1401 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1403 if (bt_cb(skb)->retries == 1)
1404 chan->unacked_frames++;
1406 chan->frames_sent++;
1408 if (skb_queue_is_last(&chan->tx_q, skb))
1409 chan->tx_send_head = NULL;
1411 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the tx queue and resend
 * everything from the last acknowledged sequence number (REJ recovery). */
1419 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1423 if (!skb_queue_empty(&chan->tx_q))
1424 chan->tx_send_head = chan->tx_q.next;
1426 chan->next_tx_seq = chan->expected_ack_seq;
1427 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR if locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send), falling
 * back to an explicit RR S-frame when nothing was sent. */
1431 static void l2cap_send_ack(struct l2cap_chan *chan)
1435 control |= __set_reqseq(chan, chan->buffer_seq);
1437 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1438 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1439 set_bit(CONN_RNR_SENT, &chan->conn_state);
1440 l2cap_send_sframe(chan, control);
1444 if (l2cap_ertm_send(chan) > 0)
1447 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1448 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame with the F bit set, requesting the sequence
 * number of the last (tail) entry on the srej list. */
1451 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1453 struct srej_list *tail;
1456 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1457 control |= __set_ctrl_final(chan);
1459 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1460 control |= __set_reqseq(chan, tail->tx_seq);
1462 l2cap_send_sframe(chan, control);
/* Copy `len` bytes of user iovec data into skb: the first `count` bytes
 * into the head, the remainder as a frag_list chain of MTU-sized
 * continuation skbs (no L2CAP header on fragments). Error returns,
 * the len/count decrements and the loop construct are missing from
 * this extract. */
1465 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1467 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1468 struct sk_buff **frag;
1471 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1477 /* Continuation fragments (no L2CAP header) */
1478 frag = &skb_shinfo(skb)->frag_list;
1480 count = min_t(unsigned int, conn->mtu, len);
1482 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1485 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1488 (*frag)->priority = skb->priority;
1493 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from user data: L2CAP header
 * plus a 2-byte PSM, then the payload copied via
 * l2cap_skbuff_fromiovec(). Returns the skb or an ERR_PTR. The
 * alloc-failure check, kfree_skb on copy error and final return are
 * missing from this extract. */
1499 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1500 struct msghdr *msg, size_t len,
1503 struct sock *sk = chan->sk;
1504 struct l2cap_conn *conn = chan->conn;
1505 struct sk_buff *skb;
1506 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1507 struct l2cap_hdr *lh;
1509 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1511 count = min_t(unsigned int, (conn->mtu - hlen), len);
1512 skb = bt_skb_send_alloc(sk, count + hlen,
1513 msg->msg_flags & MSG_DONTWAIT, &err);
1515 return ERR_PTR(err);
1517 skb->priority = priority;
1519 /* Create L2CAP header */
1520 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1521 lh->cid = cpu_to_le16(chan->dcid);
1522 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1523 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1525 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1526 if (unlikely(err < 0)) {
1528 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload — same shape as the connectionless builder minus the PSM
 * field. Returns the skb or an ERR_PTR; the alloc-failure check,
 * kfree_skb on copy error and final return are missing from this
 * extract. */
1533 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1534 struct msghdr *msg, size_t len,
1537 struct sock *sk = chan->sk;
1538 struct l2cap_conn *conn = chan->conn;
1539 struct sk_buff *skb;
1540 int err, count, hlen = L2CAP_HDR_SIZE;
1541 struct l2cap_hdr *lh;
1543 BT_DBG("sk %p len %d", sk, (int)len);
1545 count = min_t(unsigned int, (conn->mtu - hlen), len);
1546 skb = bt_skb_send_alloc(sk, count + hlen,
1547 msg->msg_flags & MSG_DONTWAIT, &err);
1549 return ERR_PTR(err);
1551 skb->priority = priority;
1553 /* Create L2CAP header */
1554 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1555 lh->cid = cpu_to_le16(chan->dcid);
1556 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1558 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1559 if (unlikely(err < 0)) {
1561 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or
 * extended control field, optional SDU-length field (@sdulen != 0 for
 * a START segment), payload, and space reserved for the CRC16 FCS.
 * @control carries the pre-computed SAR/control bits.
 * Returns the skb or ERR_PTR on failure.
 */
1566 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1567 struct msghdr *msg, size_t len,
1568 u32 control, u16 sdulen)
1570 struct sock *sk = chan->sk;
1571 struct l2cap_conn *conn = chan->conn;
1572 struct sk_buff *skb;
1573 int err, count, hlen;
1574 struct l2cap_hdr *lh;
1576 BT_DBG("sk %p len %d", sk, (int)len);
1579 return ERR_PTR(-ENOTCONN);
/* extended control field (4 bytes) vs enhanced (2 bytes) */
1581 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1582 hlen = L2CAP_EXT_HDR_SIZE;
1584 hlen = L2CAP_ENH_HDR_SIZE;
1587 hlen += L2CAP_SDULEN_SIZE;
1589 if (chan->fcs == L2CAP_FCS_CRC16)
1590 hlen += L2CAP_FCS_SIZE;
1592 count = min_t(unsigned int, (conn->mtu - hlen), len);
1593 skb = bt_skb_send_alloc(sk, count + hlen,
1594 msg->msg_flags & MSG_DONTWAIT, &err);
1596 return ERR_PTR(err);
1598 /* Create L2CAP header */
1599 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1600 lh->cid = cpu_to_le16(chan->dcid);
1601 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1603 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1606 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1609 if (unlikely(err < 0)) {
1611 return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in at transmit time */
1614 if (chan->fcs == L2CAP_FCS_CRC16)
1615 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1617 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame followed
 * by CONTINUE frames and a final END frame.  Segments are built on a
 * local queue first so a mid-stream allocation failure leaves tx_q
 * untouched; on success the whole queue is spliced onto chan->tx_q.
 */
1621 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1623 struct sk_buff *skb;
1624 struct sk_buff_head sar_queue;
1628 skb_queue_head_init(&sar_queue);
/* first segment carries the total SDU length (sdulen = len) */
1629 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1630 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1632 return PTR_ERR(skb);
1634 __skb_queue_tail(&sar_queue, skb);
1635 len -= chan->remote_mps;
1636 size += chan->remote_mps;
1641 if (len > chan->remote_mps) {
1642 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1643 buflen = chan->remote_mps;
1645 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* middle/last segments carry no SDU-length field (sdulen = 0) */
1649 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1651 skb_queue_purge(&sar_queue);
1652 return PTR_ERR(skb);
1655 __skb_queue_tail(&sar_queue, skb);
1659 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1660 if (chan->tx_send_head == NULL)
1661 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel.  Dispatches on the
 * channel type/mode: connectionless channels send a G-frame directly,
 * basic mode sends a single B-frame (bounded by the outgoing MTU), and
 * ERTM/streaming modes queue one I-frame or a segmented SDU and then
 * kick the appropriate transmit engine.
 */
1666 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1669 struct sk_buff *skb;
1673 /* Connectionless channel */
1674 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1675 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1677 return PTR_ERR(skb);
1679 l2cap_do_send(chan, skb);
1683 switch (chan->mode) {
1684 case L2CAP_MODE_BASIC:
1685 /* Check outgoing MTU */
1686 if (len > chan->omtu)
1689 /* Create a basic PDU */
1690 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1692 return PTR_ERR(skb);
1694 l2cap_do_send(chan, skb);
1698 case L2CAP_MODE_ERTM:
1699 case L2CAP_MODE_STREAMING:
1700 /* Entire SDU fits into one PDU */
1701 if (len <= chan->remote_mps) {
1702 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1703 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1706 return PTR_ERR(skb);
1708 __skb_queue_tail(&chan->tx_q, skb);
1710 if (chan->tx_send_head == NULL)
1711 chan->tx_send_head = skb;
1714 /* Segment SDU into multiples PDUs */
1715 err = l2cap_sar_segment_sdu(chan, msg, len);
1720 if (chan->mode == L2CAP_MODE_STREAMING) {
1721 l2cap_streaming_send(chan);
/* hold off transmission while the remote is busy or we await an F-bit */
1726 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1727 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1732 err = l2cap_ertm_send(chan);
1739 BT_DBG("bad state %1.1x", chan->mode);
1746 /* Copy frame to all raw sockets on that connection */
1747 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1749 struct sk_buff *nskb;
1750 struct l2cap_chan *chan;
1752 BT_DBG("conn %p", conn);
/* walk the connection's channel list under the read lock; each raw
 * channel gets its own clone of the frame */
1754 read_lock(&conn->chan_lock);
1755 list_for_each_entry(chan, &conn->chan_l, list) {
1756 struct sock *sk = chan->sk;
1757 if (chan->chan_type != L2CAP_CHAN_RAW)
1760 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called in a context where sleeping is not allowed */
1763 nskb = skb_clone(skb, GFP_ATOMIC);
1767 if (chan->ops->recv(chan->data, nskb))
1770 read_unlock(&conn->chan_lock);
1773 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (on the signalling CID,
 * LE or BR/EDR depending on link type), command header, then @dlen bytes
 * of @data.  Data beyond one link MTU is chained as frag_list fragments.
 */
1774 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1775 u8 code, u8 ident, u16 dlen, void *data)
1777 struct sk_buff *skb, **frag;
1778 struct l2cap_cmd_hdr *cmd;
1779 struct l2cap_hdr *lh;
1782 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1783 conn, code, ident, dlen);
1785 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1786 count = min_t(unsigned int, conn->mtu, len);
1788 skb = bt_skb_alloc(count, GFP_ATOMIC);
1792 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1793 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use a different fixed signalling channel */
1795 if (conn->hcon->type == LE_LINK)
1796 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1798 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1800 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1803 cmd->len = cpu_to_le16(dlen);
1806 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1807 memcpy(skb_put(skb, count), data, count);
1813 /* Continuation fragments (no L2CAP header) */
1814 frag = &skb_shinfo(skb)->frag_list;
1816 count = min_t(unsigned int, conn->mtu, len);
1818 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1822 memcpy(skb_put(*frag, count), data, count);
1827 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr.  Fills in @type, @olen and
 * the option value in @val (widened to unsigned long for 1/2/4-byte
 * options; a pointer to the raw bytes for anything larger).
 * Returns the total encoded size so the caller can advance through the
 * option list.
 */
1837 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1839 struct l2cap_conf_opt *opt = *ptr;
1842 len = L2CAP_CONF_OPT_SIZE + opt->len;
1850 *val = *((u8 *) opt->val);
1854 *val = get_unaligned_le16(opt->val);
1858 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer to the raw data */
1862 *val = (unsigned long) opt->val;
1866 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *@ptr and advance
 * the pointer past it.  1/2/4-byte values are stored little-endian;
 * larger values are copied from the buffer @val points at.
 */
1870 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1872 struct l2cap_conf_opt *opt = *ptr;
1874 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1881 *((u8 *) opt->val) = val;
1885 put_unaligned_le16(val, opt->val);
1889 put_unaligned_le32(val, opt->val);
/* for larger options @val is really a pointer to the payload */
1893 memcpy(opt->val, (void *) val, len);
1897 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the channel's own service
 * type and default latency/flush values; streaming mode advertises
 * best-effort service.
 */
1900 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1902 struct l2cap_conf_efs efs;
1904 switch(chan->mode) {
1905 case L2CAP_MODE_ERTM:
1906 efs.id = chan->local_id;
1907 efs.stype = chan->local_stype;
1908 efs.msdu = cpu_to_le16(chan->local_msdu);
1909 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1910 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1911 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1914 case L2CAP_MODE_STREAMING:
1916 efs.stype = L2CAP_SERV_BESTEFFORT;
1917 efs.msdu = cpu_to_le16(chan->local_msdu);
1918 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1927 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1928 (unsigned long) &efs);
/* Ack timer callback: sends a pending acknowledgement for the channel.
 * Runs in timer (softirq) context, hence the bh socket lock.
 */
1931 static void l2cap_ack_timeout(unsigned long arg)
1933 struct l2cap_chan *chan = (void *) arg;
1935 bh_lock_sock(chan->sk);
1936 l2cap_send_ack(chan);
1937 bh_unlock_sock(chan->sk);
/* Initialise ERTM state for a channel entering connected state: reset
 * sequence counters, arm the retransmission/monitor/ack timers, set up
 * the SREJ queue and list, and install the ERTM backlog receive handler.
 */
1940 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1942 struct sock *sk = chan->sk;
1944 chan->expected_ack_seq = 0;
1945 chan->unacked_frames = 0;
1946 chan->buffer_seq = 0;
1947 chan->num_acked = 0;
1948 chan->frames_sent = 0;
1950 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1951 (unsigned long) chan);
1952 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1953 (unsigned long) chan);
1954 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1956 skb_queue_head_init(&chan->srej_q);
1958 INIT_LIST_HEAD(&chan->srej_l);
/* frames arriving while the socket is owned go through the ERTM path */
1961 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
1964 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1967 case L2CAP_MODE_STREAMING:
1968 case L2CAP_MODE_ERTM:
1969 if (l2cap_mode_supported(mode, remote_feat_mask))
1973 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature.
 */
1977 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1979 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature.
 */
1982 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window: if the requested window exceeds the default and
 * extended window size is supported, switch to the extended control
 * field; otherwise clamp the window to the standard maximum.
 */
1987 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1989 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1990 __l2cap_ews_supported(chan)) {
1991 /* use extended control field */
1992 set_bit(FLAG_EXT_CTRL, &chan->flags);
1993 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1995 chan->tx_win = min_t(u16, chan->tx_win,
1996 L2CAP_DEFAULT_TX_WINDOW);
1997 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request in @data for the channel's
 * current mode.  On the first request the mode may be downgraded based
 * on the remote's feature mask.  Emits MTU (if non-default), RFC, and
 * mode-dependent options (EFS, FCS, EWS).  Returns the request length.
 */
2001 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2003 struct l2cap_conf_req *req = data;
2004 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2005 void *ptr = req->data;
2008 BT_DBG("chan %p", chan);
/* only renegotiate the mode on the very first config exchange */
2010 if (chan->num_conf_req || chan->num_conf_rsp)
2013 switch (chan->mode) {
2014 case L2CAP_MODE_STREAMING:
2015 case L2CAP_MODE_ERTM:
2016 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2019 if (__l2cap_efs_supported(chan))
2020 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2024 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2029 if (chan->imtu != L2CAP_DEFAULT_MTU)
2030 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2032 switch (chan->mode) {
2033 case L2CAP_MODE_BASIC:
/* if the remote knows neither ERTM nor streaming, the RFC option
 * can be omitted entirely */
2034 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2035 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2038 rfc.mode = L2CAP_MODE_BASIC;
2040 rfc.max_transmit = 0;
2041 rfc.retrans_timeout = 0;
2042 rfc.monitor_timeout = 0;
2043 rfc.max_pdu_size = 0;
2045 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2046 (unsigned long) &rfc);
2049 case L2CAP_MODE_ERTM:
2050 rfc.mode = L2CAP_MODE_ERTM;
2051 rfc.max_transmit = chan->max_tx;
2052 rfc.retrans_timeout = 0;
2053 rfc.monitor_timeout = 0;
/* cap the PDU size so a full frame fits in the link MTU */
2055 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2056 L2CAP_EXT_HDR_SIZE -
2059 rfc.max_pdu_size = cpu_to_le16(size);
2061 l2cap_txwin_setup(chan);
2063 rfc.txwin_size = min_t(u16, chan->tx_win,
2064 L2CAP_DEFAULT_TX_WINDOW);
2066 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2067 (unsigned long) &rfc);
2069 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2070 l2cap_add_opt_efs(&ptr, chan);
2072 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2075 if (chan->fcs == L2CAP_FCS_NONE ||
2076 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2077 chan->fcs = L2CAP_FCS_NONE;
2078 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2081 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2086 case L2CAP_MODE_STREAMING:
2087 rfc.mode = L2CAP_MODE_STREAMING;
2089 rfc.max_transmit = 0;
2090 rfc.retrans_timeout = 0;
2091 rfc.monitor_timeout = 0;
2093 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2094 L2CAP_EXT_HDR_SIZE -
2097 rfc.max_pdu_size = cpu_to_le16(size);
2099 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2100 (unsigned long) &rfc);
2102 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2103 l2cap_add_opt_efs(&ptr, chan);
2105 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2108 if (chan->fcs == L2CAP_FCS_NONE ||
2109 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2110 chan->fcs = L2CAP_FCS_NONE;
2111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2116 req->dcid = cpu_to_le16(chan->dcid);
2117 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response in @data.  First pass decodes all options
 * (MTU, flush timeout, RFC, FCS, EFS, EWS); a second phase validates the
 * negotiated mode against ours and fills in the response options.
 * Returns the response length, or -ECONNREFUSED when negotiation fails.
 */
2122 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2124 struct l2cap_conf_rsp *rsp = data;
2125 void *ptr = rsp->data;
2126 void *req = chan->conf_req;
2127 int len = chan->conf_len;
2128 int type, hint, olen;
2130 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2131 struct l2cap_conf_efs efs;
2133 u16 mtu = L2CAP_DEFAULT_MTU;
2134 u16 result = L2CAP_CONF_SUCCESS;
2137 BT_DBG("chan %p", chan);
2139 while (len >= L2CAP_CONF_OPT_SIZE) {
2140 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint options may be silently ignored; non-hint unknown options
 * force a CONF_UNKNOWN response */
2142 hint = type & L2CAP_CONF_HINT;
2143 type &= L2CAP_CONF_MASK;
2146 case L2CAP_CONF_MTU:
2150 case L2CAP_CONF_FLUSH_TO:
2151 chan->flush_to = val;
2154 case L2CAP_CONF_QOS:
2157 case L2CAP_CONF_RFC:
2158 if (olen == sizeof(rfc))
2159 memcpy(&rfc, (void *) val, olen);
2162 case L2CAP_CONF_FCS:
2163 if (val == L2CAP_FCS_NONE)
2164 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2167 case L2CAP_CONF_EFS:
2169 if (olen == sizeof(efs))
2170 memcpy(&efs, (void *) val, olen);
2173 case L2CAP_CONF_EWS:
2175 return -ECONNREFUSED;
2177 set_bit(FLAG_EXT_CTRL, &chan->flags);
2178 set_bit(CONF_EWS_RECV, &chan->conf_state);
2179 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2180 chan->remote_tx_win = val;
2187 result = L2CAP_CONF_UNKNOWN;
2188 *((u8 *) ptr++) = type;
/* mode can only be adjusted on the first request/response round */
2193 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2196 switch (chan->mode) {
2197 case L2CAP_MODE_STREAMING:
2198 case L2CAP_MODE_ERTM:
2199 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2200 chan->mode = l2cap_select_mode(rfc.mode,
2201 chan->conn->feat_mask);
2206 if (__l2cap_efs_supported(chan))
2207 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2209 return -ECONNREFUSED;
2212 if (chan->mode != rfc.mode)
2213 return -ECONNREFUSED;
2219 if (chan->mode != rfc.mode) {
2220 result = L2CAP_CONF_UNACCEPT;
2221 rfc.mode = chan->mode;
2223 if (chan->num_conf_rsp == 1)
2224 return -ECONNREFUSED;
2226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2227 sizeof(rfc), (unsigned long) &rfc);
2230 if (result == L2CAP_CONF_SUCCESS) {
2231 /* Configure output options and let the other side know
2232 * which ones we don't like. */
2234 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2235 result = L2CAP_CONF_UNACCEPT;
2238 set_bit(CONF_MTU_DONE, &chan->conf_state);
2240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2243 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2244 efs.stype != L2CAP_SERV_NOTRAFIC &&
2245 efs.stype != chan->local_stype) {
2247 result = L2CAP_CONF_UNACCEPT;
2249 if (chan->num_conf_req >= 1)
2250 return -ECONNREFUSED;
2252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2254 (unsigned long) &efs);
2256 /* Send PENDING Conf Rsp */
2257 result = L2CAP_CONF_PENDING;
2258 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2263 case L2CAP_MODE_BASIC:
2264 chan->fcs = L2CAP_FCS_NONE;
2265 set_bit(CONF_MODE_DONE, &chan->conf_state);
2268 case L2CAP_MODE_ERTM:
2269 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2270 chan->remote_tx_win = rfc.txwin_size;
2272 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2274 chan->remote_max_tx = rfc.max_transmit;
2276 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2278 L2CAP_EXT_HDR_SIZE -
2281 rfc.max_pdu_size = cpu_to_le16(size);
2282 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() applied to host-order default
 * constants looks inverted — cpu_to_le16() would be expected
 * for values stored into the wire-format rfc struct; confirm
 * against the header's definition of these defaults. */
2284 rfc.retrans_timeout =
2285 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2286 rfc.monitor_timeout =
2287 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2289 set_bit(CONF_MODE_DONE, &chan->conf_state);
2291 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2292 sizeof(rfc), (unsigned long) &rfc);
2294 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2295 chan->remote_id = efs.id;
2296 chan->remote_stype = efs.stype;
2297 chan->remote_msdu = le16_to_cpu(efs.msdu);
2298 chan->remote_flush_to =
2299 le32_to_cpu(efs.flush_to);
2300 chan->remote_acc_lat =
2301 le32_to_cpu(efs.acc_lat);
2302 chan->remote_sdu_itime =
2303 le32_to_cpu(efs.sdu_itime);
2304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2305 sizeof(efs), (unsigned long) &efs);
2309 case L2CAP_MODE_STREAMING:
2310 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2312 L2CAP_EXT_HDR_SIZE -
2315 rfc.max_pdu_size = cpu_to_le16(size);
2316 chan->remote_mps = size;
2318 set_bit(CONF_MODE_DONE, &chan->conf_state);
2320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2321 sizeof(rfc), (unsigned long) &rfc);
2326 result = L2CAP_CONF_UNACCEPT;
2328 memset(&rfc, 0, sizeof(rfc));
2329 rfc.mode = chan->mode;
2332 if (result == L2CAP_CONF_SUCCESS)
2333 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2335 rsp->scid = cpu_to_le16(chan->dcid);
2336 rsp->result = cpu_to_le16(result);
2337 rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's Configure Response @rsp and build a follow-up
 * Configure Request in @data, adopting acceptable values (MTU, flush
 * timeout, RFC parameters, extended window).  *@result may be updated
 * (e.g. to UNACCEPT on a too-small MTU).  Returns the new request
 * length or -ECONNREFUSED if the mode cannot be reconciled.
 */
2344 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2345 struct l2cap_conf_req *req = data;
2346 void *ptr = req->data;
2348 struct l2cap_conf_rfc rfc;
2350 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2352 while (len >= L2CAP_CONF_OPT_SIZE) {
2353 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2356 case L2CAP_CONF_MTU:
/* refuse MTUs below the spec minimum but still echo ours back */
2357 if (val < L2CAP_DEFAULT_MIN_MTU) {
2358 *result = L2CAP_CONF_UNACCEPT;
2359 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2365 case L2CAP_CONF_FLUSH_TO:
2366 chan->flush_to = val;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2371 case L2CAP_CONF_RFC:
2372 if (olen == sizeof(rfc))
2373 memcpy(&rfc, (void *)val, olen);
/* a state-2 device must not let the peer change the mode */
2375 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2376 rfc.mode != chan->mode)
2377 return -ECONNREFUSED;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2382 sizeof(rfc), (unsigned long) &rfc);
2385 case L2CAP_CONF_EWS:
2386 chan->tx_win = min_t(u16, val,
2387 L2CAP_DEFAULT_EXT_WINDOW);
2388 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2394 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2395 return -ECONNREFUSED;
2397 chan->mode = rfc.mode;
/* adopt timing/PDU parameters only when the exchange is succeeding */
2399 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2401 case L2CAP_MODE_ERTM:
2402 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2403 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2404 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2406 case L2CAP_MODE_STREAMING:
2407 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2411 req->dcid = cpu_to_le16(chan->dcid);
2412 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) in
 * @data and return its length.
 */
2417 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2419 struct l2cap_conf_rsp *rsp = data;
2420 void *ptr = rsp->data;
2422 BT_DBG("chan %p", chan);
2424 rsp->scid = cpu_to_le16(chan->dcid);
2425 rsp->result = cpu_to_le16(result);
2426 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * first Configure Request if one has not been sent yet.
 */
2431 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2433 struct l2cap_conn_rsp rsp;
2434 struct l2cap_conn *conn = chan->conn;
2437 rsp.scid = cpu_to_le16(chan->dcid);
2438 rsp.dcid = cpu_to_le16(chan->scid);
2439 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2440 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2441 l2cap_send_cmd(conn, chan->ident,
2442 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set guards against sending a duplicate config request */
2444 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2447 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2448 l2cap_build_conf_req(chan, buf), buf);
2449 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and store
 * the negotiated retransmission/monitor timeouts and MPS on the channel.
 * Only meaningful for ERTM and streaming modes.
 */
2452 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2456 struct l2cap_conf_rfc rfc;
2458 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2460 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2463 while (len >= L2CAP_CONF_OPT_SIZE) {
2464 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2467 case L2CAP_CONF_RFC:
2468 if (olen == sizeof(rfc))
2469 memcpy(&rfc, (void *)val, olen);
2476 case L2CAP_MODE_ERTM:
2477 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2478 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2479 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2481 case L2CAP_MODE_STREAMING:
2482 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it answers our outstanding Information
 * Request (feature mask), stop the info timer, mark the exchange done,
 * and proceed with starting queued connections.
 */
2486 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2488 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2490 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2493 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2494 cmd->ident == conn->info_ident) {
2495 del_timer(&conn->info_timer);
2497 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2498 conn->info_ident = 0;
2500 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, run security/backlog checks, create the child channel, add it to
 * the connection, and reply with success/pending/refusal.  May also
 * trigger the initial Information Request and Configure Request.
 */
2506 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2508 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2509 struct l2cap_conn_rsp rsp;
2510 struct l2cap_chan *chan = NULL, *pchan;
2511 struct sock *parent, *sk = NULL;
2512 int result, status = L2CAP_CS_NO_INFO;
2514 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2515 __le16 psm = req->psm;
2517 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2519 /* Check if we have socket listening on psm */
2520 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2522 result = L2CAP_CR_BAD_PSM;
2528 bh_lock_sock(parent);
2530 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2531 if (psm != cpu_to_le16(0x0001) &&
2532 !hci_conn_check_link_mode(conn->hcon)) {
2533 conn->disc_reason = 0x05;
2534 result = L2CAP_CR_SEC_BLOCK;
2538 result = L2CAP_CR_NO_MEM;
2540 /* Check for backlog size */
2541 if (sk_acceptq_is_full(parent)) {
2542 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2546 chan = pchan->ops->new_connection(pchan->data);
2552 write_lock_bh(&conn->chan_lock);
2554 /* Check if we already have channel with that dcid */
2555 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2556 write_unlock_bh(&conn->chan_lock);
2557 sock_set_flag(sk, SOCK_ZAPPED);
2558 chan->ops->close(chan->data);
2562 hci_conn_hold(conn->hcon);
2564 bacpy(&bt_sk(sk)->src, conn->src);
2565 bacpy(&bt_sk(sk)->dst, conn->dst);
2569 bt_accept_enqueue(parent, sk);
2571 __l2cap_chan_add(conn, chan);
2575 __set_chan_timer(chan, sk->sk_sndtimeo);
2577 chan->ident = cmd->ident;
/* only proceed to config/security once the feature exchange is done */
2579 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2580 if (l2cap_check_security(chan)) {
2581 if (bt_sk(sk)->defer_setup) {
2582 l2cap_state_change(chan, BT_CONNECT2);
2583 result = L2CAP_CR_PEND;
2584 status = L2CAP_CS_AUTHOR_PEND;
2585 parent->sk_data_ready(parent, 0);
2587 l2cap_state_change(chan, BT_CONFIG);
2588 result = L2CAP_CR_SUCCESS;
2589 status = L2CAP_CS_NO_INFO;
2592 l2cap_state_change(chan, BT_CONNECT2);
2593 result = L2CAP_CR_PEND;
2594 status = L2CAP_CS_AUTHEN_PEND;
2597 l2cap_state_change(chan, BT_CONNECT2);
2598 result = L2CAP_CR_PEND;
2599 status = L2CAP_CS_NO_INFO;
2602 write_unlock_bh(&conn->chan_lock);
2605 bh_unlock_sock(parent);
2608 rsp.scid = cpu_to_le16(scid);
2609 rsp.dcid = cpu_to_le16(dcid);
2610 rsp.result = cpu_to_le16(result);
2611 rsp.status = cpu_to_le16(status);
2612 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* feature mask still unknown: ask for it before configuring */
2614 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2615 struct l2cap_info_req info;
2616 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2618 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2619 conn->info_ident = l2cap_get_ident(conn);
2621 mod_timer(&conn->info_timer, jiffies +
2622 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2624 l2cap_send_cmd(conn, conn->info_ident,
2625 L2CAP_INFO_REQ, sizeof(info), &info);
2628 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2629 result == L2CAP_CR_SUCCESS) {
2631 set_bit(CONF_REQ_SENT, &chan->conf_state);
2632 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2633 l2cap_build_conf_req(chan, buf), buf);
2634 chan->num_conf_req++;
/* Handle a Connect Response for one of our outgoing connection
 * attempts: on success move to BT_CONFIG and send our Configure
 * Request; on pending just mark the channel; on refusal tear the
 * channel down (deferred via a short timer if userspace owns the sock).
 */
2640 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2642 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2643 u16 scid, dcid, result, status;
2644 struct l2cap_chan *chan;
2648 scid = __le16_to_cpu(rsp->scid);
2649 dcid = __le16_to_cpu(rsp->dcid);
2650 result = __le16_to_cpu(rsp->result);
2651 status = __le16_to_cpu(rsp->status);
2653 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* a zero scid means the peer refused before assigning us a channel;
 * fall back to matching by command ident */
2656 chan = l2cap_get_chan_by_scid(conn, scid);
2660 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2668 case L2CAP_CR_SUCCESS:
2669 l2cap_state_change(chan, BT_CONFIG);
2672 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2674 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2677 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2678 l2cap_build_conf_req(chan, req), req);
2679 chan->num_conf_req++;
2683 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2687 /* don't delete l2cap channel if sk is owned by user */
2688 if (sock_owned_by_user(sk)) {
2689 l2cap_state_change(chan, BT_DISCONN);
2690 __clear_chan_timer(chan);
/* retry teardown shortly (200ms) once the user releases the sock */
2691 __set_chan_timer(chan, HZ / 5);
2695 l2cap_chan_del(chan, ECONNREFUSED);
2703 static inline void set_default_fcs(struct l2cap_chan *chan)
2705 /* FCS is enabled only in ERTM or streaming mode, if one or both
2708 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2709 chan->fcs = L2CAP_FCS_NONE;
/* default to CRC16 unless the remote explicitly asked for no FCS */
2710 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2711 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: accumulate option data across continued
 * requests in chan->conf_req, and once complete parse it and reply.
 * When both directions are configured, transition to BT_CONNECTED and
 * initialise ERTM state if needed.
 */
2714 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2716 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2719 struct l2cap_chan *chan;
2723 dcid = __le16_to_cpu(req->dcid);
2724 flags = __le16_to_cpu(req->flags);
2726 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2728 chan = l2cap_get_chan_by_scid(conn, dcid);
/* config is only valid in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command reject */
2734 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2735 struct l2cap_cmd_rej_cid rej;
2737 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2738 rej.scid = cpu_to_le16(chan->scid);
2739 rej.dcid = cpu_to_le16(chan->dcid);
2741 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2746 /* Reject if config buffer is too small. */
2747 len = cmd_len - sizeof(*req);
2748 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2749 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2750 l2cap_build_conf_rsp(chan, rsp,
2751 L2CAP_CONF_REJECT, flags), rsp);
2756 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2757 chan->conf_len += len;
/* continuation flag set: more option data follows in another request */
2759 if (flags & 0x0001) {
2760 /* Incomplete config. Send empty response. */
2761 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2762 l2cap_build_conf_rsp(chan, rsp,
2763 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2767 /* Complete config. */
2768 len = l2cap_parse_conf_req(chan, rsp);
2770 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2774 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2775 chan->num_conf_rsp++;
2777 /* Reset config buffer. */
2780 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2783 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2784 set_default_fcs(chan);
2786 l2cap_state_change(chan, BT_CONNECTED);
2788 chan->next_tx_seq = 0;
2789 chan->expected_tx_seq = 0;
2790 skb_queue_head_init(&chan->tx_q);
2791 if (chan->mode == L2CAP_MODE_ERTM)
2792 l2cap_ertm_init(chan);
2794 l2cap_chan_ready(sk);
2798 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2800 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2801 l2cap_build_conf_req(chan, buf), buf);
2802 chan->num_conf_req++;
2805 /* Got Conf Rsp PENDING from remote side and asume we sent
2806 Conf Rsp PENDING in the code above */
2807 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2808 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2810 /* check compatibility */
2812 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2813 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2815 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2816 l2cap_build_conf_rsp(chan, rsp,
2817 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configure Response: on success record the negotiated RFC
 * parameters; on pending/unaccept re-negotiate with a new request; on
 * any other result disconnect.  When both directions are done,
 * transition the channel to BT_CONNECTED.
 */
2825 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2827 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2828 u16 scid, flags, result;
2829 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a __le16 on the wire; using it without
 * __le16_to_cpu() would be wrong on big-endian hosts — confirm. */
2831 int len = cmd->len - sizeof(*rsp);
2833 scid = __le16_to_cpu(rsp->scid);
2834 flags = __le16_to_cpu(rsp->flags);
2835 result = __le16_to_cpu(rsp->result);
2837 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2838 scid, flags, result);
2840 chan = l2cap_get_chan_by_scid(conn, scid);
2847 case L2CAP_CONF_SUCCESS:
2848 l2cap_conf_rfc_get(chan, rsp->data, len);
2849 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2852 case L2CAP_CONF_PENDING:
2853 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2855 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2858 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2861 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2865 /* check compatibility */
2867 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2868 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2870 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2871 l2cap_build_conf_rsp(chan, buf,
2872 L2CAP_CONF_SUCCESS, 0x0000), buf);
2876 case L2CAP_CONF_UNACCEPT:
/* give the negotiation a bounded number of retries */
2877 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2880 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2881 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2885 /* throw out any old stored conf requests */
2886 result = L2CAP_CONF_SUCCESS;
2887 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2890 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2894 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2895 L2CAP_CONF_REQ, len, req);
2896 chan->num_conf_req++;
2897 if (result != L2CAP_CONF_SUCCESS)
2903 sk->sk_err = ECONNRESET;
2904 __set_chan_timer(chan, HZ * 5);
2905 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2912 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2914 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2915 set_default_fcs(chan);
2917 l2cap_state_change(chan, BT_CONNECTED);
2918 chan->next_tx_seq = 0;
2919 chan->expected_tx_seq = 0;
2920 skb_queue_head_init(&chan->tx_q);
2921 if (chan->mode == L2CAP_MODE_ERTM)
2922 l2cap_ertm_init(chan);
2924 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel (deferred via a short
 * timer if userspace currently owns the socket lock).
 */
2932 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2934 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2935 struct l2cap_disconn_rsp rsp;
2937 struct l2cap_chan *chan;
2940 scid = __le16_to_cpu(req->scid);
2941 dcid = __le16_to_cpu(req->dcid);
2943 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2945 chan = l2cap_get_chan_by_scid(conn, dcid);
2951 rsp.dcid = cpu_to_le16(chan->scid);
2952 rsp.scid = cpu_to_le16(chan->dcid);
2953 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2955 sk->sk_shutdown = SHUTDOWN_MASK;
2957 /* don't delete l2cap channel if sk is owned by user */
2958 if (sock_owned_by_user(sk)) {
2959 l2cap_state_change(chan, BT_DISCONN);
2960 __clear_chan_timer(chan);
2961 __set_chan_timer(chan, HZ / 5);
2966 l2cap_chan_del(chan, ECONNRESET);
2969 chan->ops->close(chan->data);
/* Handle a Disconnect Response to our own disconnect request: delete
 * the channel (deferred via a short timer if userspace owns the sock).
 */
2973 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2975 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2977 struct l2cap_chan *chan;
2980 scid = __le16_to_cpu(rsp->scid);
2981 dcid = __le16_to_cpu(rsp->dcid);
2983 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2985 chan = l2cap_get_chan_by_scid(conn, scid);
2991 /* don't delete l2cap channel if sk is owned by user */
2992 if (sock_owned_by_user(sk)) {
2993 l2cap_state_change(chan,BT_DISCONN);
2994 __clear_chan_timer(chan);
2995 __set_chan_timer(chan, HZ / 5);
3000 l2cap_chan_del(chan, 0);
3003 chan->ops->close(chan->data);
/* Handle an Information Request: answer with our feature mask for
 * L2CAP_IT_FEAT_MASK, the fixed-channel map for L2CAP_IT_FIXED_CHAN,
 * and NOTSUPP for anything else.
 */
3007 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3009 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3012 type = __le16_to_cpu(req->type);
3014 BT_DBG("type 0x%4.4x", type);
3016 if (type == L2CAP_IT_FEAT_MASK) {
3018 u32 feat_mask = l2cap_feat_mask;
3019 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3020 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3021 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3023 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* extended flow/window bits are advertised conditionally */
3026 feat_mask |= L2CAP_FEAT_EXT_FLOW
3027 | L2CAP_FEAT_EXT_WINDOW;
3029 put_unaligned_le32(feat_mask, rsp->data);
3030 l2cap_send_cmd(conn, cmd->ident,
3031 L2CAP_INFO_RSP, sizeof(buf), buf);
3032 } else if (type == L2CAP_IT_FIXED_CHAN) {
3034 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3035 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3036 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3037 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3038 l2cap_send_cmd(conn, cmd->ident,
3039 L2CAP_INFO_RSP, sizeof(buf), buf);
3041 struct l2cap_info_rsp rsp;
3042 rsp.type = cpu_to_le16(type);
3043 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3044 l2cap_send_cmd(conn, cmd->ident,
3045 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3051 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3053 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3056 type = __le16_to_cpu(rsp->type);
3057 result = __le16_to_cpu(rsp->result);
3059 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3061 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3062 if (cmd->ident != conn->info_ident ||
3063 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3066 del_timer(&conn->info_timer);
3068 if (result != L2CAP_IR_SUCCESS) {
3069 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3070 conn->info_ident = 0;
3072 l2cap_conn_start(conn);
3077 if (type == L2CAP_IT_FEAT_MASK) {
3078 conn->feat_mask = get_unaligned_le32(rsp->data);
3080 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3081 struct l2cap_info_req req;
3082 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3084 conn->info_ident = l2cap_get_ident(conn);
3086 l2cap_send_cmd(conn, conn->info_ident,
3087 L2CAP_INFO_REQ, sizeof(req), &req);
3089 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3090 conn->info_ident = 0;
3092 l2cap_conn_start(conn);
3094 } else if (type == L2CAP_IT_FIXED_CHAN) {
3095 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3096 conn->info_ident = 0;
3098 l2cap_conn_start(conn);
3104 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3109 if (min > max || min < 6 || max > 3200)
3112 if (to_multiplier < 10 || to_multiplier > 3200)
3115 if (max >= to_multiplier * 8)
3118 max_latency = (to_multiplier * 8 / max) - 1;
3119 if (latency > 499 || latency > max_latency)
3125 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3126 struct l2cap_cmd_hdr *cmd, u8 *data)
3128 struct hci_conn *hcon = conn->hcon;
3129 struct l2cap_conn_param_update_req *req;
3130 struct l2cap_conn_param_update_rsp rsp;
3131 u16 min, max, latency, to_multiplier, cmd_len;
3134 if (!(hcon->link_mode & HCI_LM_MASTER))
3137 cmd_len = __le16_to_cpu(cmd->len);
3138 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3141 req = (struct l2cap_conn_param_update_req *) data;
3142 min = __le16_to_cpu(req->min);
3143 max = __le16_to_cpu(req->max);
3144 latency = __le16_to_cpu(req->latency);
3145 to_multiplier = __le16_to_cpu(req->to_multiplier);
3147 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3148 min, max, latency, to_multiplier);
3150 memset(&rsp, 0, sizeof(rsp));
3152 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3154 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3156 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3158 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3162 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3167 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3168 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3172 switch (cmd->code) {
3173 case L2CAP_COMMAND_REJ:
3174 l2cap_command_rej(conn, cmd, data);
3177 case L2CAP_CONN_REQ:
3178 err = l2cap_connect_req(conn, cmd, data);
3181 case L2CAP_CONN_RSP:
3182 err = l2cap_connect_rsp(conn, cmd, data);
3185 case L2CAP_CONF_REQ:
3186 err = l2cap_config_req(conn, cmd, cmd_len, data);
3189 case L2CAP_CONF_RSP:
3190 err = l2cap_config_rsp(conn, cmd, data);
3193 case L2CAP_DISCONN_REQ:
3194 err = l2cap_disconnect_req(conn, cmd, data);
3197 case L2CAP_DISCONN_RSP:
3198 err = l2cap_disconnect_rsp(conn, cmd, data);
3201 case L2CAP_ECHO_REQ:
3202 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3205 case L2CAP_ECHO_RSP:
3208 case L2CAP_INFO_REQ:
3209 err = l2cap_information_req(conn, cmd, data);
3212 case L2CAP_INFO_RSP:
3213 err = l2cap_information_rsp(conn, cmd, data);
3217 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3225 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3226 struct l2cap_cmd_hdr *cmd, u8 *data)
3228 switch (cmd->code) {
3229 case L2CAP_COMMAND_REJ:
3232 case L2CAP_CONN_PARAM_UPDATE_REQ:
3233 return l2cap_conn_param_update_req(conn, cmd, data);
3235 case L2CAP_CONN_PARAM_UPDATE_RSP:
3239 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3244 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3245 struct sk_buff *skb)
3247 u8 *data = skb->data;
3249 struct l2cap_cmd_hdr cmd;
3252 l2cap_raw_recv(conn, skb);
3254 while (len >= L2CAP_CMD_HDR_SIZE) {
3256 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3257 data += L2CAP_CMD_HDR_SIZE;
3258 len -= L2CAP_CMD_HDR_SIZE;
3260 cmd_len = le16_to_cpu(cmd.len);
3262 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3264 if (cmd_len > len || !cmd.ident) {
3265 BT_DBG("corrupted command");
3269 if (conn->hcon->type == LE_LINK)
3270 err = l2cap_le_sig_cmd(conn, &cmd, data);
3272 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3275 struct l2cap_cmd_rej_unk rej;
3277 BT_ERR("Wrong link type (%d)", err);
3279 /* FIXME: Map err to a valid reason */
3280 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3281 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3291 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3293 u16 our_fcs, rcv_fcs;
3296 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3297 hdr_size = L2CAP_EXT_HDR_SIZE;
3299 hdr_size = L2CAP_ENH_HDR_SIZE;
3301 if (chan->fcs == L2CAP_FCS_CRC16) {
3302 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3303 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3304 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3306 if (our_fcs != rcv_fcs)
3312 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3316 chan->frames_sent = 0;
3318 control |= __set_reqseq(chan, chan->buffer_seq);
3320 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3321 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3322 l2cap_send_sframe(chan, control);
3323 set_bit(CONN_RNR_SENT, &chan->conn_state);
3326 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3327 l2cap_retransmit_frames(chan);
3329 l2cap_ertm_send(chan);
3331 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3332 chan->frames_sent == 0) {
3333 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3334 l2cap_send_sframe(chan, control);
3338 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3340 struct sk_buff *next_skb;
3341 int tx_seq_offset, next_tx_seq_offset;
3343 bt_cb(skb)->tx_seq = tx_seq;
3344 bt_cb(skb)->sar = sar;
3346 next_skb = skb_peek(&chan->srej_q);
3348 __skb_queue_tail(&chan->srej_q, skb);
3352 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3355 if (bt_cb(next_skb)->tx_seq == tx_seq)
3358 next_tx_seq_offset = __seq_offset(chan,
3359 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3361 if (next_tx_seq_offset > tx_seq_offset) {
3362 __skb_queue_before(&chan->srej_q, next_skb, skb);
3366 if (skb_queue_is_last(&chan->srej_q, next_skb))
3369 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3371 __skb_queue_tail(&chan->srej_q, skb);
3376 static void append_skb_frag(struct sk_buff *skb,
3377 struct sk_buff *new_frag, struct sk_buff **last_frag)
3379 /* skb->len reflects data in skb as well as all fragments
3380 * skb->data_len reflects only data in fragments
3382 if (!skb_has_frag_list(skb))
3383 skb_shinfo(skb)->frag_list = new_frag;
3385 new_frag->next = NULL;
3387 (*last_frag)->next = new_frag;
3388 *last_frag = new_frag;
3390 skb->len += new_frag->len;
3391 skb->data_len += new_frag->len;
3392 skb->truesize += new_frag->truesize;
3395 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3399 switch (__get_ctrl_sar(chan, control)) {
3400 case L2CAP_SAR_UNSEGMENTED:
3404 err = chan->ops->recv(chan->data, skb);
3407 case L2CAP_SAR_START:
3411 chan->sdu_len = get_unaligned_le16(skb->data);
3412 skb_pull(skb, L2CAP_SDULEN_SIZE);
3414 if (chan->sdu_len > chan->imtu) {
3419 if (skb->len >= chan->sdu_len)
3423 chan->sdu_last_frag = skb;
3429 case L2CAP_SAR_CONTINUE:
3433 append_skb_frag(chan->sdu, skb,
3434 &chan->sdu_last_frag);
3437 if (chan->sdu->len >= chan->sdu_len)
3447 append_skb_frag(chan->sdu, skb,
3448 &chan->sdu_last_frag);
3451 if (chan->sdu->len != chan->sdu_len)
3454 err = chan->ops->recv(chan->data, chan->sdu);
3457 /* Reassembly complete */
3459 chan->sdu_last_frag = NULL;
3467 kfree_skb(chan->sdu);
3469 chan->sdu_last_frag = NULL;
3476 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3480 BT_DBG("chan %p, Enter local busy", chan);
3482 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3484 control = __set_reqseq(chan, chan->buffer_seq);
3485 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3486 l2cap_send_sframe(chan, control);
3488 set_bit(CONN_RNR_SENT, &chan->conn_state);
3490 __clear_ack_timer(chan);
3493 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3497 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3500 control = __set_reqseq(chan, chan->buffer_seq);
3501 control |= __set_ctrl_poll(chan);
3502 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3503 l2cap_send_sframe(chan, control);
3504 chan->retry_count = 1;
3506 __clear_retrans_timer(chan);
3507 __set_monitor_timer(chan);
3509 set_bit(CONN_WAIT_F, &chan->conn_state);
3512 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3513 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3515 BT_DBG("chan %p, Exit local busy", chan);
3518 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3520 if (chan->mode == L2CAP_MODE_ERTM) {
3522 l2cap_ertm_enter_local_busy(chan);
3524 l2cap_ertm_exit_local_busy(chan);
3528 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3530 struct sk_buff *skb;
3533 while ((skb = skb_peek(&chan->srej_q)) &&
3534 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3537 if (bt_cb(skb)->tx_seq != tx_seq)
3540 skb = skb_dequeue(&chan->srej_q);
3541 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3542 err = l2cap_reassemble_sdu(chan, skb, control);
3545 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3549 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3550 tx_seq = __next_seq(chan, tx_seq);
3554 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3556 struct srej_list *l, *tmp;
3559 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3560 if (l->tx_seq == tx_seq) {
3565 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3566 control |= __set_reqseq(chan, l->tx_seq);
3567 l2cap_send_sframe(chan, control);
3569 list_add_tail(&l->list, &chan->srej_l);
3573 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3575 struct srej_list *new;
3578 while (tx_seq != chan->expected_tx_seq) {
3579 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3580 control |= __set_reqseq(chan, chan->expected_tx_seq);
3581 l2cap_send_sframe(chan, control);
3583 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3584 new->tx_seq = chan->expected_tx_seq;
3586 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3588 list_add_tail(&new->list, &chan->srej_l);
3591 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3594 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3596 u16 tx_seq = __get_txseq(chan, rx_control);
3597 u16 req_seq = __get_reqseq(chan, rx_control);
3598 u8 sar = __get_ctrl_sar(chan, rx_control);
3599 int tx_seq_offset, expected_tx_seq_offset;
3600 int num_to_ack = (chan->tx_win/6) + 1;
3603 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3604 tx_seq, rx_control);
3606 if (__is_ctrl_final(chan, rx_control) &&
3607 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3608 __clear_monitor_timer(chan);
3609 if (chan->unacked_frames > 0)
3610 __set_retrans_timer(chan);
3611 clear_bit(CONN_WAIT_F, &chan->conn_state);
3614 chan->expected_ack_seq = req_seq;
3615 l2cap_drop_acked_frames(chan);
3617 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3619 /* invalid tx_seq */
3620 if (tx_seq_offset >= chan->tx_win) {
3621 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3625 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3628 if (tx_seq == chan->expected_tx_seq)
3631 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3632 struct srej_list *first;
3634 first = list_first_entry(&chan->srej_l,
3635 struct srej_list, list);
3636 if (tx_seq == first->tx_seq) {
3637 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3638 l2cap_check_srej_gap(chan, tx_seq);
3640 list_del(&first->list);
3643 if (list_empty(&chan->srej_l)) {
3644 chan->buffer_seq = chan->buffer_seq_srej;
3645 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3646 l2cap_send_ack(chan);
3647 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3650 struct srej_list *l;
3652 /* duplicated tx_seq */
3653 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3656 list_for_each_entry(l, &chan->srej_l, list) {
3657 if (l->tx_seq == tx_seq) {
3658 l2cap_resend_srejframe(chan, tx_seq);
3662 l2cap_send_srejframe(chan, tx_seq);
3665 expected_tx_seq_offset = __seq_offset(chan,
3666 chan->expected_tx_seq, chan->buffer_seq);
3668 /* duplicated tx_seq */
3669 if (tx_seq_offset < expected_tx_seq_offset)
3672 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3674 BT_DBG("chan %p, Enter SREJ", chan);
3676 INIT_LIST_HEAD(&chan->srej_l);
3677 chan->buffer_seq_srej = chan->buffer_seq;
3679 __skb_queue_head_init(&chan->srej_q);
3680 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3682 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3684 l2cap_send_srejframe(chan, tx_seq);
3686 __clear_ack_timer(chan);
3691 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3693 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3694 bt_cb(skb)->tx_seq = tx_seq;
3695 bt_cb(skb)->sar = sar;
3696 __skb_queue_tail(&chan->srej_q, skb);
3700 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3701 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3704 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3708 if (__is_ctrl_final(chan, rx_control)) {
3709 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3710 l2cap_retransmit_frames(chan);
3713 __set_ack_timer(chan);
3715 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3716 if (chan->num_acked == num_to_ack - 1)
3717 l2cap_send_ack(chan);
3726 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3728 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3729 __get_reqseq(chan, rx_control), rx_control);
3731 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3732 l2cap_drop_acked_frames(chan);
3734 if (__is_ctrl_poll(chan, rx_control)) {
3735 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3736 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3737 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3738 (chan->unacked_frames > 0))
3739 __set_retrans_timer(chan);
3741 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3742 l2cap_send_srejtail(chan);
3744 l2cap_send_i_or_rr_or_rnr(chan);
3747 } else if (__is_ctrl_final(chan, rx_control)) {
3748 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3750 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3751 l2cap_retransmit_frames(chan);
3754 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3755 (chan->unacked_frames > 0))
3756 __set_retrans_timer(chan);
3758 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3759 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3760 l2cap_send_ack(chan);
3762 l2cap_ertm_send(chan);
3766 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3768 u16 tx_seq = __get_reqseq(chan, rx_control);
3770 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3772 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3774 chan->expected_ack_seq = tx_seq;
3775 l2cap_drop_acked_frames(chan);
3777 if (__is_ctrl_final(chan, rx_control)) {
3778 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3779 l2cap_retransmit_frames(chan);
3781 l2cap_retransmit_frames(chan);
3783 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3784 set_bit(CONN_REJ_ACT, &chan->conn_state);
3787 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3789 u16 tx_seq = __get_reqseq(chan, rx_control);
3791 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3793 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3795 if (__is_ctrl_poll(chan, rx_control)) {
3796 chan->expected_ack_seq = tx_seq;
3797 l2cap_drop_acked_frames(chan);
3799 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3800 l2cap_retransmit_one_frame(chan, tx_seq);
3802 l2cap_ertm_send(chan);
3804 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3805 chan->srej_save_reqseq = tx_seq;
3806 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3808 } else if (__is_ctrl_final(chan, rx_control)) {
3809 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3810 chan->srej_save_reqseq == tx_seq)
3811 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3813 l2cap_retransmit_one_frame(chan, tx_seq);
3815 l2cap_retransmit_one_frame(chan, tx_seq);
3816 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3817 chan->srej_save_reqseq = tx_seq;
3818 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3823 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3825 u16 tx_seq = __get_reqseq(chan, rx_control);
3827 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3829 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3830 chan->expected_ack_seq = tx_seq;
3831 l2cap_drop_acked_frames(chan);
3833 if (__is_ctrl_poll(chan, rx_control))
3834 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3836 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3837 __clear_retrans_timer(chan);
3838 if (__is_ctrl_poll(chan, rx_control))
3839 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3843 if (__is_ctrl_poll(chan, rx_control)) {
3844 l2cap_send_srejtail(chan);
3846 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3847 l2cap_send_sframe(chan, rx_control);
3851 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3853 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3855 if (__is_ctrl_final(chan, rx_control) &&
3856 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3857 __clear_monitor_timer(chan);
3858 if (chan->unacked_frames > 0)
3859 __set_retrans_timer(chan);
3860 clear_bit(CONN_WAIT_F, &chan->conn_state);
3863 switch (__get_ctrl_super(chan, rx_control)) {
3864 case L2CAP_SUPER_RR:
3865 l2cap_data_channel_rrframe(chan, rx_control);
3868 case L2CAP_SUPER_REJ:
3869 l2cap_data_channel_rejframe(chan, rx_control);
3872 case L2CAP_SUPER_SREJ:
3873 l2cap_data_channel_srejframe(chan, rx_control);
3876 case L2CAP_SUPER_RNR:
3877 l2cap_data_channel_rnrframe(chan, rx_control);
3885 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3890 int len, next_tx_seq_offset, req_seq_offset;
3892 control = __get_control(chan, skb->data);
3893 skb_pull(skb, __ctrl_size(chan));
3897 * We can just drop the corrupted I-frame here.
3898 * Receiver will miss it and start proper recovery
3899 * procedures and ask retransmission.
3901 if (l2cap_check_fcs(chan, skb))
3904 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3905 len -= L2CAP_SDULEN_SIZE;
3907 if (chan->fcs == L2CAP_FCS_CRC16)
3908 len -= L2CAP_FCS_SIZE;
3910 if (len > chan->mps) {
3911 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3915 req_seq = __get_reqseq(chan, control);
3917 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3919 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3920 chan->expected_ack_seq);
3922 /* check for invalid req-seq */
3923 if (req_seq_offset > next_tx_seq_offset) {
3924 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3928 if (!__is_sframe(chan, control)) {
3930 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3934 l2cap_data_channel_iframe(chan, control, skb);
3938 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3942 l2cap_data_channel_sframe(chan, control, skb);
3952 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3954 struct l2cap_chan *chan;
3955 struct sock *sk = NULL;
3960 chan = l2cap_get_chan_by_scid(conn, cid);
3962 BT_DBG("unknown cid 0x%4.4x", cid);
3968 BT_DBG("chan %p, len %d", chan, skb->len);
3970 if (chan->state != BT_CONNECTED)
3973 switch (chan->mode) {
3974 case L2CAP_MODE_BASIC:
3975 /* If socket recv buffers overflows we drop data here
3976 * which is *bad* because L2CAP has to be reliable.
3977 * But we don't have any other choice. L2CAP doesn't
3978 * provide flow control mechanism. */
3980 if (chan->imtu < skb->len)
3983 if (!chan->ops->recv(chan->data, skb))
3987 case L2CAP_MODE_ERTM:
3988 if (!sock_owned_by_user(sk)) {
3989 l2cap_ertm_data_rcv(sk, skb);
3991 if (sk_add_backlog(sk, skb))
3997 case L2CAP_MODE_STREAMING:
3998 control = __get_control(chan, skb->data);
3999 skb_pull(skb, __ctrl_size(chan));
4002 if (l2cap_check_fcs(chan, skb))
4005 if (__is_sar_start(chan, control))
4006 len -= L2CAP_SDULEN_SIZE;
4008 if (chan->fcs == L2CAP_FCS_CRC16)
4009 len -= L2CAP_FCS_SIZE;
4011 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4014 tx_seq = __get_txseq(chan, control);
4016 if (chan->expected_tx_seq != tx_seq) {
4017 /* Frame(s) missing - must discard partial SDU */
4018 kfree_skb(chan->sdu);
4020 chan->sdu_last_frag = NULL;
4023 /* TODO: Notify userland of missing data */
4026 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4028 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4029 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4034 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4048 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4050 struct sock *sk = NULL;
4051 struct l2cap_chan *chan;
4053 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4061 BT_DBG("sk %p, len %d", sk, skb->len);
4063 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4066 if (chan->imtu < skb->len)
4069 if (!chan->ops->recv(chan->data, skb))
4081 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4083 struct sock *sk = NULL;
4084 struct l2cap_chan *chan;
4086 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4094 BT_DBG("sk %p, len %d", sk, skb->len);
4096 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4099 if (chan->imtu < skb->len)
4102 if (!chan->ops->recv(chan->data, skb))
4114 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4116 struct l2cap_hdr *lh = (void *) skb->data;
4120 skb_pull(skb, L2CAP_HDR_SIZE);
4121 cid = __le16_to_cpu(lh->cid);
4122 len = __le16_to_cpu(lh->len);
4124 if (len != skb->len) {
4129 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4132 case L2CAP_CID_LE_SIGNALING:
4133 case L2CAP_CID_SIGNALING:
4134 l2cap_sig_channel(conn, skb);
4137 case L2CAP_CID_CONN_LESS:
4138 psm = get_unaligned_le16(skb->data);
4140 l2cap_conless_channel(conn, psm, skb);
4143 case L2CAP_CID_LE_DATA:
4144 l2cap_att_channel(conn, cid, skb);
4148 if (smp_sig_channel(conn, skb))
4149 l2cap_conn_del(conn->hcon, EACCES);
4153 l2cap_data_channel(conn, cid, skb);
4158 /* ---- L2CAP interface with lower layer (HCI) ---- */
4160 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4162 int exact = 0, lm1 = 0, lm2 = 0;
4163 struct l2cap_chan *c;
4165 if (type != ACL_LINK)
4168 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4170 /* Find listening sockets and check their link_mode */
4171 read_lock(&chan_list_lock);
4172 list_for_each_entry(c, &chan_list, global_l) {
4173 struct sock *sk = c->sk;
4175 if (c->state != BT_LISTEN)
4178 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4179 lm1 |= HCI_LM_ACCEPT;
4180 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4181 lm1 |= HCI_LM_MASTER;
4183 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4184 lm2 |= HCI_LM_ACCEPT;
4185 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4186 lm2 |= HCI_LM_MASTER;
4189 read_unlock(&chan_list_lock);
4191 return exact ? lm1 : lm2;
4194 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4196 struct l2cap_conn *conn;
4198 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4200 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4204 conn = l2cap_conn_add(hcon, status);
4206 l2cap_conn_ready(conn);
4208 l2cap_conn_del(hcon, bt_to_errno(status));
4213 static int l2cap_disconn_ind(struct hci_conn *hcon)
4215 struct l2cap_conn *conn = hcon->l2cap_data;
4217 BT_DBG("hcon %p", hcon);
4219 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4222 return conn->disc_reason;
4225 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4227 BT_DBG("hcon %p reason %d", hcon, reason);
4229 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4232 l2cap_conn_del(hcon, bt_to_errno(reason));
4237 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4239 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4242 if (encrypt == 0x00) {
4243 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4244 __clear_chan_timer(chan);
4245 __set_chan_timer(chan, HZ * 5);
4246 } else if (chan->sec_level == BT_SECURITY_HIGH)
4247 l2cap_chan_close(chan, ECONNREFUSED);
4249 if (chan->sec_level == BT_SECURITY_MEDIUM)
4250 __clear_chan_timer(chan);
4254 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4256 struct l2cap_conn *conn = hcon->l2cap_data;
4257 struct l2cap_chan *chan;
4262 BT_DBG("conn %p", conn);
4264 if (hcon->type == LE_LINK) {
4265 smp_distribute_keys(conn, 0);
4266 del_timer(&conn->security_timer);
4269 read_lock(&conn->chan_lock);
4271 list_for_each_entry(chan, &conn->chan_l, list) {
4272 struct sock *sk = chan->sk;
4276 BT_DBG("chan->scid %d", chan->scid);
4278 if (chan->scid == L2CAP_CID_LE_DATA) {
4279 if (!status && encrypt) {
4280 chan->sec_level = hcon->sec_level;
4281 l2cap_chan_ready(sk);
4288 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4293 if (!status && (chan->state == BT_CONNECTED ||
4294 chan->state == BT_CONFIG)) {
4295 l2cap_check_encryption(chan, encrypt);
4300 if (chan->state == BT_CONNECT) {
4302 struct l2cap_conn_req req;
4303 req.scid = cpu_to_le16(chan->scid);
4304 req.psm = chan->psm;
4306 chan->ident = l2cap_get_ident(conn);
4307 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4309 l2cap_send_cmd(conn, chan->ident,
4310 L2CAP_CONN_REQ, sizeof(req), &req);
4312 __clear_chan_timer(chan);
4313 __set_chan_timer(chan, HZ / 10);
4315 } else if (chan->state == BT_CONNECT2) {
4316 struct l2cap_conn_rsp rsp;
4320 if (bt_sk(sk)->defer_setup) {
4321 struct sock *parent = bt_sk(sk)->parent;
4322 res = L2CAP_CR_PEND;
4323 stat = L2CAP_CS_AUTHOR_PEND;
4325 parent->sk_data_ready(parent, 0);
4327 l2cap_state_change(chan, BT_CONFIG);
4328 res = L2CAP_CR_SUCCESS;
4329 stat = L2CAP_CS_NO_INFO;
4332 l2cap_state_change(chan, BT_DISCONN);
4333 __set_chan_timer(chan, HZ / 10);
4334 res = L2CAP_CR_SEC_BLOCK;
4335 stat = L2CAP_CS_NO_INFO;
4338 rsp.scid = cpu_to_le16(chan->dcid);
4339 rsp.dcid = cpu_to_le16(chan->scid);
4340 rsp.result = cpu_to_le16(res);
4341 rsp.status = cpu_to_le16(stat);
4342 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4349 read_unlock(&conn->chan_lock);
4354 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4356 struct l2cap_conn *conn = hcon->l2cap_data;
4359 conn = l2cap_conn_add(hcon, 0);
4364 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4366 if (!(flags & ACL_CONT)) {
4367 struct l2cap_hdr *hdr;
4368 struct l2cap_chan *chan;
4373 BT_ERR("Unexpected start frame (len %d)", skb->len);
4374 kfree_skb(conn->rx_skb);
4375 conn->rx_skb = NULL;
4377 l2cap_conn_unreliable(conn, ECOMM);
4380 /* Start fragment always begin with Basic L2CAP header */
4381 if (skb->len < L2CAP_HDR_SIZE) {
4382 BT_ERR("Frame is too short (len %d)", skb->len);
4383 l2cap_conn_unreliable(conn, ECOMM);
4387 hdr = (struct l2cap_hdr *) skb->data;
4388 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4389 cid = __le16_to_cpu(hdr->cid);
4391 if (len == skb->len) {
4392 /* Complete frame received */
4393 l2cap_recv_frame(conn, skb);
4397 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4399 if (skb->len > len) {
4400 BT_ERR("Frame is too long (len %d, expected len %d)",
4402 l2cap_conn_unreliable(conn, ECOMM);
4406 chan = l2cap_get_chan_by_scid(conn, cid);
4408 if (chan && chan->sk) {
4409 struct sock *sk = chan->sk;
4411 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4412 BT_ERR("Frame exceeding recv MTU (len %d, "
4416 l2cap_conn_unreliable(conn, ECOMM);
4422 /* Allocate skb for the complete frame (with header) */
4423 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4427 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4429 conn->rx_len = len - skb->len;
4431 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4433 if (!conn->rx_len) {
4434 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4435 l2cap_conn_unreliable(conn, ECOMM);
4439 if (skb->len > conn->rx_len) {
4440 BT_ERR("Fragment is too long (len %d, expected %d)",
4441 skb->len, conn->rx_len);
4442 kfree_skb(conn->rx_skb);
4443 conn->rx_skb = NULL;
4445 l2cap_conn_unreliable(conn, ECOMM);
4449 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4451 conn->rx_len -= skb->len;
4453 if (!conn->rx_len) {
4454 /* Complete frame received */
4455 l2cap_recv_frame(conn, conn->rx_skb);
4456 conn->rx_skb = NULL;
4465 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4467 struct l2cap_chan *c;
4469 read_lock_bh(&chan_list_lock);
4471 list_for_each_entry(c, &chan_list, global_l) {
4472 struct sock *sk = c->sk;
4474 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4475 batostr(&bt_sk(sk)->src),
4476 batostr(&bt_sk(sk)->dst),
4477 c->state, __le16_to_cpu(c->psm),
4478 c->scid, c->dcid, c->imtu, c->omtu,
4479 c->sec_level, c->mode);
4482 read_unlock_bh(&chan_list_lock);
4487 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4489 return single_open(file, l2cap_debugfs_show, inode->i_private);
4492 static const struct file_operations l2cap_debugfs_fops = {
4493 .open = l2cap_debugfs_open,
4495 .llseek = seq_lseek,
4496 .release = single_release,
4499 static struct dentry *l2cap_debugfs;
4501 static struct hci_proto l2cap_hci_proto = {
4503 .id = HCI_PROTO_L2CAP,
4504 .connect_ind = l2cap_connect_ind,
4505 .connect_cfm = l2cap_connect_cfm,
4506 .disconn_ind = l2cap_disconn_ind,
4507 .disconn_cfm = l2cap_disconn_cfm,
4508 .security_cfm = l2cap_security_cfm,
4509 .recv_acldata = l2cap_recv_acldata
4512 int __init l2cap_init(void)
4516 err = l2cap_init_sockets();
4520 err = hci_register_proto(&l2cap_hci_proto);
4522 BT_ERR("L2CAP protocol registration failed");
4523 bt_sock_unregister(BTPROTO_L2CAP);
4528 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4529 bt_debugfs, NULL, &l2cap_debugfs_fops);
4531 BT_ERR("Failed to create L2CAP debug file");
4537 l2cap_cleanup_sockets();
4541 void l2cap_exit(void)
4543 debugfs_remove(l2cap_debugfs);
4545 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4546 BT_ERR("L2CAP protocol unregistration failed");
4548 l2cap_cleanup_sockets();
4551 module_param(disable_ertm, bool, 0644);
4552 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4554 module_param(enable_hs, bool, 0644);
4555 MODULE_PARM_DESC(enable_hs, "Enable High Speed");