2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Upper bound on credits a peer may hold in LE credit-based flow control. */
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Feature mask and fixed-channel map advertised in Information responses. */
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global registry of every L2CAP channel, guarded by chan_list_lock. */
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
/* Defaults used when initializing LE credit-based channels. */
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
/* Forward declarations; definitions appear later in this file. */
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
/* Translate an HCI address type into the bdaddr type exposed to L2CAP users.
 * On LE links, public maps to BDADDR_LE_PUBLIC and anything else to
 * BDADDR_LE_RANDOM.  NOTE(review): the non-LE return path is elided from
 * this excerpt.
 */
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
80 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID.  Unlocked variant:
 * callers serialize via conn->chan_lock (see l2cap_get_chan_by_dcid).
 */
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
87 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.  Unlocked variant:
 * callers serialize via conn->chan_lock (see l2cap_get_chan_by_scid).
 */
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
111 struct l2cap_chan *c;
/* Search under conn->chan_lock; locking of the found channel itself
 * happens in lines elided from this excerpt. */
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
117 mutex_unlock(&conn->chan_lock);
122 /* Find channel with given DCID.
123 * Returns locked channel.
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
128 struct l2cap_chan *c;
/* Same pattern as l2cap_get_chan_by_scid(), keyed on destination CID. */
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
134 mutex_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the identifier of its outstanding
 * signalling command.  Unlocked variant; callers hold conn->chan_lock.
 */
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
142 struct l2cap_chan *c;
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
154 struct l2cap_chan *c;
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
160 mutex_unlock(&conn->chan_lock);
/* Find a globally registered channel bound to @psm on source address @src.
 * Caller holds chan_list_lock.  Matches on sport, so this detects an
 * already-claimed source PSM.
 */
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
167 struct l2cap_chan *c;
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to @psm on source address @src.  A non-zero @psm that is
 * already taken is rejected; otherwise (psm == 0 path, partially elided
 * here) the first free odd PSM in the dynamic range 0x1001..0x10ff is
 * auto-assigned to both chan->psm and chan->sport.
 */
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
180 write_lock(&chan_list_lock);
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd (LSB of the least significant octet must be 1). */
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
205 write_unlock(&chan_list_lock);
/* Bind @chan to a fixed source CID; assignment lines are elided here,
 * only the chan_list_lock critical section is visible.
 */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
215 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn.  The end of the
 * dynamic range differs between LE and BR/EDR links.
 */
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the channel owner with err == 0. */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
243 chan->ops->state_change(chan, state, 0);
/* State change that also carries an error code to the channel owner.
 * NOTE(review): the actual state assignment is elided from this excerpt;
 * only the callback with the (unchanged-looking) chan->state is visible.
 */
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 chan->ops->state_change(chan, chan->state, err);
/* Report @err to the channel owner without changing the channel state. */
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending and a retransmission timeout is configured.
 */
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cancelled first
 * since the two are mutually exclusive.
 */
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying tx sequence @seq. */
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 and removed from the head in constant time, without further memory allocation.
/* Initialize a sequence list sized for @size entries.  The backing array
 * is rounded up to a power of two so (seq & mask) indexes it directly.
 */
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Empty list: no head, no tail, and every slot marked CLEAR. */
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array allocated by l2cap_seq_list_init(). */
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list);
/* O(1) membership test: a slot not equal to CLEAR means @seq is queued. */
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list.
 * The vacated slot is reset to CLEAR; when the old head's slot held the
 * TAIL sentinel the list becomes empty.
 */
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list in O(n); a no-op when the list is already empty. */
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail in O(1).  Duplicate entries are ignored; the
 * new tail slot is marked with the TAIL sentinel so pop() can detect the
 * end of the list.
 */
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from its state, notify the owner, and drop the timer's channel
 * reference.  Runs under conn->chan_lock and the channel lock.
 */
383 static void l2cap_chan_timeout(struct work_struct *work)
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
387 struct l2cap_conn *conn = chan->conn;
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
/* Established or mid-configuration channels time out as refused;
 * NOTE(review): the fallback reason for other states is elided here. */
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
403 l2cap_chan_close(chan, reason);
405 l2cap_chan_unlock(chan);
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
/* Balance the reference held on behalf of the scheduled timer. */
410 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed, refcount 1, state
 * BT_OPEN, registered on the global channel list, with the chan timer
 * wired to l2cap_chan_timeout().  Returns NULL-checked allocation
 * (check elided in this excerpt).
 */
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list and free
 * it (the kfree itself is elided from this excerpt).
 */
441 static void l2cap_chan_destroy(struct kref *kref)
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445 BT_DBG("chan %p", chan);
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
/* Take a reference on @c (kref_get elided from this excerpt). */
454 void l2cap_chan_hold(struct l2cap_chan *c)
456 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference on @c; the last put triggers l2cap_chan_destroy(). */
461 void l2cap_chan_put(struct l2cap_chan *c)
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable channel parameters to their protocol defaults:
 * CRC16 FCS, default ERTM retransmission/window values, low security,
 * and force-active radio policy.
 */
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->sec_level = BT_SECURITY_LOW;
477 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialize LE credit-based flow control state: no TX credits until the
 * peer grants some, a module-default RX credit count, and an MPS capped
 * by the local incoming MTU.
 */
480 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
483 chan->sdu_last_frag = NULL;
485 chan->tx_credits = 0;
486 chan->rx_credits = le_max_credits;
487 chan->mps = min_t(u16, chan->imtu, le_default_mps);
489 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * seed default extended-flow-spec parameters, take channel and hci_conn
 * references, and link the channel onto the connection's list.
 * Caller holds conn->chan_lock (see l2cap_chan_add()).
 */
492 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
495 __le16_to_cpu(chan->psm), chan->dcid);
497 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
501 switch (chan->chan_type) {
502 case L2CAP_CHAN_CONN_ORIENTED:
503 /* Alloc CID for connection-oriented socket */
504 chan->scid = l2cap_alloc_cid(conn);
505 if (conn->hcon->type == ACL_LINK)
506 chan->omtu = L2CAP_DEFAULT_MTU;
509 case L2CAP_CHAN_CONN_LESS:
510 /* Connectionless socket */
511 chan->scid = L2CAP_CID_CONN_LESS;
512 chan->dcid = L2CAP_CID_CONN_LESS;
513 chan->omtu = L2CAP_DEFAULT_MTU;
516 case L2CAP_CHAN_FIXED:
517 /* Caller will set CID and CID specific MTU values */
521 /* Raw socket can send/recv signalling messages only */
522 chan->scid = L2CAP_CID_SIGNALING;
523 chan->dcid = L2CAP_CID_SIGNALING;
524 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow specification for EFS negotiation. */
527 chan->local_id = L2CAP_BESTEFFORT_ID;
528 chan->local_stype = L2CAP_SERV_BESTEFFORT;
529 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
530 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
531 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
532 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
534 l2cap_chan_hold(chan);
536 hci_conn_hold(conn->hcon);
538 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
541 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
543 mutex_lock(&conn->chan_lock);
544 __l2cap_chan_add(conn, chan);
545 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err:
 * cancel timers, unlink from the connection, drop the references taken in
 * __l2cap_chan_add(), clean up AMP manager/high-speed links, notify the
 * owner, and purge mode-specific queues.
 */
548 void l2cap_chan_del(struct l2cap_chan *chan, int err)
550 struct l2cap_conn *conn = chan->conn;
552 __clear_chan_timer(chan);
554 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
557 struct amp_mgr *mgr = conn->hcon->amp_mgr;
558 /* Delete from channel list */
559 list_del(&chan->list);
/* Balances the l2cap_chan_hold() in __l2cap_chan_add(). */
561 l2cap_chan_put(chan);
/* A2MP uses the hci_conn without holding an extra reference. */
565 if (chan->scid != L2CAP_CID_A2MP)
566 hci_conn_drop(conn->hcon);
568 if (mgr && mgr->bredr_chan == chan)
569 mgr->bredr_chan = NULL;
/* Tear down any high-speed (AMP) logical link bound to this channel. */
572 if (chan->hs_hchan) {
573 struct hci_chan *hs_hchan = chan->hs_hchan;
575 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
576 amp_disconnect_logical_link(hs_hchan);
579 chan->ops->teardown(chan, err);
581 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of pending transmit/reassembly state. */
585 case L2CAP_MODE_BASIC:
588 case L2CAP_MODE_LE_FLOWCTL:
589 skb_queue_purge(&chan->tx_q);
592 case L2CAP_MODE_ERTM:
593 __clear_retrans_timer(chan);
594 __clear_monitor_timer(chan);
595 __clear_ack_timer(chan);
597 skb_queue_purge(&chan->srej_q);
599 l2cap_seq_list_free(&chan->srej_list);
600 l2cap_seq_list_free(&chan->retrans_list);
604 case L2CAP_MODE_STREAMING:
605 skb_queue_purge(&chan->tx_q);
/* Propagate a changed remote identity address (e.g. after LE address
 * resolution) to every channel on the connection.
 */
612 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
614 struct l2cap_conn *conn = hcon->l2cap_data;
615 struct l2cap_chan *chan;
617 mutex_lock(&conn->chan_lock);
619 list_for_each_entry(chan, &conn->chan_l, list) {
620 l2cap_chan_lock(chan);
621 bacpy(&chan->dst, &hcon->dst);
622 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
623 l2cap_chan_unlock(chan);
626 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: authorization
 * failure when setup was deferred to userspace, bad PSM otherwise.
 * Sends the LE Connection Response and moves the channel to BT_DISCONN.
 */
629 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
631 struct l2cap_conn *conn = chan->conn;
632 struct l2cap_le_conn_rsp rsp;
635 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
636 result = L2CAP_CR_AUTHORIZATION;
638 result = L2CAP_CR_BAD_PSM;
640 l2cap_state_change(chan, BT_DISCONN);
642 rsp.dcid = cpu_to_le16(chan->scid);
643 rsp.mtu = cpu_to_le16(chan->imtu);
644 rsp.mps = cpu_to_le16(chan->mps);
645 rsp.credits = cpu_to_le16(chan->rx_credits);
646 rsp.result = cpu_to_le16(result);
648 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart of l2cap_chan_le_connect_reject(): send a Connection
 * Response with a security-block or bad-PSM result and enter BT_DISCONN.
 */
652 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
654 struct l2cap_conn *conn = chan->conn;
655 struct l2cap_conn_rsp rsp;
658 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
659 result = L2CAP_CR_SEC_BLOCK;
661 result = L2CAP_CR_BAD_PSM;
663 l2cap_state_change(chan, BT_DISCONN);
/* In the response, scid/dcid are from the remote peer's perspective. */
665 rsp.scid = cpu_to_le16(chan->dcid);
666 rsp.dcid = cpu_to_le16(chan->scid);
667 rsp.result = cpu_to_le16(result);
668 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
670 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan with @reason, dispatching on the current state:
 * established connection-oriented channels send a Disconnect Request and
 * wait; channels still in connection setup send the appropriate reject;
 * everything else is deleted or simply torn down.
 */
673 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
675 struct l2cap_conn *conn = chan->conn;
677 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
679 switch (chan->state) {
681 chan->ops->teardown(chan, 0);
686 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
687 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
688 l2cap_send_disconn_req(chan, reason);
690 l2cap_chan_del(chan, reason);
/* Incoming connection still pending: reject per link type. */
694 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
695 if (conn->hcon->type == ACL_LINK)
696 l2cap_chan_connect_reject(chan);
697 else if (conn->hcon->type == LE_LINK)
698 l2cap_chan_le_connect_reject(chan);
701 l2cap_chan_del(chan, reason);
706 l2cap_chan_del(chan, reason);
710 chan->ops->teardown(chan, 0);
/* Derive the HCI authentication requirement for @chan from its channel
 * type and security level.  SDP (and 3DSP connectionless) channels are
 * downgraded to BT_SECURITY_SDP and never request bonding.
 */
715 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
717 switch (chan->chan_type) {
719 switch (chan->sec_level) {
720 case BT_SECURITY_HIGH:
721 case BT_SECURITY_FIPS:
722 return HCI_AT_DEDICATED_BONDING_MITM;
723 case BT_SECURITY_MEDIUM:
724 return HCI_AT_DEDICATED_BONDING;
726 return HCI_AT_NO_BONDING;
729 case L2CAP_CHAN_CONN_LESS:
730 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
731 if (chan->sec_level == BT_SECURITY_LOW)
732 chan->sec_level = BT_SECURITY_SDP;
734 if (chan->sec_level == BT_SECURITY_HIGH ||
735 chan->sec_level == BT_SECURITY_FIPS)
736 return HCI_AT_NO_BONDING_MITM;
738 return HCI_AT_NO_BONDING;
740 case L2CAP_CHAN_CONN_ORIENTED:
741 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
745 if (chan->sec_level == BT_SECURITY_HIGH ||
746 chan->sec_level == BT_SECURITY_FIPS)
747 return HCI_AT_NO_BONDING_MITM;
749 return HCI_AT_NO_BONDING;
/* Default: general bonding scaled by the requested security level. */
753 switch (chan->sec_level) {
754 case BT_SECURITY_HIGH:
755 case BT_SECURITY_FIPS:
756 return HCI_AT_GENERAL_BONDING_MITM;
757 case BT_SECURITY_MEDIUM:
758 return HCI_AT_GENERAL_BONDING;
760 return HCI_AT_NO_BONDING;
766 /* Service level security */
/* Enforce the channel's security level on its underlying link: SMP for
 * LE links, HCI authentication/encryption for BR/EDR.
 */
767 int l2cap_chan_check_security(struct l2cap_chan *chan)
769 struct l2cap_conn *conn = chan->conn;
772 if (conn->hcon->type == LE_LINK)
773 return smp_conn_security(conn->hcon, chan->sec_level);
775 auth_type = l2cap_get_auth_type(chan);
777 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range under conn->lock.
 */
780 static u8 l2cap_get_ident(struct l2cap_conn *conn)
784 /* Get next available identificator.
785 * 1 - 128 are used by kernel.
786 * 129 - 199 are reserved.
787 * 200 - 254 are used by utilities like l2ping, etc.
790 spin_lock(&conn->lock);
792 if (++conn->tx_ident > 128)
797 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on @conn's HCI channel at
 * maximum priority, using a no-flush ACL start when the controller
 * supports it.
 */
802 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
805 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
808 BT_DBG("code 0x%2.2x", code);
813 if (lmp_no_flush_capable(conn->hcon->hdev))
814 flags = ACL_START_NO_FLUSH;
818 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
819 skb->priority = HCI_PRIO_MAX;
821 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than stable or wait-prepare).
 */
824 static bool __chan_is_moving(struct l2cap_chan *chan)
826 return chan->move_state != L2CAP_MOVE_STABLE &&
827 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for @chan, routing it over the high-speed (AMP)
 * link when one is active and the channel is not mid-move, otherwise over
 * the BR/EDR/LE ACL link with flush and force-active flags derived from
 * the channel's settings.
 */
830 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
832 struct hci_conn *hcon = chan->conn->hcon;
835 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
838 if (chan->hs_hcon && !__chan_is_moving(chan)) {
840 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
847 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
848 lmp_no_flush_capable(hcon->hdev))
849 flags = ACL_START_NO_FLUSH;
853 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
854 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced (ERTM) control field into @control.  S-frames
 * carry poll/supervise bits; I-frames carry SAR and tx sequence.
 */
857 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
859 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
860 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
862 if (enh & L2CAP_CTRL_FRAME_TYPE) {
865 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
866 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
873 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
874 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field (extended window size) into
 * @control; mirrors __unpack_enhanced_control() with wider fields.
 */
881 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
883 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
884 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
886 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
889 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
890 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
897 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
898 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field at the front of @skb, choosing the
 * 32-bit extended or 16-bit enhanced layout from the channel's flags.
 */
905 static inline void __unpack_control(struct l2cap_chan *chan,
908 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
909 __unpack_extended_control(get_unaligned_le32(skb->data),
910 &bt_cb(skb)->control);
911 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
913 __unpack_enhanced_control(get_unaligned_le16(skb->data),
914 &bt_cb(skb)->control);
915 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field; inverse of
 * __unpack_extended_control().
 */
919 static u32 __pack_extended_control(struct l2cap_ctrl *control)
923 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
924 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
926 if (control->sframe) {
927 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
928 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
929 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
931 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
932 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control field; inverse of
 * __unpack_enhanced_control().
 */
938 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
942 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
943 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
945 if (control->sframe) {
946 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
947 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
948 packed |= L2CAP_CTRL_FRAME_TYPE;
950 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
951 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into @skb just after the basic L2CAP
 * header, using the extended or enhanced layout per the channel's flags.
 */
957 static inline void __pack_control(struct l2cap_chan *chan,
958 struct l2cap_ctrl *control,
961 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
962 put_unaligned_le32(__pack_extended_control(control),
963 skb->data + L2CAP_HDR_SIZE);
965 put_unaligned_le16(__pack_enhanced_control(control),
966 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM frame header: extended when FLAG_EXT_CTRL is set,
 * enhanced otherwise.
 */
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
973 return L2CAP_EXT_HDR_SIZE;
975 return L2CAP_ENH_HDR_SIZE;
/* Allocate and fill an S-frame PDU for @chan: basic L2CAP header, the
 * given (already-packed) control field, and an optional CRC16 FCS over
 * the frame so far.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
978 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
982 struct l2cap_hdr *lh;
983 int hlen = __ertm_hdr_size(chan);
985 if (chan->fcs == L2CAP_FCS_CRC16)
986 hlen += L2CAP_FCS_SIZE;
988 skb = bt_skb_alloc(hlen, GFP_KERNEL);
991 return ERR_PTR(-ENOMEM);
993 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
994 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
995 lh->cid = cpu_to_le16(chan->dcid);
997 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
998 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1000 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers the header and control field written above. */
1002 if (chan->fcs == L2CAP_FCS_CRC16) {
1003 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1004 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1007 skb->priority = HCI_PRIO_MAX;
/* Build and send an ERTM supervisory frame described by @control, keeping
 * the RNR-sent flag and acknowledgment bookkeeping (last_acked_seq, ack
 * timer) consistent.  No-op for I-frame controls or while the channel is
 * moving between controllers.
 */
1011 static void l2cap_send_sframe(struct l2cap_chan *chan,
1012 struct l2cap_ctrl *control)
1014 struct sk_buff *skb;
1017 BT_DBG("chan %p, control %p", chan, control);
1019 if (!control->sframe)
1022 if (__chan_is_moving(chan))
/* Piggy-back a pending F-bit when one is owed (condition partially
 * elided in this excerpt). */
1025 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1029 if (control->super == L2CAP_SUPER_RR)
1030 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1031 else if (control->super == L2CAP_SUPER_RNR)
1032 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge up to reqseq, so the ack timer can be stopped. */
1034 if (control->super != L2CAP_SUPER_SREJ) {
1035 chan->last_acked_seq = control->reqseq;
1036 __clear_ack_timer(chan);
1039 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1040 control->final, control->poll, control->super);
1042 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1043 control_field = __pack_extended_control(control);
1045 control_field = __pack_enhanced_control(control);
1047 skb = l2cap_create_sframe_pdu(chan, control_field);
1049 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready, when locally busy) S-frame
 * acknowledging buffer_seq, with the poll bit set as requested.
 */
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1054 struct l2cap_ctrl control;
1056 BT_DBG("chan %p, poll %d", chan, poll);
1058 memset(&control, 0, sizeof(control));
1060 control.poll = poll;
1062 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1063 control.super = L2CAP_SUPER_RNR;
1065 control.super = L2CAP_SUPER_RR;
1067 control.reqseq = chan->buffer_seq;
1068 l2cap_send_sframe(chan, &control);
/* True when no Connect Request is outstanding for this channel. */
1071 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1073 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether @chan may be created on an AMP controller: high speed
 * must be enabled, the peer must advertise the A2MP fixed channel, at
 * least one non-BR/EDR controller must be up, and the channel policy must
 * prefer AMP.
 */
1076 static bool __amp_capable(struct l2cap_chan *chan)
1078 struct l2cap_conn *conn = chan->conn;
1079 struct hci_dev *hdev;
1080 bool amp_available = false;
1082 if (!conn->hs_enabled)
1085 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1088 read_lock(&hci_dev_list_lock);
1089 list_for_each_entry(hdev, &hci_dev_list, list) {
1090 if (hdev->amp_type != AMP_TYPE_BREDR &&
1091 test_bit(HCI_UP, &hdev->flags)) {
1092 amp_available = true;
1096 read_unlock(&hci_dev_list_lock);
1098 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1099 return amp_available;
/* Validate the channel's extended flow specification; the actual checks
 * are elided from this excerpt.
 */
1104 static bool l2cap_check_efs(struct l2cap_chan *chan)
1106 /* Check EFS parameters */
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending; the allocated identifier is stored for matching the response.
 */
1110 void l2cap_send_conn_req(struct l2cap_chan *chan)
1112 struct l2cap_conn *conn = chan->conn;
1113 struct l2cap_conn_req req;
1115 req.scid = cpu_to_le16(chan->scid);
1116 req.psm = chan->psm;
1118 chan->ident = l2cap_get_ident(conn);
1120 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1122 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting the AMP controller @amp_id;
 * AMP counterpart of l2cap_send_conn_req().
 */
1125 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1127 struct l2cap_create_chan_req req;
1128 req.scid = cpu_to_le16(chan->scid);
1129 req.psm = chan->psm;
1130 req.amp_id = amp_id;
1132 chan->ident = l2cap_get_ident(chan->conn);
1134 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for a controller move: stop all timers, mark
 * queued frames for (re)transmission, reset SREJ/retransmit bookkeeping,
 * and park the state machines in XMIT / RX_STATE_MOVE with the remote
 * treated as busy until the move completes.  No-op for non-ERTM modes.
 */
1138 static void l2cap_move_setup(struct l2cap_chan *chan)
1140 struct sk_buff *skb;
1142 BT_DBG("chan %p", chan);
1144 if (chan->mode != L2CAP_MODE_ERTM)
1147 __clear_retrans_timer(chan);
1148 __clear_monitor_timer(chan);
1149 __clear_ack_timer(chan);
1151 chan->retry_count = 0;
1152 skb_queue_walk(&chan->tx_q, skb) {
1153 if (bt_cb(skb)->control.retries)
1154 bt_cb(skb)->control.retries = 1;
1159 chan->expected_tx_seq = chan->buffer_seq;
1161 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1162 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1163 l2cap_seq_list_clear(&chan->retrans_list);
1164 l2cap_seq_list_clear(&chan->srej_list);
1165 skb_queue_purge(&chan->srej_q);
1167 chan->tx_state = L2CAP_TX_STATE_XMIT;
1168 chan->rx_state = L2CAP_RX_STATE_MOVE;
1170 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return to the stable state and, for ERTM,
 * resynchronize with the peer — the initiator polls (P-bit) and waits for
 * the F-bit, while the responder waits for the peer's poll.
 */
1173 static void l2cap_move_done(struct l2cap_chan *chan)
1175 u8 move_role = chan->move_role;
1176 BT_DBG("chan %p", chan);
1178 chan->move_state = L2CAP_MOVE_STABLE;
1179 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1181 if (chan->mode != L2CAP_MODE_ERTM)
1184 switch (move_role) {
1185 case L2CAP_MOVE_ROLE_INITIATOR:
1186 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1187 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1189 case L2CAP_MOVE_ROLE_RESPONDER:
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark @chan fully connected: clear configuration flags and the channel
 * timer, suspend data flow on LE channels that have no TX credits yet,
 * then notify the owner.
 */
1195 static void l2cap_chan_ready(struct l2cap_chan *chan)
1197 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1198 chan->conf_state = 0;
1199 __clear_chan_timer(chan);
1201 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1202 chan->ops->suspend(chan);
1204 chan->state = BT_CONNECTED;
1206 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request for @chan; the
 * FLAG_LE_CONN_REQ_SENT guard makes repeated calls idempotent.
 */
1209 static void l2cap_le_connect(struct l2cap_chan *chan)
1211 struct l2cap_conn *conn = chan->conn;
1212 struct l2cap_le_conn_req req;
1214 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1217 req.psm = chan->psm;
1218 req.scid = cpu_to_le16(chan->scid);
1219 req.mtu = cpu_to_le16(chan->imtu);
1220 req.mps = cpu_to_le16(chan->mps);
1221 req.credits = cpu_to_le16(chan->rx_credits);
1223 chan->ident = l2cap_get_ident(conn);
1225 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Advance an LE channel once link security is satisfied: fixed/ATT-style
 * channels become ready immediately (path partially elided), while
 * connecting credit-based channels send their connect request.
 */
1229 static void l2cap_le_start(struct l2cap_chan *chan)
1231 struct l2cap_conn *conn = chan->conn;
1233 if (!smp_conn_security(conn->hcon, chan->sec_level))
1237 l2cap_chan_ready(chan);
1241 if (chan->state == BT_CONNECT)
1242 l2cap_le_connect(chan);
/* Kick off connection establishment via the best available transport:
 * AMP discovery when AMP is preferred and usable, the LE path for LE
 * links, or a plain BR/EDR Connection Request otherwise.
 */
1245 static void l2cap_start_connection(struct l2cap_chan *chan)
1247 if (__amp_capable(chan)) {
1248 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1249 a2mp_discover_amp(chan);
1250 } else if (chan->conn->hcon->type == LE_LINK) {
1251 l2cap_le_start(chan);
1253 l2cap_send_conn_req(chan);
/* Start @chan on its connection.  On BR/EDR the remote feature mask must
 * be known first: if it is, connect once security allows; otherwise send
 * an Information Request (feature mask) and arm the info timer.
 */
1257 static void l2cap_do_start(struct l2cap_chan *chan)
1259 struct l2cap_conn *conn = chan->conn;
1261 if (conn->hcon->type == LE_LINK) {
1262 l2cap_le_start(chan);
1266 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1267 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1270 if (l2cap_chan_check_security(chan) &&
1271 __l2cap_no_conn_pending(chan)) {
1272 l2cap_start_connection(chan);
1275 struct l2cap_info_req req;
1276 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1278 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1279 conn->info_ident = l2cap_get_ident(conn);
1281 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1283 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Non-zero when @mode (ERTM or streaming) is supported by both the local
 * feature mask and the remote @feat_mask; the guard adding ERTM/streaming
 * to the local mask is partially elided here.
 */
1288 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1290 u32 local_feat_mask = l2cap_feat_mask;
1292 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1295 case L2CAP_MODE_ERTM:
1296 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1297 case L2CAP_MODE_STREAMING:
1298 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection of @chan with error @err: stop ERTM timers on a
 * connected ERTM channel, skip the wire request for the A2MP fixed
 * channel, otherwise send a Disconnect Request; finally move the channel
 * to BT_DISCONN carrying @err.
 */
1304 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1306 struct l2cap_conn *conn = chan->conn;
1307 struct l2cap_disconn_req req;
1312 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1313 __clear_retrans_timer(chan);
1314 __clear_monitor_timer(chan);
1315 __clear_ack_timer(chan);
/* A2MP has no disconnect request on the wire. */
1318 if (chan->scid == L2CAP_CID_A2MP) {
1319 l2cap_state_change(chan, BT_DISCONN);
1323 req.dcid = cpu_to_le16(chan->dcid);
1324 req.scid = cpu_to_le16(chan->scid);
1325 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1328 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1331 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn once the link is usable
 * (e.g. after the feature-mask exchange): channels in BT_CONNECT are
 * started (or closed if their mode is unsupported and state2-device is
 * set); channels in BT_CONNECT2 get a Connection Response whose result
 * depends on security and the defer-setup flag, followed by the first
 * Configure Request on success.
 */
1332 static void l2cap_conn_start(struct l2cap_conn *conn)
1334 struct l2cap_chan *chan, *tmp;
1336 BT_DBG("conn %p", conn);
1338 mutex_lock(&conn->chan_lock);
1340 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1341 l2cap_chan_lock(chan);
1343 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1344 l2cap_chan_unlock(chan);
1348 if (chan->state == BT_CONNECT) {
1349 if (!l2cap_chan_check_security(chan) ||
1350 !__l2cap_no_conn_pending(chan)) {
1351 l2cap_chan_unlock(chan);
/* CONF_STATE2_DEVICE channels cannot fall back to basic mode. */
1355 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1356 && test_bit(CONF_STATE2_DEVICE,
1357 &chan->conf_state)) {
1358 l2cap_chan_close(chan, ECONNRESET);
1359 l2cap_chan_unlock(chan);
1363 l2cap_start_connection(chan);
1365 } else if (chan->state == BT_CONNECT2) {
1366 struct l2cap_conn_rsp rsp;
1368 rsp.scid = cpu_to_le16(chan->dcid);
1369 rsp.dcid = cpu_to_le16(chan->scid);
1371 if (l2cap_chan_check_security(chan)) {
1372 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1373 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1374 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1375 chan->ops->defer(chan);
1378 l2cap_state_change(chan, BT_CONFIG);
1379 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1380 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1383 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1384 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1387 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first Configure Request once, and only on success. */
1390 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1391 rsp.result != L2CAP_CR_SUCCESS) {
1392 l2cap_chan_unlock(chan);
1396 set_bit(CONF_REQ_SENT, &chan->conf_state);
1397 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1398 l2cap_build_conf_req(chan, buf), buf);
1399 chan->num_conf_req++;
1402 l2cap_chan_unlock(chan);
1405 mutex_unlock(&conn->chan_lock);
1408 /* Find socket with cid and source/destination bdaddr.
1409 * Returns closest match, locked.
/* Exact src+dst matches win immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found so far is remembered in c1.
 */
1411 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1415 struct l2cap_chan *c, *c1 = NULL;
1417 read_lock(&chan_list_lock);
1419 list_for_each_entry(c, &chan_list, global_l) {
1420 if (state && c->state != state)
1423 if (c->scid == cid) {
1424 int src_match, dst_match;
1425 int src_any, dst_any;
1428 src_match = !bacmp(&c->src, src);
1429 dst_match = !bacmp(&c->dst, dst);
1430 if (src_match && dst_match) {
1431 read_unlock(&chan_list_lock);
1436 src_any = !bacmp(&c->src, BDADDR_ANY);
1437 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1438 if ((src_match && dst_any) || (src_any && dst_match) ||
1439 (src_any && dst_any))
1444 read_unlock(&chan_list_lock);
/* LE link came up: if an ATT server socket is listening, spawn a child
 * channel for the new connection and attach it to conn.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))

	/* Lock the parent (listening) channel while creating the child. */
	l2cap_chan_lock(pchan);

	chan = pchan->ops->new_connection(pchan);

	/* Inherit addressing from the underlying HCI connection. */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

	l2cap_chan_unlock(pchan);
/* Connection became ready: start or finish setup on every channel that is
 * already attached to it.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channel needs no connect/config handshake here. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels are usable immediately. */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);

	/* Process any frames that were queued before the link was ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1536 /* Notify sockets that we cannot guaranty reliability anymore */
1537 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1539 struct l2cap_chan *chan;
1541 BT_DBG("conn %p", conn);
1543 mutex_lock(&conn->chan_lock);
1545 list_for_each_entry(chan, &conn->chan_l, list) {
1546 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1547 l2cap_chan_set_err(chan, err);
1550 mutex_unlock(&conn->chan_lock);
/* Information-request exchange timed out: mark the feature-mask query as
 * done and proceed with starting channels anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
/*
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 */

	/* Non-NULL list pointers mean the user is already registered. */
	if (user->list.next || user->list.prev) {

	/* conn->hchan is NULL after l2cap_conn_del() was called */

	ret = user->probe(conn, user);

	list_add(&user->list, &conn->users);

	hci_dev_unlock(hdev);
/* Explicitly unregister an l2cap_user and invoke its ->remove callback.
 * Safe to call on an already-removed user (detected via NULL list pointers).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* NULL list pointers mean the user was never (or no longer) registered. */
	if (!user->list.next || !user->list.prev)

	list_del(&user->list);
	/* Clear the node so double-unregister is detectable. */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

	hci_dev_unlock(hdev);
1634 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1636 struct l2cap_user *user;
1638 while (!list_empty(&conn->users)) {
1639 user = list_first_entry(&conn->users, struct l2cap_user, list);
1640 list_del(&user->list);
1641 user->list.next = NULL;
1642 user->list.prev = NULL;
1643 user->remove(conn, user);
/* Tear down the L2CAP connection attached to hcon: close every channel with
 * the given error, notify users, and drop the conn reference.
 * Relies on the caller holding the parent hci_conn/hci_dev locks.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame and pending RX work. */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);
	flush_work(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels; _safe because l2cap_chan_del unlinks the entry. */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel across close so it survives chan_del. */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Abort any pending SMP pairing tied to this link. */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);

	hcon->l2cap_data = NULL;

	l2cap_conn_put(conn);
1696 static void security_timeout(struct work_struct *work)
1698 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1699 security_timer.work);
1701 BT_DBG("conn %p", conn);
1703 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1704 smp_chan_destroy(conn);
1705 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* kref release callback: last reference to the l2cap_conn dropped.
 * Releases the underlying hci_conn reference taken at creation.
 */
static void l2cap_conn_free(struct kref *ref)
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
1717 void l2cap_conn_get(struct l2cap_conn *conn)
1719 kref_get(&conn->ref);
1721 EXPORT_SYMBOL(l2cap_conn_get);
1723 void l2cap_conn_put(struct l2cap_conn *conn)
1725 kref_put(&conn->ref, l2cap_conn_free);
1727 EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		/* When a state filter is given, skip channels in any other state. */
		if (state && c->state != state)

		/* Channel's source address type must be compatible with the
		 * link type we are matching against.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact address match wins immediately. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);

			/* Otherwise remember a wildcard (BDADDR_ANY) match as
			 * the closest candidate.
			 */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))

	read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the transmit
 * state machine (which will poll the peer or disconnect).
 */
static void l2cap_monitor_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* NOTE(review): this unlock/put pair presumably sits under an
	 * early-exit guard (channel no longer usable) not visible here.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	/* Balances the reference held by the armed timer. */
	l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * transmit state machine.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* NOTE(review): this unlock/put pair presumably sits under an
	 * early-exit guard (channel no longer usable) not visible here.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	/* Balances the reference held by the armed timer. */
	l2cap_chan_put(chan);
/* Transmit all queued I-frames in streaming mode: no acknowledgements, no
 * retransmission, sequence numbers only.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Do not transmit while an AMP channel move is in progress. */
	if (__chan_is_moving(chan))

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq stays 0. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append FCS over header + payload when negotiated. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
/* Transmit as many pending I-frames as the remote TX window allows while
 * the ERTM transmit state machine is in XMIT. Returns the number sent
 * (per the trailing debug output) or a negative error.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)

	/* Peer signalled receiver-not-ready: hold transmission. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (__chan_is_moving(chan))

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the F-bit if one is owed to the peer. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		/* Every I-frame also acknowledges everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;

		/* Advance the send pointer; original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;

			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every frame whose sequence number is on chan->retrans_list,
 * enforcing the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (__chan_is_moving(chan))

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
			BT_DBG("Error: Can't retransmit seq %d, frame missing",

		bt_cb(skb)->control.retries++;
		/* Work on a stack copy of the control block; reqseq/F-bit
		 * are refreshed below for the retransmission.
		 */
		control = bt_cb(skb)->control;

		/* Give up and disconnect once the retry limit is exceeded. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);

			tx_skb = skb_clone(skb, GFP_KERNEL);

			l2cap_seq_list_clear(&chan->retrans_list);

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

		/* Recompute FCS over the rewritten control field. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
2008 static void l2cap_retransmit(struct l2cap_chan *chan,
2009 struct l2cap_ctrl *control)
2011 BT_DBG("chan %p, control %p", chan, control);
2013 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2014 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq onward (REJ recovery). */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmission list from scratch. */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (chan->unacked_frames) {
		/* Locate the first frame to retransmit ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)

		/* ... then queue it and everything after it, up to the
		 * first not-yet-sent frame.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);

		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: prefer piggybacking on outgoing data,
 * send RNR when locally busy, send RR when the ack window fills up,
 * otherwise just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));

	/* Locally busy: tell the peer to stop with an RNR S-frame. */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);

		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);

			/* Below threshold: defer the ack via the timer. */
			__set_ack_timer(chan);
/* Copy user data from msg into skb, chaining extra fragments (sized to the
 * connection MTU) onto skb's frag_list when len exceeds count.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;

	/* First chunk goes directly into the head skb. */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(tmp);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		(*frag)->priority = skb->priority;

		/* Account the fragment in the head skb's totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by the
 * 2-byte PSM, then the payload copied from msg. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* Head skb holds at most one connection-MTU worth of data. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers PSM + payload. */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload.
 * Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build one ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU length for the first segment,
 * payload, and room for FCS. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

		/* First segment of a segmented SDU carries the SDU length. */
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));

		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
/* Split an outgoing SDU into ERTM/streaming PDUs and queue them on
 * seg_queue, tagging each with the proper SAR value
 * (UNSEGMENTED / START / CONTINUE / END).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits into one PDU. */
		sar = L2CAP_SAR_UNSEGMENTED;

		sar = L2CAP_SAR_START;

		/* First segment also carries the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
			/* On failure, discard everything queued so far. */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

			/* Subsequent segments regain the SDU-length bytes. */
			pdu_len += L2CAP_SDULEN_SIZE;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;

			sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow control PDU: L2CAP header, optional SDU
 * length (first segment only), then payload. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   size_t len, u16 sdulen)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

		/* First segment of a segmented SDU carries the SDU length. */
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Split an outgoing SDU into LE flow-control PDUs and queue them on
 * seg_queue. Only the first PDU carries the SDU length field.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	/* Remote MPS bounds each PDU's payload. */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First segment loses room to the SDU length field. */
	pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
			/* On failure, discard everything queued so far. */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		__skb_queue_tail(seg_queue, skb);

			/* Subsequent segments regain the SDU-length bytes. */
			pdu_len += L2CAP_SDULEN_SIZE;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type/mode: connectionless, LE flow control, basic, or ERTM/streaming.
 * Returns bytes queued (via err) or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;

	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* No credits: data must wait until the peer grants more. */
		if (!chan->tx_credits)

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (blocking alloc). */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send one PDU per available credit. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);

			l2cap_streaming_send(chan, &seg_queue);

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);

		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between the next
 * expected frame and txseq (the out-of-order frame just received), and
 * record each on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames not already buffered out of order. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);

	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the newest outstanding missing frame (list tail),
 * if any SREJ is still pending.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding: the SREJ list is empty. */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all outstanding missing frames except txseq, rotating
 * each entry back onto the list so it remains pending.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the satisfied sequence number or an empty list. */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the entry pending by re-appending it. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement (reqseq): free every transmitted
 * frame up to (but not including) reqseq and update the ack bookkeeping.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
	struct sk_buff *acked_skb;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
			/* Frame is confirmed delivered; release it. */
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no more retransmissions pending. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
2635 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2637 BT_DBG("chan %p", chan);
2639 chan->expected_tx_seq = chan->buffer_seq;
2640 l2cap_seq_list_clear(&chan->srej_list);
2641 skb_queue_purge(&chan->srej_q);
2642 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: handle data requests, local
 * busy transitions, acks, explicit polls and retransmission timeouts.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);

	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Sends RNR since we are now locally busy. */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		/* If we sent an RNR, poll the peer to resynchronize. */
		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await the F-bit. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
/* ERTM transmit state machine, WAIT_F state: a poll was sent and we are
 * waiting for a frame with the F-bit set before resuming transmission.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);

	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Sends RNR since we are now locally busy. */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_RECV_FBIT:
		/* Poll answered: return to XMIT and resume sending. */
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);

	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */

	case L2CAP_EV_MONITOR_TO:
		/* No response to the poll: retry up to max_tx times, then
		 * give up and disconnect.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;

			l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an event into the ERTM transmit state machine based on the
 * channel's current TX state.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);

	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
2814 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2815 struct l2cap_ctrl *control)
2817 BT_DBG("chan %p, control %p", chan, control);
2818 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2821 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2822 struct l2cap_ctrl *control)
2824 BT_DBG("chan %p, control %p", chan, control);
2825 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only raw channels receive copies. */
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)

		nskb = skb_clone(skb, GFP_KERNEL);

		/* On delivery failure, the clone must be freed. */
		if (chan->ops->recv(chan, nskb))

	mutex_unlock(&conn->chan_lock);
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill a signalling-command skb: L2CAP header on the proper
 * signalling CID, command header, then dlen bytes of payload (fragmented
 * onto frag_list when larger than the connection MTU).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least hold the two headers. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);

		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		/* First chunk of payload fits in the head skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type, payload length
 * and value (1/2/4-byte values decoded, larger ones returned as a pointer
 * cast into *val). Returns the total bytes consumed for this option.
 */
2923 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2926 struct l2cap_conf_opt *opt = *ptr;
2929 len = L2CAP_CONF_OPT_SIZE + opt->len;
2937 *val = *((u8 *) opt->val);
2941 *val = get_unaligned_le16(opt->val);
2945 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the raw payload. */
2949 *val = (unsigned long) opt->val;
2953 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance *ptr
 * past it. 1/2/4-byte values are stored inline; anything longer is
 * memcpy'd from the pointer passed in val.
 */
2957 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2959 struct l2cap_conf_opt *opt = *ptr;
2961 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2968 *((u8 *) opt->val) = val;
2972 put_unaligned_le16(val, opt->val);
2976 put_unaligned_le32(val, opt->val);
2980 memcpy(opt->val, (void *) val, len);
2984 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local parameters; field choices depend on the channel mode.
 */
2987 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2989 struct l2cap_conf_efs efs;
2991 switch (chan->mode) {
2992 case L2CAP_MODE_ERTM:
2993 efs.id = chan->local_id;
2994 efs.stype = chan->local_stype;
2995 efs.msdu = cpu_to_le16(chan->local_msdu);
2996 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2997 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2998 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
/* Streaming mode advertises a best-effort service type. */
3001 case L2CAP_MODE_STREAMING:
3003 efs.stype = L2CAP_SERV_BESTEFFORT;
3004 efs.msdu = cpu_to_le16(chan->local_msdu);
3005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3014 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3015 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames remain
 * unacknowledged, send an RR/RNR to ack them. Drops the channel
 * reference taken when the work was scheduled.
 */
3018 static void l2cap_ack_timeout(struct work_struct *work)
3020 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3024 BT_DBG("chan %p", chan);
3026 l2cap_chan_lock(chan);
/* Frames received but not yet acknowledged to the peer. */
3028 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3029 chan->last_acked_seq);
3032 l2cap_send_rr_or_rnr(chan, 0);
3034 l2cap_chan_unlock(chan);
3035 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a channel. For ERTM mode
 * also initialize the retransmission/monitor/ack timers, the SREJ queue,
 * and the srej/retrans sequence lists. Returns 0 or a negative errno
 * from l2cap_seq_list_init().
 */
3038 int l2cap_ertm_init(struct l2cap_chan *chan)
3042 chan->next_tx_seq = 0;
3043 chan->expected_tx_seq = 0;
3044 chan->expected_ack_seq = 0;
3045 chan->unacked_frames = 0;
3046 chan->buffer_seq = 0;
3047 chan->frames_sent = 0;
3048 chan->last_acked_seq = 0;
3050 chan->sdu_last_frag = NULL;
3053 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out stable on the BR/EDR controller. */
3055 chan->local_amp_id = AMP_ID_BREDR;
3056 chan->move_id = AMP_ID_BREDR;
3057 chan->move_state = L2CAP_MOVE_STABLE;
3058 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3060 if (chan->mode != L2CAP_MODE_ERTM)
3063 chan->rx_state = L2CAP_RX_STATE_RECV;
3064 chan->tx_state = L2CAP_TX_STATE_XMIT;
3066 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3067 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3068 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3070 skb_queue_head_init(&chan->srej_q);
3072 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* On failure of the second list, free the first to avoid a leak. */
3076 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3078 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * advertised support for it, otherwise fall back to basic mode.
 */
3083 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3086 case L2CAP_MODE_STREAMING:
3087 case L2CAP_MODE_ERTM:
3088 if (l2cap_mode_supported(mode, remote_feat_mask))
3092 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) enabled and
 * the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
3096 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3098 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with high-speed (AMP) enabled and
 * the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
3101 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3103 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout; on BR/EDR the spec defaults are used.
 */
3106 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3107 struct l2cap_conf_rfc *rfc)
3109 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3110 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3112 /* Class 1 devices have must have ERTM timeouts
3113 * exceeding the Link Supervision Timeout. The
3114 * default Link Supervision Timeout for AMP
3115 * controllers is 10 seconds.
3117 * Class 1 devices use 0xffffffff for their
3118 * best-effort flush timeout, so the clamping logic
3119 * will result in a timeout that meets the above
3120 * requirement. ERTM timeouts are 16-bit values, so
3121 * the maximum timeout is 65.535 seconds.
3124 /* Convert timeout to milliseconds and round */
3125 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3127 /* This is the recommended formula for class 2 devices
3128 * that start ERTM timers when packets are sent to the
3131 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; elided line presumably sets 0xffff. */
3133 if (ertm_to > 0xffff)
3136 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3137 rfc->monitor_timeout = rfc->retrans_timeout;
3139 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3140 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Size the transmit window: enable the extended control field when the
 * requested window exceeds the default and EWS is supported, otherwise
 * clamp the window to the classic default. ack_win mirrors tx_win.
 */
3144 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3146 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3147 __l2cap_ews_supported(chan->conn)) {
3148 /* use extended control field */
3149 set_bit(FLAG_EXT_CTRL, &chan->flags);
3150 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3152 chan->tx_win = min_t(u16, chan->tx_win,
3153 L2CAP_DEFAULT_TX_WINDOW);
3154 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3156 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request into 'data': MTU option if
 * non-default, then mode-specific RFC/EFS/EWS/FCS options. Returns the
 * total request length (elided return presumably ptr - data).
 */
3159 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3161 struct l2cap_conf_req *req = data;
3162 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3163 void *ptr = req->data;
3166 BT_DBG("chan %p", chan);
/* Only select a mode on the very first request/response exchange. */
3168 if (chan->num_conf_req || chan->num_conf_rsp)
3171 switch (chan->mode) {
3172 case L2CAP_MODE_STREAMING:
3173 case L2CAP_MODE_ERTM:
3174 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3177 if (__l2cap_efs_supported(chan->conn))
3178 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3182 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3187 if (chan->imtu != L2CAP_DEFAULT_MTU)
3188 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3190 switch (chan->mode) {
3191 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is no
 * point sending an RFC option at all.
 */
3192 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3193 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3196 rfc.mode = L2CAP_MODE_BASIC;
3198 rfc.max_transmit = 0;
3199 rfc.retrans_timeout = 0;
3200 rfc.monitor_timeout = 0;
3201 rfc.max_pdu_size = 0;
3203 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3204 (unsigned long) &rfc);
3207 case L2CAP_MODE_ERTM:
3208 rfc.mode = L2CAP_MODE_ERTM;
3209 rfc.max_transmit = chan->max_tx;
3211 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the connection MTU minus header overhead. */
3213 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3214 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3216 rfc.max_pdu_size = cpu_to_le16(size);
3218 l2cap_txwin_setup(chan);
3220 rfc.txwin_size = min_t(u16, chan->tx_win,
3221 L2CAP_DEFAULT_TX_WINDOW);
3223 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3224 (unsigned long) &rfc);
3226 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3227 l2cap_add_opt_efs(&ptr, chan);
3229 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3230 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS when the peer supports the FCS option. */
3233 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3234 if (chan->fcs == L2CAP_FCS_NONE ||
3235 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3236 chan->fcs = L2CAP_FCS_NONE;
3237 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3242 case L2CAP_MODE_STREAMING:
3243 l2cap_txwin_setup(chan);
3244 rfc.mode = L2CAP_MODE_STREAMING;
3246 rfc.max_transmit = 0;
3247 rfc.retrans_timeout = 0;
3248 rfc.monitor_timeout = 0;
3250 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3251 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3253 rfc.max_pdu_size = cpu_to_le16(size);
3255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3256 (unsigned long) &rfc);
3258 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3259 l2cap_add_opt_efs(&ptr, chan);
3261 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3262 if (chan->fcs == L2CAP_FCS_NONE ||
3263 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3264 chan->fcs = L2CAP_FCS_NONE;
3265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3271 req->dcid = cpu_to_le16(chan->dcid);
3272 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req) and build
 * the Configure Response into 'data'. Negotiates MTU, RFC mode, FCS, EFS
 * and EWS options. Returns the response length (elided), or
 * -ECONNREFUSED when the modes cannot be reconciled.
 */
3277 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3279 struct l2cap_conf_rsp *rsp = data;
3280 void *ptr = rsp->data;
3281 void *req = chan->conf_req;
3282 int len = chan->conf_len;
3283 int type, hint, olen;
3285 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3286 struct l2cap_conf_efs efs;
3288 u16 mtu = L2CAP_DEFAULT_MTU;
3289 u16 result = L2CAP_CONF_SUCCESS;
3292 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3294 while (len >= L2CAP_CONF_OPT_SIZE) {
3295 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set means an unknown option may be silently ignored. */
3297 hint = type & L2CAP_CONF_HINT;
3298 type &= L2CAP_CONF_MASK;
3301 case L2CAP_CONF_MTU:
3305 case L2CAP_CONF_FLUSH_TO:
3306 chan->flush_to = val;
3309 case L2CAP_CONF_QOS:
3312 case L2CAP_CONF_RFC:
3313 if (olen == sizeof(rfc))
3314 memcpy(&rfc, (void *) val, olen);
3317 case L2CAP_CONF_FCS:
3318 if (val == L2CAP_FCS_NONE)
3319 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3322 case L2CAP_CONF_EFS:
3324 if (olen == sizeof(efs))
3325 memcpy(&efs, (void *) val, olen);
3328 case L2CAP_CONF_EWS:
/* EWS requires high-speed support; otherwise refuse outright. */
3329 if (!chan->conn->hs_enabled)
3330 return -ECONNREFUSED;
3332 set_bit(FLAG_EXT_CTRL, &chan->flags);
3333 set_bit(CONF_EWS_RECV, &chan->conf_state);
3334 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3335 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3342 result = L2CAP_CONF_UNKNOWN;
3343 *((u8 *) ptr++) = type;
3348 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3351 switch (chan->mode) {
3352 case L2CAP_MODE_STREAMING:
3353 case L2CAP_MODE_ERTM:
3354 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3355 chan->mode = l2cap_select_mode(rfc.mode,
3356 chan->conn->feat_mask);
3361 if (__l2cap_efs_supported(chan->conn))
3362 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3364 return -ECONNREFUSED;
3367 if (chan->mode != rfc.mode)
3368 return -ECONNREFUSED;
/* Mode mismatch on a later pass: reject once, refuse the second time. */
3374 if (chan->mode != rfc.mode) {
3375 result = L2CAP_CONF_UNACCEPT;
3376 rfc.mode = chan->mode;
3378 if (chan->num_conf_rsp == 1)
3379 return -ECONNREFUSED;
3381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3382 (unsigned long) &rfc);
3385 if (result == L2CAP_CONF_SUCCESS) {
3386 /* Configure output options and let the other side know
3387 * which ones we don't like. */
3389 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3390 result = L2CAP_CONF_UNACCEPT;
3393 set_bit(CONF_MTU_DONE, &chan->conf_state);
3395 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is no-traffic. */
3398 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3399 efs.stype != L2CAP_SERV_NOTRAFIC &&
3400 efs.stype != chan->local_stype) {
3402 result = L2CAP_CONF_UNACCEPT;
3404 if (chan->num_conf_req >= 1)
3405 return -ECONNREFUSED;
3407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3409 (unsigned long) &efs);
3411 /* Send PENDING Conf Rsp */
3412 result = L2CAP_CONF_PENDING;
3413 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3418 case L2CAP_MODE_BASIC:
3419 chan->fcs = L2CAP_FCS_NONE;
3420 set_bit(CONF_MODE_DONE, &chan->conf_state);
3423 case L2CAP_MODE_ERTM:
/* EWS, when received, already set remote_tx_win above. */
3424 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3425 chan->remote_tx_win = rfc.txwin_size;
3427 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3429 chan->remote_max_tx = rfc.max_transmit;
3431 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3432 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3433 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3434 rfc.max_pdu_size = cpu_to_le16(size);
3435 chan->remote_mps = size;
3437 __l2cap_set_ertm_timeouts(chan, &rfc);
3439 set_bit(CONF_MODE_DONE, &chan->conf_state);
3441 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3442 sizeof(rfc), (unsigned long) &rfc);
3444 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3445 chan->remote_id = efs.id;
3446 chan->remote_stype = efs.stype;
3447 chan->remote_msdu = le16_to_cpu(efs.msdu);
3448 chan->remote_flush_to =
3449 le32_to_cpu(efs.flush_to);
3450 chan->remote_acc_lat =
3451 le32_to_cpu(efs.acc_lat);
3452 chan->remote_sdu_itime =
3453 le32_to_cpu(efs.sdu_itime);
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3456 (unsigned long) &efs);
3460 case L2CAP_MODE_STREAMING:
3461 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3462 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3463 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3464 rfc.max_pdu_size = cpu_to_le16(size);
3465 chan->remote_mps = size;
3467 set_bit(CONF_MODE_DONE, &chan->conf_state);
3469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3470 (unsigned long) &rfc);
3475 result = L2CAP_CONF_UNACCEPT;
3477 memset(&rfc, 0, sizeof(rfc));
3478 rfc.mode = chan->mode;
3481 if (result == L2CAP_CONF_SUCCESS)
3482 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3484 rsp->scid = cpu_to_le16(chan->dcid);
3485 rsp->result = cpu_to_le16(result);
3486 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into 'data'. Updates *result and channel parameters
 * (MTU, flush timeout, RFC, EWS, EFS, FCS). Returns the new request
 * length (elided), or -ECONNREFUSED on an irreconcilable option.
 */
3491 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3492 void *data, u16 *result)
3494 struct l2cap_conf_req *req = data;
3495 void *ptr = req->data;
3498 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3499 struct l2cap_conf_efs efs;
3501 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3503 while (len >= L2CAP_CONF_OPT_SIZE) {
3504 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3507 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: unaccept and
 * counter with the minimum.
 */
3508 if (val < L2CAP_DEFAULT_MIN_MTU) {
3509 *result = L2CAP_CONF_UNACCEPT;
3510 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3516 case L2CAP_CONF_FLUSH_TO:
3517 chan->flush_to = val;
3518 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3522 case L2CAP_CONF_RFC:
3523 if (olen == sizeof(rfc))
3524 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3526 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3527 rfc.mode != chan->mode)
3528 return -ECONNREFUSED;
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3533 sizeof(rfc), (unsigned long) &rfc);
3536 case L2CAP_CONF_EWS:
3537 chan->ack_win = min_t(u16, val, chan->ack_win);
3538 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3542 case L2CAP_CONF_EFS:
3543 if (olen == sizeof(efs))
3544 memcpy(&efs, (void *)val, olen);
3546 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3547 efs.stype != L2CAP_SERV_NOTRAFIC &&
3548 efs.stype != chan->local_stype)
3549 return -ECONNREFUSED;
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3552 (unsigned long) &efs);
3555 case L2CAP_CONF_FCS:
3556 if (*result == L2CAP_CONF_PENDING)
3557 if (val == L2CAP_FCS_NONE)
3558 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here. */
3564 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3565 return -ECONNREFUSED;
3567 chan->mode = rfc.mode;
3569 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3571 case L2CAP_MODE_ERTM:
3572 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3573 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3574 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3575 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3576 chan->ack_win = min_t(u16, chan->ack_win,
3579 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3580 chan->local_msdu = le16_to_cpu(efs.msdu);
3581 chan->local_sdu_itime =
3582 le32_to_cpu(efs.sdu_itime);
3583 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3584 chan->local_flush_to =
3585 le32_to_cpu(efs.flush_to);
3589 case L2CAP_MODE_STREAMING:
3590 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3594 req->dcid = cpu_to_le16(chan->dcid);
3595 req->flags = __constant_cpu_to_le16(0);
/* Fill a minimal Configure Response (scid/result/flags, no options) into
 * 'data'. Return value (length) is on an elided line.
 */
3600 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3601 u16 result, u16 flags)
3603 struct l2cap_conf_rsp *rsp = data;
3604 void *ptr = rsp->data;
3606 BT_DBG("chan %p", chan);
3608 rsp->scid = cpu_to_le16(chan->dcid);
3609 rsp->result = cpu_to_le16(result);
3610 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed, advertising our CID, MTU, MPS and
 * initial credits with a SUCCESS result.
 */
3615 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3617 struct l2cap_le_conn_rsp rsp;
3618 struct l2cap_conn *conn = chan->conn;
3620 BT_DBG("chan %p", chan);
3622 rsp.dcid = cpu_to_le16(chan->scid);
3623 rsp.mtu = cpu_to_le16(chan->imtu);
3624 rsp.mps = cpu_to_le16(chan->mps);
3625 rsp.credits = cpu_to_le16(chan->rx_credits);
3626 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3628 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or Create Channel) Response with
 * SUCCESS, then kick off configuration by sending our Configure Request
 * if one has not been sent yet.
 */
3632 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3634 struct l2cap_conn_rsp rsp;
3635 struct l2cap_conn *conn = chan->conn;
3639 rsp.scid = cpu_to_le16(chan->dcid);
3640 rsp.dcid = cpu_to_le16(chan->scid);
3641 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3642 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP;
 * the selecting condition is on an elided line.
 */
3645 rsp_code = L2CAP_CREATE_CHAN_RSP;
3647 rsp_code = L2CAP_CONN_RSP;
3649 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3651 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3653 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3656 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3657 l2cap_build_conf_req(chan, buf), buf);
3658 chan->num_conf_req++;
/* Extract final ERTM/streaming parameters from a successful Configure
 * Response, falling back to sane defaults when the peer omitted the RFC
 * or EWS options.
 */
3661 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3665 /* Use sane default values in case a misbehaving remote device
3666 * did not send an RFC or extended window size option.
3668 u16 txwin_ext = chan->ack_win;
3669 struct l2cap_conf_rfc rfc = {
3671 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3672 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3673 .max_pdu_size = cpu_to_le16(chan->imtu),
3674 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3677 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to extract for basic-mode channels. */
3679 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3682 while (len >= L2CAP_CONF_OPT_SIZE) {
3683 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3686 case L2CAP_CONF_RFC:
3687 if (olen == sizeof(rfc))
3688 memcpy(&rfc, (void *)val, olen);
3690 case L2CAP_CONF_EWS:
3697 case L2CAP_MODE_ERTM:
3698 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3699 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the extended or classic window size
 * depending on whether extended control is in use.
 */
3701 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3702 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3704 chan->ack_win = min_t(u16, chan->ack_win,
3707 case L2CAP_MODE_STREAMING:
3708 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it rejects our outstanding Information
 * Request (matching ident), stop the info timer, mark feature discovery
 * done, and start pending channels anyway.
 */
3712 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3713 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3716 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated packets. */
3718 if (cmd_len < sizeof(*rej))
3721 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3724 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3725 cmd->ident == conn->info_ident) {
3726 cancel_delayed_work(&conn->info_timer);
3728 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3729 conn->info_ident = 0;
3731 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, perform security checks, create the new channel, decide the
 * result/status, send the response, and trigger feature discovery or
 * configuration as appropriate. Returns the new channel (or NULL).
 */
3737 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3738 struct l2cap_cmd_hdr *cmd,
3739 u8 *data, u8 rsp_code, u8 amp_id)
3741 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3742 struct l2cap_conn_rsp rsp;
3743 struct l2cap_chan *chan = NULL, *pchan;
3744 int result, status = L2CAP_CS_NO_INFO;
3746 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3747 __le16 psm = req->psm;
3749 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3751 /* Check if we have socket listening on psm */
3752 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3753 &conn->hcon->dst, ACL_LINK);
3755 result = L2CAP_CR_BAD_PSM;
3759 mutex_lock(&conn->chan_lock);
3760 l2cap_chan_lock(pchan);
3762 /* Check if the ACL is secure enough (if not SDP) */
3763 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3764 !hci_conn_check_link_mode(conn->hcon)) {
3765 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3766 result = L2CAP_CR_SEC_BLOCK;
3770 result = L2CAP_CR_NO_MEM;
3772 /* Check if we already have channel with that dcid */
3773 if (__l2cap_get_chan_by_dcid(conn, scid))
3776 chan = pchan->ops->new_connection(pchan);
3780 /* For certain devices (ex: HID mouse), support for authentication,
3781 * pairing and bonding is optional. For such devices, inorder to avoid
3782 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3783 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3785 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3787 bacpy(&chan->src, &conn->hcon->src);
3788 bacpy(&chan->dst, &conn->hcon->dst);
3789 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3790 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3793 chan->local_amp_id = amp_id;
3795 __l2cap_chan_add(conn, chan);
3799 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3801 chan->ident = cmd->ident;
/* Only decide the final result once feature discovery has completed;
 * otherwise answer PEND and trigger an Information Request below.
 */
3803 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3804 if (l2cap_chan_check_security(chan)) {
3805 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3806 l2cap_state_change(chan, BT_CONNECT2);
3807 result = L2CAP_CR_PEND;
3808 status = L2CAP_CS_AUTHOR_PEND;
3809 chan->ops->defer(chan);
3811 /* Force pending result for AMP controllers.
3812 * The connection will succeed after the
3813 * physical link is up.
3815 if (amp_id == AMP_ID_BREDR) {
3816 l2cap_state_change(chan, BT_CONFIG);
3817 result = L2CAP_CR_SUCCESS;
3819 l2cap_state_change(chan, BT_CONNECT2);
3820 result = L2CAP_CR_PEND;
3822 status = L2CAP_CS_NO_INFO;
3825 l2cap_state_change(chan, BT_CONNECT2);
3826 result = L2CAP_CR_PEND;
3827 status = L2CAP_CS_AUTHEN_PEND;
3830 l2cap_state_change(chan, BT_CONNECT2);
3831 result = L2CAP_CR_PEND;
3832 status = L2CAP_CS_NO_INFO;
3836 l2cap_chan_unlock(pchan);
3837 mutex_unlock(&conn->chan_lock);
3840 rsp.scid = cpu_to_le16(scid);
3841 rsp.dcid = cpu_to_le16(dcid);
3842 rsp.result = cpu_to_le16(result);
3843 rsp.status = cpu_to_le16(status);
3844 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature-mask discovery if it has not run yet. */
3846 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3847 struct l2cap_info_req info;
3848 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3850 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3851 conn->info_ident = l2cap_get_ident(conn);
3853 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3855 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3856 sizeof(info), &info);
/* Connection accepted: start configuration immediately. */
3859 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3860 result == L2CAP_CR_SUCCESS) {
3862 set_bit(CONF_REQ_SENT, &chan->conf_state);
3863 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3864 l2cap_build_conf_req(chan, buf), buf);
3865 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: notify mgmt of the device
 * connection (once) and delegate to l2cap_connect() with a CONN_RSP
 * response code and no AMP controller.
 */
3871 static int l2cap_connect_req(struct l2cap_conn *conn,
3872 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3874 struct hci_dev *hdev = conn->hcon->hdev;
3875 struct hci_conn *hcon = conn->hcon;
3877 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the connection to mgmt only on the first L2CAP connect. */
3881 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3882 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3883 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3884 hcon->dst_type, 0, NULL, 0,
3886 hci_dev_unlock(hdev);
3888 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response: find the channel by scid
 * (or by ident for a pending result), then either move to BT_CONFIG and
 * send our Configure Request (SUCCESS), mark it pending, or tear it down
 * on refusal.
 */
3892 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3893 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3896 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3897 u16 scid, dcid, result, status;
3898 struct l2cap_chan *chan;
3902 if (cmd_len < sizeof(*rsp))
3905 scid = __le16_to_cpu(rsp->scid);
3906 dcid = __le16_to_cpu(rsp->dcid);
3907 result = __le16_to_cpu(rsp->result);
3908 status = __le16_to_cpu(rsp->status);
3910 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3911 dcid, scid, result, status);
3913 mutex_lock(&conn->chan_lock);
/* scid of 0 (pending) means the channel is found by command ident. */
3916 chan = __l2cap_get_chan_by_scid(conn, scid);
3922 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3931 l2cap_chan_lock(chan);
3934 case L2CAP_CR_SUCCESS:
3935 l2cap_state_change(chan, BT_CONFIG);
3938 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3940 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3943 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3944 l2cap_build_conf_req(chan, req), req);
3945 chan->num_conf_req++;
3949 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result refuses the connection. */
3953 l2cap_chan_del(chan, ECONNREFUSED);
3957 l2cap_chan_unlock(chan);
3960 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy: no FCS outside ERTM/streaming; CRC16 in
 * those modes unless the peer explicitly negotiated FCS off.
 */
3965 static inline void set_default_fcs(struct l2cap_chan *chan)
3967 /* FCS is enabled only in ERTM or streaming mode, if one or both
3970 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3971 chan->fcs = L2CAP_FCS_NONE;
3972 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3973 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configure Response that resolves a locally-pending EFS
 * negotiation: clears the local-pending flag, marks output config done,
 * and transmits the response with the given ident and flags.
 */
3976 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3977 u8 ident, u16 flags)
3979 struct l2cap_conn *conn = chan->conn;
3981 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3984 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3985 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3987 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3988 l2cap_build_conf_rsp(chan, data,
3989 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the peer.
 */
3992 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3995 struct l2cap_cmd_rej_cid rej;
3997 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3998 rej.scid = __cpu_to_le16(scid);
3999 rej.dcid = __cpu_to_le16(dcid);
4001 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request: accumulate (possibly multi-fragment)
 * options into chan->conf_req, and once complete, parse them with
 * l2cap_parse_conf_req(), send the response, and finish channel setup
 * (FCS defaults, ERTM init, ready) when both directions are configured.
 */
4004 static inline int l2cap_config_req(struct l2cap_conn *conn,
4005 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4008 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4011 struct l2cap_chan *chan;
4014 if (cmd_len < sizeof(*req))
4017 dcid = __le16_to_cpu(req->dcid);
4018 flags = __le16_to_cpu(req->flags);
4020 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4022 chan = l2cap_get_chan_by_scid(conn, dcid);
4024 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in BT_CONFIG or BT_CONNECT2. */
4028 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4029 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4034 /* Reject if config buffer is too small. */
4035 len = cmd_len - sizeof(*req);
4036 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4037 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4038 l2cap_build_conf_rsp(chan, rsp,
4039 L2CAP_CONF_REJECT, flags), rsp);
4044 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4045 chan->conf_len += len;
4047 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4048 /* Incomplete config. Send empty response. */
4049 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4050 l2cap_build_conf_rsp(chan, rsp,
4051 L2CAP_CONF_SUCCESS, flags), rsp);
4055 /* Complete config. */
4056 len = l2cap_parse_conf_req(chan, rsp);
/* Negative return means irreconcilable options: disconnect. */
4058 l2cap_send_disconn_req(chan, ECONNRESET);
4062 chan->ident = cmd->ident;
4063 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4064 chan->num_conf_rsp++;
4066 /* Reset config buffer. */
4069 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS and (for ERTM/streaming)
 * the retransmission machinery, then mark the channel ready.
 */
4072 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4073 set_default_fcs(chan);
4075 if (chan->mode == L2CAP_MODE_ERTM ||
4076 chan->mode == L2CAP_MODE_STREAMING)
4077 err = l2cap_ertm_init(chan);
4080 l2cap_send_disconn_req(chan, -err);
4082 l2cap_chan_ready(chan);
4087 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4089 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4090 l2cap_build_conf_req(chan, buf), buf);
4091 chan->num_conf_req++;
4094 /* Got Conf Rsp PENDING from remote side and asume we sent
4095 Conf Rsp PENDING in the code above */
4096 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4097 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4099 /* check compatibility */
4101 /* Send rsp for BR/EDR channel */
4103 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4105 chan->ident = cmd->ident;
4109 l2cap_chan_unlock(chan);
/* Handle a Configure Response: on SUCCESS extract final parameters, on
 * PENDING handle the EFS handshake (or AMP logical link creation), on
 * UNACCEPT re-negotiate up to a retry limit, otherwise disconnect.
 * When both sides are done, finish channel setup.
 */
4113 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4114 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4117 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4118 u16 scid, flags, result;
4119 struct l2cap_chan *chan;
4120 int len = cmd_len - sizeof(*rsp);
4123 if (cmd_len < sizeof(*rsp))
4126 scid = __le16_to_cpu(rsp->scid);
4127 flags = __le16_to_cpu(rsp->flags);
4128 result = __le16_to_cpu(rsp->result);
4130 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4133 chan = l2cap_get_chan_by_scid(conn, scid);
4138 case L2CAP_CONF_SUCCESS:
4139 l2cap_conf_rfc_get(chan, rsp->data, len);
4140 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4143 case L2CAP_CONF_PENDING:
4144 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4146 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4149 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4152 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channels can answer immediately; AMP channels must
 * bring up the logical link first when EFS is in play.
 */
4156 if (!chan->hs_hcon) {
4157 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4160 if (l2cap_check_efs(chan)) {
4161 amp_create_logical_link(chan);
4162 chan->ident = cmd->ident;
4168 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation until the response limit is hit. */
4169 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4172 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4173 l2cap_send_disconn_req(chan, ECONNRESET);
4177 /* throw out any old stored conf requests */
4178 result = L2CAP_CONF_SUCCESS;
4179 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4182 l2cap_send_disconn_req(chan, ECONNRESET);
4186 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4187 L2CAP_CONF_REQ, len, req);
4188 chan->num_conf_req++;
4189 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect the channel. */
4195 l2cap_chan_set_err(chan, ECONNRESET);
4197 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4198 l2cap_send_disconn_req(chan, ECONNRESET);
4202 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4205 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4207 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4208 set_default_fcs(chan);
4210 if (chan->mode == L2CAP_MODE_ERTM ||
4211 chan->mode == L2CAP_MODE_STREAMING)
4212 err = l2cap_ertm_init(chan);
4215 l2cap_send_disconn_req(chan, -err);
4217 l2cap_chan_ready(chan);
4221 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: look up the channel by our CID, echo
 * a Disconnection Response, then shut down and delete the channel. The
 * hold/put pair keeps the channel alive across the unlocked close call.
 */
4225 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4226 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4229 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4230 struct l2cap_disconn_rsp rsp;
4232 struct l2cap_chan *chan;
4234 if (cmd_len != sizeof(*req))
4237 scid = __le16_to_cpu(req->scid);
4238 dcid = __le16_to_cpu(req->dcid);
4240 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4242 mutex_lock(&conn->chan_lock);
4244 chan = __l2cap_get_chan_by_scid(conn, dcid);
/* Unknown CID: reject so the peer can clean up its side. */
4246 mutex_unlock(&conn->chan_lock);
4247 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4251 l2cap_chan_lock(chan);
4253 rsp.dcid = cpu_to_le16(chan->scid);
4254 rsp.scid = cpu_to_le16(chan->dcid);
4255 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4257 chan->ops->set_shutdown(chan);
4259 l2cap_chan_hold(chan);
4260 l2cap_chan_del(chan, ECONNRESET);
4262 l2cap_chan_unlock(chan);
4264 chan->ops->close(chan);
4265 l2cap_chan_put(chan);
4267 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response: the peer confirmed our disconnect,
 * so delete and close the channel (error 0 — this is a clean teardown).
 */
4272 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4276 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4278 struct l2cap_chan *chan;
4280 if (cmd_len != sizeof(*rsp))
4283 scid = __le16_to_cpu(rsp->scid);
4284 dcid = __le16_to_cpu(rsp->dcid);
4286 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4288 mutex_lock(&conn->chan_lock);
4290 chan = __l2cap_get_chan_by_scid(conn, scid);
4292 mutex_unlock(&conn->chan_lock);
4296 l2cap_chan_lock(chan);
/* Hold keeps the channel valid for the unlocked close below. */
4298 l2cap_chan_hold(chan);
4299 l2cap_chan_del(chan, 0);
4301 l2cap_chan_unlock(chan);
4303 chan->ops->close(chan);
4304 l2cap_chan_put(chan);
4306 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer FEAT_MASK with our feature mask
 * (plus ERTM/streaming and, with high-speed, extended flow/window),
 * FIXED_CHAN with the fixed-channel bitmap, and anything else with
 * NOT_SUPPORTED.
 */
4311 static inline int l2cap_information_req(struct l2cap_conn *conn,
4312 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4315 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4318 if (cmd_len != sizeof(*req))
4321 type = __le16_to_cpu(req->type);
4323 BT_DBG("type 0x%4.4x", type);
4325 if (type == L2CAP_IT_FEAT_MASK) {
4327 u32 feat_mask = l2cap_feat_mask;
4328 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4329 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4330 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4332 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4334 if (conn->hs_enabled)
4335 feat_mask |= L2CAP_FEAT_EXT_FLOW
4336 | L2CAP_FEAT_EXT_WINDOW;
4338 put_unaligned_le32(feat_mask, rsp->data);
4339 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4341 } else if (type == L2CAP_IT_FIXED_CHAN) {
4343 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is on.
 * NOTE(review): this mutates the global l2cap_fixed_chan[] per
 * connection — confirm intended in the full source.
 */
4345 if (conn->hs_enabled)
4346 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4348 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4350 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4351 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4352 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4353 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4356 struct l2cap_info_rsp rsp;
4357 rsp.type = cpu_to_le16(type);
4358 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4359 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an L2CAP Information Response during connection setup: record
 * the remote feature mask / fixed-channel map, chain a follow-up
 * FIXED_CHAN request if supported, and kick pending channels via
 * l2cap_conn_start() once discovery is done.
 * NOTE(review): extraction elided lines (the `u8 *data` parameter,
 * type/result declarations, `switch (type)` header, returns and braces)
 * — visible code kept verbatim.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;

	if (cmd_len < sizeof(*rsp))

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote rejected the query — treat discovery as complete */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a second request for the fixed-channel map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP). amp_id 0 falls back to a
 * plain BR/EDR connect; otherwise the AMP controller id is validated and
 * the channel is prepared for a high-speed link.
 * NOTE(review): extraction elided lines (psm/scid declarations, several
 * guards/returns, `goto error` paths and the error label) — visible code
 * kept verbatim.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;

	if (cmd_len != sizeof(*req))

	/* AMP channels only valid when high-speed support is enabled */
	if (!conn->hs_enabled)

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links carry their own CRC; disable L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;

	/* Error path: reply with BAD_AMP so the peer falls back */
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for this channel to the given destination
 * AMP controller and arm the move timeout.
 * NOTE(review): extraction elided minor lines (the `u8 ident;`
 * declaration, the &req argument line, braces) — visible code verbatim.
 */
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
	struct l2cap_move_chan_req req;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	/* Remember the ident so the response can be matched to this chan */
	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the ident
 * saved from the peer's request (chan->ident).
 */
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	/* icid in a response is the peer's view: our dcid */
	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirmation (confirmed/unconfirmed) under a fresh
 * ident and arm the move timeout while waiting for the cfm response.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid — used
 * when no matching channel could be located for a move response.
 */
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirmation with a cfm response, echoing
 * the sender's ident and icid.
 */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * Actual link teardown is still a placeholder (see comment below).
 */
static void __release_logical_link(struct l2cap_chan *chan)
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
/* React to a failed logical-link setup: disconnect if the channel was
 * still being created, otherwise unwind the in-progress move according
 * to our role in it.
 * NOTE(review): extraction elided `break`s, `return` and closing braces
 * — visible code kept verbatim.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			l2cap_move_done(chan);

		/* Other amp move states imply that the move
		 * has already aborted
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, send the EFS config response, and finish ERTM init if both
 * config directions are done.
 * NOTE(review): extraction elided an `int err;` declaration, an `if`
 * guard around the disconnect/ready pair, and braces — visible code
 * kept verbatim.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		/* negative err is converted to a positive errno reason */
		l2cap_send_disconn_req(chan, -err);
		l2cap_chan_ready(chan);
/* Complete a channel move once the AMP logical link is up, advancing the
 * move state machine according to which confirmation is still pending.
 * NOTE(review): extraction elided `break` statements, a `default:` label
 * and closing braces — visible code kept verbatim.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local-busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical-link confirmation callback: dispatch to the failure path or to
 * the create/move completion helpers depending on channel state.
 * NOTE(review): extraction elided the `u8 status` parameter line, the
 * `if (status)` guard, a `return` and an `else` — visible code verbatim.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	l2cap_logical_fail(chan);
	__release_logical_link(chan);

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
		l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as the initiator.
 * From BR/EDR the physical link must first be prepared; from AMP the
 * move request (back to BR/EDR, dest id 0) is sent immediately.
 * NOTE(review): extraction elided a `return`, an `else` and braces —
 * visible code kept verbatim.
 */
void l2cap_move_start(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only move off BR/EDR if policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		l2cap_move_setup(chan);
		/* dest_amp_id 0 == move back to BR/EDR */
		l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup: either send the
 * outgoing Create Channel request (or fall back to BR/EDR connect), or
 * answer an incoming one and start configuration on success.
 * NOTE(review): extraction elided `else` branches, `return`s, the `buf`
 * declaration and several braces — visible code kept verbatim.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links provide their own integrity checking */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move straight to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
/* Initiator side: prepare the channel for moving and send the Move
 * Channel Request toward the chosen remote AMP controller.
 * NOTE(review): the `u8 remote_amp_id)` parameter line was elided by
 * extraction.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: once the physical link result is known, either attach
 * the ready logical link and answer success, wait for the logical link,
 * or answer NOT_ALLOWED.
 * NOTE(review): extraction elided the `if (result == L2CAP_MR_SUCCESS)`
 * guard, `else` keywords and braces; hchan is a placeholder (NULL) until
 * the AMP helpers exist — visible code kept verbatim.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan->state == BT_CONNECTED) {
		/* Logical link is ready to go */
		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;
		chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		/* Wait for logical link to be ready */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;

	/* Logical link not available */
	l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, answer with an error result;
 * then reset the move state and resume ERTM transmission.
 * NOTE(review): extraction elided the `u16 rsp_result;` declaration, an
 * `else` and braces — visible code kept verbatim.
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		/* -EINVAL means the requested controller id was bad */
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical-link confirmation: route to create, move-initiate,
 * move-respond or cancel depending on channel state, move role and
 * result.
 * NOTE(review): extraction elided a `return` after the unlock, `break`s,
 * a `default:` label and closing braces — visible code kept verbatim.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away — nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request: validate the channel, the
 * destination controller and detect move collisions, then either start
 * responding to the move or reply with an error/pending result.
 * NOTE(review): extraction elided the `u16 icid;` declaration, several
 * guards/returns, `hci_dev_put()` calls, a `send_move_response:` label
 * and braces — visible code kept verbatim.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)

	chan = l2cap_get_chan_by_dcid(conn, icid);
		/* No such channel: refuse directly, no chan to lock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;

	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
/* Drive the move state machine forward after a SUCCESS or PEND Move
 * Channel Response, sending confirmations once the logical link (or
 * BR/EDR fallback) is ready.
 * NOTE(review): extraction elided returns, `break`s, `else` branches, a
 * `default:` label and braces; `hchan` acquisition is a placeholder —
 * visible code kept verbatim.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
		/* No channel: confirm unconfirmed by bare icid */
		l2cap_send_move_chan_cfm_icid(conn, icid);

	__clear_chan_timer(chan);
	/* PEND responses restart the extended (ERTX) timeout */
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			/* Logical link is up or moving to BR/EDR,
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
	case L2CAP_MOVE_WAIT_RSP:
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
			/* Both logical link and move success
			 * are required to confirm
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;

		/* Placeholder - get hci_chan for logical link */
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		if (hchan->state != BT_CONNECTED)

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
			/* Now only need move success
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response: on COLLISION the initiator
 * switches to responder role, otherwise the move is cancelled; an
 * UNCONFIRMED cfm is always sent.
 * NOTE(review): extraction elided a `u16 result` parameter line, a
 * `return` and braces — visible code kept verbatim.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
	struct l2cap_chan *chan;

	/* The response carries our ident; match the channel by it */
	chan = l2cap_get_chan_by_ident(conn, ident);
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: SUCCESS/PEND continue the move, any
 * other result aborts it.
 * NOTE(review): extraction elided the `void *data` parameter's
 * icid/result declarations, an `else` and `return 0;` — visible code
 * kept verbatim.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
	struct l2cap_move_chan_rsp *rsp = data;

	if (cmd_len != sizeof(*rsp))

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
		l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation: commit (or roll back) the local
 * controller id change and always acknowledge with a cfm response, even
 * when the icid is unknown (required by the spec).
 * NOTE(review): extraction elided icid/result declarations, `return`s,
 * an `else` and braces — visible code kept verbatim.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*cfm))

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move; drop the AMP link if we are
			 * now back on BR/EDR.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;

		l2cap_move_done(chan);

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirmation Response: finalize the move if we
 * were waiting for this acknowledgment.
 * NOTE(review): extraction elided the `u16 icid;` declaration, a !chan
 * guard, `return 0;` and braces — visible code kept verbatim.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: the AMP logical link is no longer used */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);

	l2cap_chan_unlock(chan);
/* Validate LE connection parameters (units per the Core Spec: interval
 * in 1.25 ms, timeout in 10 ms). Range checks mirror the spec limits.
 * NOTE(review): extraction elided the `u16 to_multiplier)` parameter
 * line, the `u16 max_latency;` declaration and the return statements —
 * visible code kept verbatim.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
	if (min > max || min < 6 || max > 3200)

	if (to_multiplier < 10 || to_multiplier > 3200)

	/* Supervision timeout must exceed the maximum interval */
	if (max >= to_multiplier * 8)

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the parameters, send accept/reject, and apply accepted
 * parameters to the link.
 * NOTE(review): extraction elided `int err;`, some returns, the
 * `if (!err)` guard around hci_le_conn_update() and braces — visible
 * code kept verbatim.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;

	/* Only the master may grant parameter updates */
	if (!(hcon->link_mode & HCI_LM_MASTER))

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,

	hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: on success record the
 * peer's dcid/mtu/mps/credits and mark the channel ready; otherwise tear
 * the channel down with ECONNREFUSED.
 * NOTE(review): extraction elided the `u8 *data` parameter, `int err;`,
 * !chan/unlock error path, `switch (result)` assignments of dcid/mtu,
 * the `default:` label and returns — visible code kept verbatim.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rsp))

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum LE data channel MTU/MPS per the spec */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the pending connect by the ident of our request */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	l2cap_chan_lock(chan);

	case L2CAP_CR_SUCCESS:
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);

		l2cap_chan_del(chan, ECONNREFUSED);

	l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler. Handlers
 * that return int propagate their error; unknown opcodes are logged and
 * (in the elided code) rejected.
 * NOTE(review): extraction elided the `u8 *data` parameter, `int err =
 * 0;`, `break` statements, the `default:` label body and `return err;`
 * — visible code kept verbatim.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Both responses share one handler */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECHO_REQ:
		/* Echo simply reflects the payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);

	case L2CAP_ECHO_RSP:

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listener for the
 * psm, check security and duplicate dcid, create and add the new
 * channel, then send the connection response (deferred setup answers
 * PEND and skips the mtu/mps fields until accepted).
 * NOTE(review): extraction elided the `u8 *data` parameter, psm/dcid and
 * result declarations, several guards/gotos, the response labels and
 * braces — visible code kept verbatim.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;

	if (cmd_len != sizeof(*req))

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	/* 23 is the minimum LE data channel MTU/MPS per the spec */
	if (mtu < 23 || mps < 23)

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
		result = L2CAP_CR_BAD_PSM;

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		goto response_unlock;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;

	chan = pchan->ops->new_connection(pchan);
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->remote_mps = mps;
	/* Peer-granted TX credits come straight from the request */
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides; answer PEND for now */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;

	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)

	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the credits to the
 * channel (disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS) and
 * flush any queued outbound SDU fragments.
 * NOTE(review): extraction elided the `u8 *data` parameter, the !chan
 * guard, a `return 0;` after the overflow disconnect and loop braces —
 * visible code kept verbatim.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);

	/* Total credits may never exceed the 16-bit protocol maximum */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.

	chan->tx_credits += credits;

	/* Drain queued frames while we have credits to spend */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: abort the pending channel matching the
 * rejected command's ident with ECONNREFUSED.
 * NOTE(review): extraction elided the `u8 *data` parameter, the !chan
 * `goto done`, the `done:` label and `return 0;` — visible code kept
 * verbatim.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler; unknown
 * opcodes are logged (and rejected in the elided code).
 * NOTE(review): extraction elided the `u8 *data` parameter, `int err =
 * 0;`, `break` statements, the `default:` body and `return err;` —
 * visible code kept verbatim.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the LE signaling channel: an LE signaling PDU carries exactly
 * one command; parse its header, dispatch it, and send a Command Reject
 * on handler error.
 * NOTE(review): extraction elided `u16 len; int err;`, goto targets for
 * the early-drop paths, the `if (err)` around the reject block, and the
 * trailing kfree_skb — visible code kept verbatim.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;

	/* This path must only run on LE links */
	if (hcon->type != LE_LINK)

	if (skb->len < L2CAP_CMD_HDR_SIZE)

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* ident 0 is reserved; length must match exactly */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel: a PDU may contain multiple
 * commands, so iterate header-by-header, dispatching each and rejecting
 * on handler error.
 * NOTE(review): extraction elided `int len = skb->len;`, u16/err
 * declarations, the ACL-only drop path, loop-advance of data/len by
 * cmd_len, and the trailing kfree_skb — visible code kept verbatim.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	/* Raw sockets get a copy of every signaling PDU */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,

		/* ident 0 is reserved; length must fit in remainder */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer of a received ERTM/streaming
 * frame. The CRC covers the L2CAP header (skb->data - hdr_size) through
 * the payload.
 * NOTE(review): extraction elided `int hdr_size;` / `else`, the
 * -EBADMSG and 0 returns, and braces — visible code kept verbatim.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
	u16 our_fcs, rcv_fcs;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS first; it then sits just past skb->len */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
/* Send the local-busy status to the peer with the F-bit set: RNR while
 * locally busy, otherwise flush pending I-frames and fall back to an RR
 * if no frame carried the F-bit.
 * NOTE(review): extraction elided control.final/reqseq setup lines and
 * braces — visible code kept verbatim.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.reqseq = chan->buffer_seq;
	/* Ask the TX path to carry the F-bit on the next frame */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
/* Append a fragment skb to the frag_list of a parent skb being
 * reassembled, keeping len/data_len/truesize accounting consistent and
 * tracking the list tail through *last_frag.
 * NOTE(review): extraction elided an `else` and braces — visible code
 * kept verbatim.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;

	skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits: unsegmented frames go straight to ops->recv(); START begins a
 * new SDU (first two bytes are the SDU length); CONTINUE/END append and
 * the completed SDU is delivered, or dropped on error.
 * NOTE(review): extraction elided `int err = -EINVAL;`, several guards
 * (duplicate START, oversize, short END), `break`s, the END case label
 * and the error/cleanup tail — visible code kept verbatim.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		err = chan->ops->recv(chan, skb);

	case L2CAP_SAR_START:
		/* SDU length prefix precedes the first fragment's data */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {

		if (skb->len >= chan->sdu_len)

		chan->sdu_last_frag = skb;

	case L2CAP_SAR_CONTINUE:
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		/* A CONTINUE must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);

		/* END must land exactly on the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)

		err = chan->ops->recv(chan, chan->sdu);

		/* Reassembly complete */
		chan->sdu_last_frag = NULL;

	/* Error path: drop the partial SDU and reset reassembly state */
	kfree_skb(chan->sdu);
	chan->sdu_last_frag = NULL;
/* Re-segment queued outgoing data after an MTU change (e.g. after a
 * channel move).  Body not visible in this extract — presumably a
 * stub/placeholder; confirm against the full source.
 */
5851 static int l2cap_resegment(struct l2cap_chan *chan)
/* Signal local busy / busy-clear to the ERTM transmit state machine.
 * No-op for non-ERTM channels.  @busy non-zero means the receiver can
 * no longer accept data.
 */
5857 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5861 if (chan->mode != L2CAP_MODE_ERTM)
5864 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5865 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence I-frames from the SREJ queue, passing each to
 * l2cap_reassemble_sdu(), until a sequence gap is hit or local busy is
 * asserted.  When the queue empties, return to the RECV state and ack.
 */
5868 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5871 /* Pass sequential frames to l2cap_reassemble_sdu()
5872 * until a gap is encountered.
5875 BT_DBG("chan %p", chan);
5877 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5878 struct sk_buff *skb;
5879 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5880 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5882 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5887 skb_unlink(skb, &chan->srej_q);
5888 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5889 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5894 if (skb_queue_empty(&chan->srej_q)) {
5895 chan->rx_state = L2CAP_RX_STATE_RECV;
5896 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: retransmit the single frame the
 * peer asked for.  Disconnects on an invalid reqseq (equal to
 * next_tx_seq, i.e. never sent) or when the frame exceeded its
 * retry limit (max_tx).
 */
5902 static void l2cap_handle_srej(struct l2cap_chan *chan,
5903 struct l2cap_ctrl *control)
5905 struct sk_buff *skb;
5907 BT_DBG("chan %p, control %p, skb %p", chan, control);
5909 if (control->reqseq == chan->next_tx_seq) {
5910 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5911 l2cap_send_disconn_req(chan, ECONNRESET);
5915 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5918 BT_DBG("Seq %d not available for retransmission",
5923 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5924 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5925 l2cap_send_disconn_req(chan, ECONNRESET);
5929 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5931 if (control->poll) {
/* Poll set: retransmit with the F-bit and flush pending frames. */
5932 l2cap_pass_to_tx(chan, control);
5934 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5935 l2cap_retransmit(chan, control);
5936 l2cap_ertm_send(chan);
5938 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5939 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5940 chan->srej_save_reqseq = control->reqseq;
5943 l2cap_pass_to_tx_fbit(chan, control);
5945 if (control->final) {
/* F=1: only retransmit if this SREJ wasn't already acted on. */
5946 if (chan->srej_save_reqseq != control->reqseq ||
5947 !test_and_clear_bit(CONN_SREJ_ACT,
5949 l2cap_retransmit(chan, control);
5951 l2cap_retransmit(chan, control);
5952 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5953 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5954 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: retransmit all unacked frames from
 * reqseq onward.  Disconnects on an invalid reqseq or when the first
 * rejected frame has already hit the retry limit.
 */
5960 static void l2cap_handle_rej(struct l2cap_chan *chan,
5961 struct l2cap_ctrl *control)
5963 struct sk_buff *skb;
5965 BT_DBG("chan %p, control %p", chan, control);
5967 if (control->reqseq == chan->next_tx_seq) {
5968 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5969 l2cap_send_disconn_req(chan, ECONNRESET);
5973 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5975 if (chan->max_tx && skb &&
5976 bt_cb(skb)->control.retries >= chan->max_tx) {
5977 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5978 l2cap_send_disconn_req(chan, ECONNRESET);
5982 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5984 l2cap_pass_to_tx(chan, control);
5986 if (control->final) {
/* F=1: retransmit only if a REJ wasn't already acted on. */
5987 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5988 l2cap_retransmit_all(chan, control);
5990 l2cap_retransmit_all(chan, control);
5991 l2cap_ertm_send(chan);
5992 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5993 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * EXPECTED, EXPECTED_SREJ, UNEXPECTED(_SREJ), DUPLICATE(_SREJ),
 * INVALID or INVALID_IGNORE.  The SREJ_SENT branch applies extra
 * checks against the SREJ list/queue; outside it the sequence is
 * compared against expected_tx_seq and the tx window.
 */
5997 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5999 BT_DBG("chan %p, txseq %d", chan, txseq);
6001 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6002 chan->expected_tx_seq);
6004 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6005 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6007 /* See notes below regarding "double poll" and
6010 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6011 BT_DBG("Invalid/Ignore - after SREJ");
6012 return L2CAP_TXSEQ_INVALID_IGNORE;
6014 BT_DBG("Invalid - in window after SREJ sent");
6015 return L2CAP_TXSEQ_INVALID;
6019 if (chan->srej_list.head == txseq) {
6020 BT_DBG("Expected SREJ");
6021 return L2CAP_TXSEQ_EXPECTED_SREJ;
6024 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6025 BT_DBG("Duplicate SREJ - txseq already stored");
6026 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6029 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6030 BT_DBG("Unexpected SREJ - not requested");
6031 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6035 if (chan->expected_tx_seq == txseq) {
6036 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6038 BT_DBG("Invalid - txseq outside tx window");
6039 return L2CAP_TXSEQ_INVALID;
6042 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected means it was already received. */
6046 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6047 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6048 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6049 return L2CAP_TXSEQ_DUPLICATE;
6052 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6053 /* A source of invalid packets is a "double poll" condition,
6054 * where delays cause us to send multiple poll packets. If
6055 * the remote stack receives and processes both polls,
6056 * sequence numbers can wrap around in such a way that a
6057 * resent frame has a sequence number that looks like new data
6058 * with a sequence gap. This would trigger an erroneous SREJ
6061 * Fortunately, this is impossible with a tx window that's
6062 * less than half of the maximum sequence number, which allows
6063 * invalid frames to be safely ignored.
6065 * With tx window sizes greater than half of the tx window
6066 * maximum, the frame is invalid and cannot be ignored. This
6067 * causes a disconnect.
6070 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6071 BT_DBG("Invalid/Ignore - txseq outside tx window");
6072 return L2CAP_TXSEQ_INVALID_IGNORE;
6074 BT_DBG("Invalid - txseq outside tx window");
6075 return L2CAP_TXSEQ_INVALID;
6078 BT_DBG("Unexpected - txseq indicates missing frames");
6079 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: RECV state handler.  Dispatches on the
 * received event (I-frame, RR, RNR, REJ, SREJ).  Expected I-frames are
 * reassembled and acked; a sequence gap queues the frame and starts
 * SREJ recovery (transition to SREJ_SENT).  The skb is freed at the
 * end unless it was consumed (skb_in_use).
 * NOTE(review): break statements, gotos and some assignments are
 * missing from this extract; comments describe visible logic only.
 */
6083 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6084 struct l2cap_ctrl *control,
6085 struct sk_buff *skb, u8 event)
6088 bool skb_in_use = false;
6090 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6094 case L2CAP_EV_RECV_IFRAME:
6095 switch (l2cap_classify_txseq(chan, control->txseq)) {
6096 case L2CAP_TXSEQ_EXPECTED:
6097 l2cap_pass_to_tx(chan, control);
6099 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6100 BT_DBG("Busy, discarding expected seq %d",
6105 chan->expected_tx_seq = __next_seq(chan,
6108 chan->buffer_seq = chan->expected_tx_seq;
6111 err = l2cap_reassemble_sdu(chan, skb, control);
6115 if (control->final) {
6116 if (!test_and_clear_bit(CONN_REJ_ACT,
6117 &chan->conn_state)) {
6119 l2cap_retransmit_all(chan, control);
6120 l2cap_ertm_send(chan);
6124 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6125 l2cap_send_ack(chan);
6127 case L2CAP_TXSEQ_UNEXPECTED:
6128 l2cap_pass_to_tx(chan, control);
6130 /* Can't issue SREJ frames in the local busy state.
6131 * Drop this frame, it will be seen as missing
6132 * when local busy is exited.
6134 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6135 BT_DBG("Busy, discarding unexpected seq %d",
6140 /* There was a gap in the sequence, so an SREJ
6141 * must be sent for each missing frame. The
6142 * current frame is stored for later use.
6144 skb_queue_tail(&chan->srej_q, skb);
6146 BT_DBG("Queued %p (queue len %d)", skb,
6147 skb_queue_len(&chan->srej_q));
6149 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6150 l2cap_seq_list_clear(&chan->srej_list);
6151 l2cap_send_srej(chan, control->txseq);
6153 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6155 case L2CAP_TXSEQ_DUPLICATE:
6156 l2cap_pass_to_tx(chan, control);
6158 case L2CAP_TXSEQ_INVALID_IGNORE:
6160 case L2CAP_TXSEQ_INVALID:
6162 l2cap_send_disconn_req(chan, ECONNRESET);
6166 case L2CAP_EV_RECV_RR:
6167 l2cap_pass_to_tx(chan, control);
6168 if (control->final) {
6169 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6171 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6172 !__chan_is_moving(chan)) {
6174 l2cap_retransmit_all(chan, control);
6177 l2cap_ertm_send(chan);
6178 } else if (control->poll) {
6179 l2cap_send_i_or_rr_or_rnr(chan);
6181 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6182 &chan->conn_state) &&
6183 chan->unacked_frames)
6184 __set_retrans_timer(chan);
6186 l2cap_ertm_send(chan);
6189 case L2CAP_EV_RECV_RNR:
6190 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6191 l2cap_pass_to_tx(chan, control);
6192 if (control && control->poll) {
6193 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6194 l2cap_send_rr_or_rnr(chan, 0);
/* Remote busy: stop retransmitting until it recovers. */
6196 __clear_retrans_timer(chan);
6197 l2cap_seq_list_clear(&chan->retrans_list);
6199 case L2CAP_EV_RECV_REJ:
6200 l2cap_handle_rej(chan, control);
6202 case L2CAP_EV_RECV_SREJ:
6203 l2cap_handle_srej(chan, control);
/* Frame was not consumed by any branch above — free it. */
6209 if (skb && !skb_in_use) {
6210 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT state handler.  While SREJ
 * recovery is in progress, incoming I-frames are queued in srej_q
 * (ordered delivery resumes via l2cap_rx_queued_iframes()); additional
 * gaps trigger more SREJs.  S-frames are handled much like in RECV,
 * plus srej-tail retransmission requests on polls.
 * NOTE(review): break statements and skb_in_use assignments are
 * missing from this extract; comments describe visible logic only.
 */
6217 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6218 struct l2cap_ctrl *control,
6219 struct sk_buff *skb, u8 event)
6222 u16 txseq = control->txseq;
6223 bool skb_in_use = false;
6225 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6229 case L2CAP_EV_RECV_IFRAME:
6230 switch (l2cap_classify_txseq(chan, txseq)) {
6231 case L2CAP_TXSEQ_EXPECTED:
6232 /* Keep frame for reassembly later */
6233 l2cap_pass_to_tx(chan, control);
6234 skb_queue_tail(&chan->srej_q, skb);
6236 BT_DBG("Queued %p (queue len %d)", skb,
6237 skb_queue_len(&chan->srej_q));
6239 chan->expected_tx_seq = __next_seq(chan, txseq);
6241 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the frame we most recently SREJ'd for. */
6242 l2cap_seq_list_pop(&chan->srej_list);
6244 l2cap_pass_to_tx(chan, control);
6245 skb_queue_tail(&chan->srej_q, skb);
6247 BT_DBG("Queued %p (queue len %d)", skb,
6248 skb_queue_len(&chan->srej_q));
6250 err = l2cap_rx_queued_iframes(chan);
6255 case L2CAP_TXSEQ_UNEXPECTED:
6256 /* Got a frame that can't be reassembled yet.
6257 * Save it for later, and send SREJs to cover
6258 * the missing frames.
6260 skb_queue_tail(&chan->srej_q, skb);
6262 BT_DBG("Queued %p (queue len %d)", skb,
6263 skb_queue_len(&chan->srej_q));
6265 l2cap_pass_to_tx(chan, control);
6266 l2cap_send_srej(chan, control->txseq);
6268 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6269 /* This frame was requested with an SREJ, but
6270 * some expected retransmitted frames are
6271 * missing. Request retransmission of missing
6274 skb_queue_tail(&chan->srej_q, skb);
6276 BT_DBG("Queued %p (queue len %d)", skb,
6277 skb_queue_len(&chan->srej_q));
6279 l2cap_pass_to_tx(chan, control);
6280 l2cap_send_srej_list(chan, control->txseq);
6282 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6283 /* We've already queued this frame. Drop this copy. */
6284 l2cap_pass_to_tx(chan, control);
6286 case L2CAP_TXSEQ_DUPLICATE:
6287 /* Expecting a later sequence number, so this frame
6288 * was already received. Ignore it completely.
6291 case L2CAP_TXSEQ_INVALID_IGNORE:
6293 case L2CAP_TXSEQ_INVALID:
6295 l2cap_send_disconn_req(chan, ECONNRESET);
6299 case L2CAP_EV_RECV_RR:
6300 l2cap_pass_to_tx(chan, control);
6301 if (control->final) {
6302 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6304 if (!test_and_clear_bit(CONN_REJ_ACT,
6305 &chan->conn_state)) {
6307 l2cap_retransmit_all(chan, control);
6310 l2cap_ertm_send(chan);
6311 } else if (control->poll) {
6312 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6313 &chan->conn_state) &&
6314 chan->unacked_frames) {
6315 __set_retrans_timer(chan);
/* Poll answered by re-requesting the SREJ tail with F-bit. */
6318 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6319 l2cap_send_srej_tail(chan);
6321 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6322 &chan->conn_state) &&
6323 chan->unacked_frames)
6324 __set_retrans_timer(chan);
6326 l2cap_send_ack(chan);
6329 case L2CAP_EV_RECV_RNR:
6330 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6331 l2cap_pass_to_tx(chan, control);
6332 if (control->poll) {
6333 l2cap_send_srej_tail(chan);
6335 struct l2cap_ctrl rr_control;
6336 memset(&rr_control, 0, sizeof(rr_control));
6337 rr_control.sframe = 1;
6338 rr_control.super = L2CAP_SUPER_RR;
6339 rr_control.reqseq = chan->buffer_seq;
6340 l2cap_send_sframe(chan, &rr_control);
6344 case L2CAP_EV_RECV_REJ:
6345 l2cap_handle_rej(chan, control);
6347 case L2CAP_EV_RECV_SREJ:
6348 l2cap_handle_srej(chan, control);
/* Frame was not consumed by any branch above — free it. */
6352 if (skb && !skb_in_use) {
6353 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV rx state, pick the
 * connection MTU from the new controller (block_mtu for an AMP/HS
 * link, acl_mtu otherwise — the selecting if/else lines are missing
 * from this extract) and re-segment queued data for the new MTU.
 */
6360 static int l2cap_finish_move(struct l2cap_chan *chan)
6362 BT_DBG("chan %p", chan);
6364 chan->rx_state = L2CAP_RX_STATE_RECV;
6367 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6369 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6371 return l2cap_resegment(chan);
/* ERTM receive state machine: WAIT_P handler (waiting for a poll after
 * a channel move).  On the poll, process the peer's reqseq, rewind the
 * transmit side to that point, finish the move, then answer with an
 * F-bit frame.  Other events are delegated to the RECV handler
 * (I-frame payloads are not consumed here, hence NULL skb).
 */
6374 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6375 struct l2cap_ctrl *control,
6376 struct sk_buff *skb, u8 event)
6380 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6386 l2cap_process_reqseq(chan, control->reqseq);
6388 if (!skb_queue_empty(&chan->tx_q))
6389 chan->tx_send_head = skb_peek(&chan->tx_q);
6391 chan->tx_send_head = NULL;
6393 /* Rewind next_tx_seq to the point expected
6396 chan->next_tx_seq = control->reqseq;
6397 chan->unacked_frames = 0;
6399 err = l2cap_finish_move(chan);
6403 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6404 l2cap_send_i_or_rr_or_rnr(chan);
6406 if (event == L2CAP_EV_RECV_IFRAME)
6409 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine: WAIT_F handler (waiting for the final
 * bit after a channel move).  Non-final frames are ignored here; on
 * F=1 the transmit side is rewound to the peer's reqseq, the MTU is
 * taken from the new controller, data is re-segmented, and the frame
 * is then processed by the normal RECV handler.
 */
6412 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6413 struct l2cap_ctrl *control,
6414 struct sk_buff *skb, u8 event)
6418 if (!control->final)
6421 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6423 chan->rx_state = L2CAP_RX_STATE_RECV;
6424 l2cap_process_reqseq(chan, control->reqseq);
6426 if (!skb_queue_empty(&chan->tx_q))
6427 chan->tx_send_head = skb_peek(&chan->tx_q);
6429 chan->tx_send_head = NULL;
6431 /* Rewind next_tx_seq to the point expected
6434 chan->next_tx_seq = control->reqseq;
6435 chan->unacked_frames = 0;
6438 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6440 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6442 err = l2cap_resegment(chan);
6445 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if @reqseq acknowledges only frames that have actually
 * been sent and not yet acked, using modular sequence arithmetic.
 */
6450 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6452 /* Make sure reqseq is for a packet that has been sent but not acked */
6455 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6456 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validate the frame's reqseq,
 * then dispatch to the handler for the current rx_state.  An invalid
 * reqseq (acking a frame never sent) forces a disconnect.
 */
6459 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6460 struct sk_buff *skb, u8 event)
6464 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6465 control, skb, event, chan->rx_state);
6467 if (__valid_reqseq(chan, control->reqseq)) {
6468 switch (chan->rx_state) {
6469 case L2CAP_RX_STATE_RECV:
6470 err = l2cap_rx_state_recv(chan, control, skb, event);
6472 case L2CAP_RX_STATE_SREJ_SENT:
6473 err = l2cap_rx_state_srej_sent(chan, control, skb,
6476 case L2CAP_RX_STATE_WAIT_P:
6477 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6479 case L2CAP_RX_STATE_WAIT_F:
6480 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6487 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6488 control->reqseq, chan->next_tx_seq,
6489 chan->expected_ack_seq);
6490 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver the frame only if its txseq is the
 * expected one (any partial SDU is dropped on a gap — the missing
 * else/error lines are not visible in this extract), then advance
 * last_acked_seq / expected_tx_seq past this frame.  Lost frames are
 * simply skipped; streaming mode has no retransmission.
 */
6496 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6497 struct sk_buff *skb)
6501 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6504 if (l2cap_classify_txseq(chan, control->txseq) ==
6505 L2CAP_TXSEQ_EXPECTED) {
6506 l2cap_pass_to_tx(chan, control);
6508 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6509 __next_seq(chan, chan->buffer_seq));
6511 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6513 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: discard any partially reassembled SDU. */
6516 kfree_skb(chan->sdu);
6519 chan->sdu_last_frag = NULL;
6523 BT_DBG("Freeing %p", skb);
6528 chan->last_acked_seq = control->txseq;
6529 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Receive path for ERTM and streaming channels: unpack the control
 * field, verify FCS, validate payload length against MPS and the
 * F/P-bit rules, then feed I-frames to l2cap_rx()/l2cap_stream_rx()
 * and S-frames (mapped via rx_func_to_event) to l2cap_rx().
 * Protocol violations cause a disconnect request.
 */
6534 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6536 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6540 __unpack_control(chan, skb);
6545 * We can just drop the corrupted I-frame here.
6546 * Receiver will miss it and start proper recovery
6547 * procedures and ask for retransmission.
6549 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length header and trailing FCS when
 * comparing the payload length against the negotiated MPS.
 */
6552 if (!control->sframe && control->sar == L2CAP_SAR_START)
6553 len -= L2CAP_SDULEN_SIZE;
6555 if (chan->fcs == L2CAP_FCS_CRC16)
6556 len -= L2CAP_FCS_SIZE;
6558 if (len > chan->mps) {
6559 l2cap_send_disconn_req(chan, ECONNRESET);
6563 if (!control->sframe) {
6566 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6567 control->sar, control->reqseq, control->final,
6570 /* Validate F-bit - F=0 always valid, F=1 only
6571 * valid in TX WAIT_F
6573 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6576 if (chan->mode != L2CAP_MODE_STREAMING) {
6577 event = L2CAP_EV_RECV_IFRAME;
6578 err = l2cap_rx(chan, control, skb, event);
6580 err = l2cap_stream_rx(chan, control, skb);
6584 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the S-frame "super" field to a state-machine event. */
6586 const u8 rx_func_to_event[4] = {
6587 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6588 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6591 /* Only I-frames are expected in streaming mode */
6592 if (chan->mode == L2CAP_MODE_STREAMING)
6595 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6596 control->reqseq, control->final, control->poll,
6600 BT_ERR("Trailing bytes: %d in sframe", len);
6601 l2cap_send_disconn_req(chan, ECONNRESET);
6605 /* Validate F and P bits */
6606 if (control->final && (control->poll ||
6607 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6610 event = rx_func_to_event[control->super];
6611 if (l2cap_rx(chan, control, skb, event))
6612 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's LE flow-control credits: once our rx_credits drop
 * below half of le_max_credits, send an LE Flow Control Credit packet
 * restoring the count to le_max_credits.
 */
6622 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6624 struct l2cap_conn *conn = chan->conn;
6625 struct l2cap_le_credits pkt;
6628 /* We return more credits to the sender only after the amount of
6629 * credits falls below half of the initial amount.
6631 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6634 return_credits = le_max_credits - chan->rx_credits;
6636 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6638 chan->rx_credits += return_credits;
6640 pkt.cid = cpu_to_le16(chan->scid);
6641 pkt.credits = cpu_to_le16(return_credits);
6643 chan->ident = l2cap_get_ident(conn);
6645 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* LE credit-based flow control receive: consume one rx credit per PDU
 * (disconnecting if the peer sent without credits), return credits to
 * the sender when running low, then reassemble SDUs — the first PDU of
 * an SDU carries a 16-bit SDU length, subsequent PDUs are appended
 * until the full SDU is delivered via chan->ops->recv().
 * NOTE(review): error gotos / branch lines are missing from this
 * extract; comments describe visible logic only.
 */
6648 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6652 if (!chan->rx_credits) {
6653 BT_ERR("No credits to receive LE L2CAP data");
6654 l2cap_send_disconn_req(chan, ECONNRESET);
6658 if (chan->imtu < skb->len) {
6659 BT_ERR("Too big LE L2CAP PDU");
6664 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6666 l2cap_chan_le_send_credits(chan);
/* Start of a new SDU: first two bytes are the total SDU length. */
6673 sdu_len = get_unaligned_le16(skb->data);
6674 skb_pull(skb, L2CAP_SDULEN_SIZE);
6676 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6677 sdu_len, skb->len, chan->imtu);
6679 if (sdu_len > chan->imtu) {
6680 BT_ERR("Too big LE L2CAP SDU length received");
6685 if (skb->len > sdu_len) {
6686 BT_ERR("Too much LE L2CAP data received");
/* SDU fits in one PDU: deliver it directly. */
6691 if (skb->len == sdu_len)
6692 return chan->ops->recv(chan, skb);
6695 chan->sdu_len = sdu_len;
6696 chan->sdu_last_frag = skb;
6701 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6702 chan->sdu->len, skb->len, chan->sdu_len);
6704 if (chan->sdu->len + skb->len > chan->sdu_len) {
6705 BT_ERR("Too much LE L2CAP data received");
6710 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6713 if (chan->sdu->len == chan->sdu_len) {
6714 err = chan->ops->recv(chan, chan->sdu);
6717 chan->sdu_last_frag = NULL;
/* Error path: drop the partially reassembled SDU. */
6725 kfree_skb(chan->sdu);
6727 chan->sdu_last_frag = NULL;
6731 /* We can't return an error here since we took care of the skb
6732 * freeing internally. An error return would cause the caller to
6733 * do a double-free of the skb.
/* Route an incoming data frame to the channel identified by @cid.
 * Creates an A2MP channel on demand for L2CAP_CID_A2MP; unknown CIDs
 * and non-connected channels drop the packet.  Connected channels are
 * dispatched by mode: LE flow control, basic (with imtu check),
 * ERTM/streaming, or dropped for an unknown mode.
 */
6738 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6739 struct sk_buff *skb)
6741 struct l2cap_chan *chan;
6743 chan = l2cap_get_chan_by_scid(conn, cid);
6745 if (cid == L2CAP_CID_A2MP) {
6746 chan = a2mp_channel_create(conn, skb);
6752 l2cap_chan_lock(chan);
6754 BT_DBG("unknown cid 0x%4.4x", cid);
6755 /* Drop packet and return */
6761 BT_DBG("chan %p, len %d", chan, skb->len);
6763 if (chan->state != BT_CONNECTED)
6766 switch (chan->mode) {
6767 case L2CAP_MODE_LE_FLOWCTL:
6768 if (l2cap_le_data_rcv(chan, skb) < 0)
6773 case L2CAP_MODE_BASIC:
6774 /* If socket recv buffers overflows we drop data here
6775 * which is *bad* because L2CAP has to be reliable.
6776 * But we don't have any other choice. L2CAP doesn't
6777 * provide flow control mechanism. */
6779 if (chan->imtu < skb->len) {
6780 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6784 if (!chan->ops->recv(chan, skb))
6788 case L2CAP_MODE_ERTM:
6789 case L2CAP_MODE_STREAMING:
6790 l2cap_data_rcv(chan, skb);
6794 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6802 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame: only valid on ACL links.
 * Look up a listening/connected global channel by PSM and source/dest
 * address, record the sender's BD_ADDR and PSM in the skb control
 * block (exposed to userspace as msg_name), and hand the skb to the
 * channel's recv op.  Oversized or unmatched frames are dropped.
 */
6805 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6806 struct sk_buff *skb)
6808 struct hci_conn *hcon = conn->hcon;
6809 struct l2cap_chan *chan;
6811 if (hcon->type != ACL_LINK)
6814 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6819 BT_DBG("chan %p, len %d", chan, skb->len);
6821 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6824 if (chan->imtu < skb->len)
6827 /* Store remote BD_ADDR and PSM for msg_name */
6828 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6829 bt_cb(skb)->psm = psm;
6831 if (!chan->ops->recv(chan, skb))
/* Deliver a frame on the LE ATT fixed channel: only valid on LE links.
 * Look up the connected global ATT channel for this address pair,
 * drop frames from blacklisted remotes or frames exceeding imtu, and
 * pass the skb to the channel's recv op.
 */
6838 static void l2cap_att_channel(struct l2cap_conn *conn,
6839 struct sk_buff *skb)
6841 struct hci_conn *hcon = conn->hcon;
6842 struct l2cap_chan *chan;
6844 if (hcon->type != LE_LINK)
6847 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6848 &hcon->src, &hcon->dst);
6852 BT_DBG("chan %p, len %d", chan, skb->len);
6854 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6857 if (chan->imtu < skb->len)
6860 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame: queue it if the HCI link is not
 * fully connected yet, otherwise strip the basic header and dispatch
 * by CID — signaling, connectionless, ATT, LE signaling, SMP, 6LoWPAN,
 * or a dynamic data channel.  Takes ownership of @skb.
 */
6867 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6869 struct l2cap_hdr *lh = (void *) skb->data;
6870 struct hci_conn *hcon = conn->hcon;
6874 if (hcon->state != BT_CONNECTED) {
6875 BT_DBG("queueing pending rx skb");
6876 skb_queue_tail(&conn->pending_rx, skb);
6880 skb_pull(skb, L2CAP_HDR_SIZE);
6881 cid = __le16_to_cpu(lh->cid);
6882 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
6884 if (len != skb->len) {
6889 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6892 case L2CAP_CID_SIGNALING:
6893 l2cap_sig_channel(conn, skb);
6896 case L2CAP_CID_CONN_LESS:
6897 psm = get_unaligned((__le16 *) skb->data);
6898 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6899 l2cap_conless_channel(conn, psm, skb);
6903 l2cap_att_channel(conn, skb);
6906 case L2CAP_CID_LE_SIGNALING:
6907 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole connection. */
6911 if (smp_sig_channel(conn, skb))
6912 l2cap_conn_del(conn->hcon, EACCES);
6915 case L2CAP_FC_6LOWPAN:
6916 bt_6lowpan_recv(conn, skb);
6920 l2cap_data_channel(conn, cid, skb);
/* Work item: flush frames that were queued while the HCI link was
 * still connecting, feeding each to l2cap_recv_frame().
 */
6925 static void process_pending_rx(struct work_struct *work)
6927 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6929 struct sk_buff *skb;
6933 while ((skb = skb_dequeue(&conn->pending_rx)))
6934 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the l2cap_conn for @hcon: create the HCI
 * channel, take a reference on the hcon, pick the MTU (LE uses le_mtu
 * when set, otherwise acl_mtu), set up locks, channel list, the
 * security or info timer depending on link type, and the pending-rx
 * queue/work.  Returns the new conn (error-path returns are in lines
 * missing from this extract).
 */
6937 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6939 struct l2cap_conn *conn = hcon->l2cap_data;
6940 struct hci_chan *hchan;
6945 hchan = hci_chan_create(hcon);
6949 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
6951 hci_chan_del(hchan);
6955 kref_init(&conn->ref);
6956 hcon->l2cap_data = conn;
/* conn holds a reference on the underlying HCI connection. */
6958 hci_conn_get(conn->hcon);
6959 conn->hchan = hchan;
6961 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6963 switch (hcon->type) {
6965 if (hcon->hdev->le_mtu) {
6966 conn->mtu = hcon->hdev->le_mtu;
6971 conn->mtu = hcon->hdev->acl_mtu;
6975 conn->feat_mask = 0;
6977 if (hcon->type == ACL_LINK)
6978 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6979 &hcon->hdev->dev_flags);
6981 spin_lock_init(&conn->lock);
6982 mutex_init(&conn->chan_lock);
6984 INIT_LIST_HEAD(&conn->chan_l);
6985 INIT_LIST_HEAD(&conn->users);
6987 if (hcon->type == LE_LINK)
6988 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
6990 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6992 skb_queue_head_init(&conn->pending_rx);
6993 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6995 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs must fit in
 * one byte; BR/EDR PSMs must be odd with bit 8 clear, per the L2CAP
 * specification.
 */
7000 static bool is_valid_psm(u16 psm, u8 dst_type) {
7004 if (bdaddr_type_is_le(dst_type))
7005 return (psm <= 0x00ff);
7007 /* PSM must be odd and lsb of upper byte must be 0 */
7008 return ((psm & 0x0101) == 0x0001);
/* Connect @chan to @dst: validate PSM/CID/mode for the channel type,
 * record the destination, create the HCI connection (LE or ACL based
 * on the address type), attach the channel to the l2cap_conn, start
 * the channel timer and — if the link is already up — kick off the
 * L2CAP connect/config sequence.  Returns 0 or a negative errno
 * (several error-path lines are missing from this extract).
 */
7011 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7012 bdaddr_t *dst, u8 dst_type)
7014 struct l2cap_conn *conn;
7015 struct hci_conn *hcon;
7016 struct hci_dev *hdev;
7020 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7021 dst_type, __le16_to_cpu(psm));
7023 hdev = hci_get_route(dst, &chan->src);
7025 return -EHOSTUNREACH;
7029 l2cap_chan_lock(chan);
/* Reject invalid PSM/CID combinations for this channel type. */
7031 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7032 chan->chan_type != L2CAP_CHAN_RAW) {
7037 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7042 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7047 switch (chan->mode) {
7048 case L2CAP_MODE_BASIC:
7050 case L2CAP_MODE_LE_FLOWCTL:
7051 l2cap_le_flowctl_init(chan);
7053 case L2CAP_MODE_ERTM:
7054 case L2CAP_MODE_STREAMING:
7063 switch (chan->state) {
7067 /* Already connecting */
7072 /* Already connected */
7086 /* Set destination address and psm */
7087 bacpy(&chan->dst, dst);
7088 chan->dst_type = dst_type;
7093 auth_type = l2cap_get_auth_type(chan);
7095 if (bdaddr_type_is_le(dst_type))
7096 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
7097 chan->sec_level, auth_type);
7099 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
7100 chan->sec_level, auth_type);
7103 err = PTR_ERR(hcon);
7107 conn = l2cap_conn_add(hcon);
7109 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already taken on this connection. */
7114 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7115 hci_conn_drop(hcon);
7120 /* Update source addr of the socket */
7121 bacpy(&chan->src, &hcon->src);
7122 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add must run without the channel lock held. */
7124 l2cap_chan_unlock(chan);
7125 l2cap_chan_add(conn, chan);
7126 l2cap_chan_lock(chan);
7128 /* l2cap_chan_add takes its own ref so we can drop this one */
7129 hci_conn_drop(hcon);
7131 l2cap_state_change(chan, BT_CONNECT);
7132 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7134 /* Release chan->sport so that it can be reused by other
7135 * sockets (as it's only used for listening sockets).
7137 write_lock(&chan_list_lock);
7139 write_unlock(&chan_list_lock);
7141 if (hcon->state == BT_CONNECTED) {
7142 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7143 __clear_chan_timer(chan);
7144 if (l2cap_chan_check_security(chan))
7145 l2cap_state_change(chan, BT_CONNECTED);
7147 l2cap_do_start(chan);
7153 l2cap_chan_unlock(chan);
7154 hci_dev_unlock(hdev);
7159 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scan listening channels; ones bound to this adapter's address take
 * precedence (exact match) over wildcard BDADDR_ANY listeners.
 * Returns the accumulated link-mode flags (HCI_LM_ACCEPT/MASTER).
 */
7161 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7163 int exact = 0, lm1 = 0, lm2 = 0;
7164 struct l2cap_chan *c;
7166 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7168 /* Find listening sockets and check their link_mode */
7169 read_lock(&chan_list_lock);
7170 list_for_each_entry(c, &chan_list, global_l) {
7171 if (c->state != BT_LISTEN)
7174 if (!bacmp(&c->src, &hdev->bdaddr)) {
7175 lm1 |= HCI_LM_ACCEPT;
7176 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7177 lm1 |= HCI_LM_MASTER;
7179 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7180 lm2 |= HCI_LM_ACCEPT;
7181 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7182 lm2 |= HCI_LM_MASTER;
7185 read_unlock(&chan_list_lock);
7187 return exact ? lm1 : lm2;
/* HCI callback: connection attempt completed.  On success create the
 * l2cap_conn and mark it ready; on failure tear it down with the
 * mapped errno.
 */
7190 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7192 struct l2cap_conn *conn;
7194 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7197 conn = l2cap_conn_add(hcon);
7199 l2cap_conn_ready(conn);
7201 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason code to use when disconnecting.
 * Falls back to "remote user terminated" when no l2cap_conn exists.
 */
7205 int l2cap_disconn_ind(struct hci_conn *hcon)
7207 struct l2cap_conn *conn = hcon->l2cap_data;
7209 BT_DBG("hcon %p", hcon);
7212 return HCI_ERROR_REMOTE_USER_TERM;
7213 return conn->disc_reason;
/* HCI callback: link disconnected — notify 6LoWPAN and tear down the
 * l2cap_conn with the mapped errno.
 */
7216 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7218 BT_DBG("hcon %p reason %d", hcon, reason);
7220 bt_6lowpan_del_conn(hcon->l2cap_data);
7222 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a disconnect timer at MEDIUM security and
 * closes the channel outright at HIGH/FIPS; regaining it clears the
 * pending timer for MEDIUM.
 */
7225 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7230 if (encrypt == 0x00) {
7231 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7232 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7233 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7234 chan->sec_level == BT_SECURITY_FIPS)
7235 l2cap_chan_close(chan, ECONNREFUSED);
7237 if (chan->sec_level == BT_SECURITY_MEDIUM)
7238 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished.
 * For LE links, distribute SMP keys and cancel the security timer.
 * For each channel on the connection, either resume it, continue a
 * pending connect (BT_CONNECT), or answer a held incoming connect
 * request (BT_CONNECT2) with success/pending/security-block, followed
 * by a config request when appropriate.
 */
7242 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7244 struct l2cap_conn *conn = hcon->l2cap_data;
7245 struct l2cap_chan *chan;
7250 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7252 if (hcon->type == LE_LINK) {
7253 if (!status && encrypt)
7254 smp_distribute_keys(conn, 0);
7255 cancel_delayed_work(&conn->security_timer);
7258 mutex_lock(&conn->chan_lock);
7260 list_for_each_entry(chan, &conn->chan_l, list) {
7261 l2cap_chan_lock(chan);
7263 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7264 state_to_string(chan->state));
/* A2MP has its own security handling. */
7266 if (chan->scid == L2CAP_CID_A2MP) {
7267 l2cap_chan_unlock(chan);
7271 if (chan->scid == L2CAP_CID_ATT) {
7272 if (!status && encrypt) {
7273 chan->sec_level = hcon->sec_level;
7274 l2cap_chan_ready(chan);
7277 l2cap_chan_unlock(chan);
7281 if (!__l2cap_no_conn_pending(chan)) {
7282 l2cap_chan_unlock(chan);
7286 if (!status && (chan->state == BT_CONNECTED ||
7287 chan->state == BT_CONFIG)) {
7288 chan->ops->resume(chan);
7289 l2cap_check_encryption(chan, encrypt);
7290 l2cap_chan_unlock(chan);
7294 if (chan->state == BT_CONNECT) {
7296 l2cap_start_connection(chan);
7298 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7299 } else if (chan->state == BT_CONNECT2) {
7300 struct l2cap_conn_rsp rsp;
7304 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7305 res = L2CAP_CR_PEND;
7306 stat = L2CAP_CS_AUTHOR_PEND;
7307 chan->ops->defer(chan);
7309 l2cap_state_change(chan, BT_CONFIG);
7310 res = L2CAP_CR_SUCCESS;
7311 stat = L2CAP_CS_NO_INFO;
/* Security failed: block the incoming connection. */
7314 l2cap_state_change(chan, BT_DISCONN);
7315 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7316 res = L2CAP_CR_SEC_BLOCK;
7317 stat = L2CAP_CS_NO_INFO;
7320 rsp.scid = cpu_to_le16(chan->dcid);
7321 rsp.dcid = cpu_to_le16(chan->scid);
7322 rsp.result = cpu_to_le16(res);
7323 rsp.status = cpu_to_le16(stat);
7324 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7327 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7328 res == L2CAP_CR_SUCCESS) {
7330 set_bit(CONF_REQ_SENT, &chan->conf_state);
7331 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7333 l2cap_build_conf_req(chan, buf),
7335 chan->num_conf_req++;
7339 l2cap_chan_unlock(chan);
7342 mutex_unlock(&conn->chan_lock);
/* HCI callback: incoming ACL data, possibly fragmented.  A start
 * fragment must contain the basic L2CAP header, which gives the total
 * frame length; complete frames go straight to l2cap_recv_frame(),
 * larger ones are accumulated in conn->rx_skb across continuation
 * fragments.  Length mismatches mark the connection unreliable.
 */
7347 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7349 struct l2cap_conn *conn = hcon->l2cap_data;
7350 struct l2cap_hdr *hdr;
7353 /* For AMP controller do not create l2cap conn */
7354 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7358 conn = l2cap_conn_add(hcon);
7363 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7367 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated — discard it.
 */
7370 BT_ERR("Unexpected start frame (len %d)", skb->len);
7371 kfree_skb(conn->rx_skb);
7372 conn->rx_skb = NULL;
7374 l2cap_conn_unreliable(conn, ECOMM);
7377 /* Start fragment always begin with Basic L2CAP header */
7378 if (skb->len < L2CAP_HDR_SIZE) {
7379 BT_ERR("Frame is too short (len %d)", skb->len);
7380 l2cap_conn_unreliable(conn, ECOMM);
7384 hdr = (struct l2cap_hdr *) skb->data;
7385 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7387 if (len == skb->len) {
7388 /* Complete frame received */
7389 l2cap_recv_frame(conn, skb);
7393 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7395 if (skb->len > len) {
7396 BT_ERR("Frame is too long (len %d, expected len %d)",
7398 l2cap_conn_unreliable(conn, ECOMM);
7402 /* Allocate skb for the complete frame (with header) */
7403 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7407 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7409 conn->rx_len = len - skb->len;
7413 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7415 if (!conn->rx_len) {
7416 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7417 l2cap_conn_unreliable(conn, ECOMM);
7421 if (skb->len > conn->rx_len) {
7422 BT_ERR("Fragment is too long (len %d, expected %d)",
7423 skb->len, conn->rx_len);
7424 kfree_skb(conn->rx_skb);
7425 conn->rx_skb = NULL;
7427 l2cap_conn_unreliable(conn, ECOMM);
7431 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7433 conn->rx_len -= skb->len;
7435 if (!conn->rx_len) {
7436 /* Complete frame received. l2cap_recv_frame
7437 * takes ownership of the skb so set the global
7438 * rx_skb pointer to NULL first.
7440 struct sk_buff *rx_skb = conn->rx_skb;
7441 conn->rx_skb = NULL;
7442 l2cap_recv_frame(conn, rx_skb);
7452 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7454 struct l2cap_chan *c;
7456 read_lock(&chan_list_lock);
7458 list_for_each_entry(c, &chan_list, global_l) {
7459 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7461 c->state, __le16_to_cpu(c->psm),
7462 c->scid, c->dcid, c->imtu, c->omtu,
7463 c->sec_level, c->mode);
7466 read_unlock(&chan_list_lock);
7471 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7473 return single_open(file, l2cap_debugfs_show, inode->i_private);
7476 static const struct file_operations l2cap_debugfs_fops = {
7477 .open = l2cap_debugfs_open,
7479 .llseek = seq_lseek,
7480 .release = single_release,
7483 static struct dentry *l2cap_debugfs;
7485 int __init l2cap_init(void)
7489 err = l2cap_init_sockets();
7493 if (IS_ERR_OR_NULL(bt_debugfs))
7496 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7497 NULL, &l2cap_debugfs_fops);
7499 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7501 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7509 void l2cap_exit(void)
7511 bt_6lowpan_cleanup();
7512 debugfs_remove(l2cap_debugfs);
7513 l2cap_cleanup_sockets();
7516 module_param(disable_ertm, bool, 0644);
7517 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");