2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
226 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 struct sock *sk = chan->sk;
240 __l2cap_state_change(chan, state);
244 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 struct sock *sk = chan->sk;
251 chan->ops->state_change(chan, chan->state, err);
255 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
257 struct sock *sk = chan->sk;
260 chan->ops->state_change(chan, chan->state, err);
264 static void __set_retrans_timer(struct l2cap_chan *chan)
266 if (!delayed_work_pending(&chan->monitor_timer) &&
267 chan->retrans_timeout) {
268 l2cap_set_timer(chan, &chan->retrans_timer,
269 msecs_to_jiffies(chan->retrans_timeout));
273 static void __set_monitor_timer(struct l2cap_chan *chan)
275 __clear_retrans_timer(chan);
276 if (chan->monitor_timeout) {
277 l2cap_set_timer(chan, &chan->monitor_timer,
278 msecs_to_jiffies(chan->monitor_timeout));
282 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
287 skb_queue_walk(head, skb) {
288 if (bt_cb(skb)->control.txseq == seq)
295 /* ---- L2CAP sequence number lists ---- */
297 /* For ERTM, ordered lists of sequence numbers must be tracked for
298 * SREJ requests that are received and for frames that are to be
299 * retransmitted. These seq_list functions implement a singly-linked
300 * list in an array, where membership in the list can also be checked
301 * in constant time. Items can also be added to the tail of the list
302 * and removed from the head in constant time, without further memory
306 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
308 size_t alloc_size, i;
310 /* Allocated size is a power of 2 to map sequence numbers
311 * (which may be up to 14 bits) in to a smaller array that is
312 * sized for the negotiated ERTM transmit windows.
314 alloc_size = roundup_pow_of_two(size);
316 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
320 seq_list->mask = alloc_size - 1;
321 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
322 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
323 for (i = 0; i < alloc_size; i++)
324 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
329 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
331 kfree(seq_list->list);
334 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
337 /* Constant-time check for list membership */
338 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
341 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
343 u16 mask = seq_list->mask;
345 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
346 /* In case someone tries to pop the head of an empty list */
347 return L2CAP_SEQ_LIST_CLEAR;
348 } else if (seq_list->head == seq) {
349 /* Head can be removed in constant time */
350 seq_list->head = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
354 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
355 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
358 /* Walk the list to find the sequence number */
359 u16 prev = seq_list->head;
360 while (seq_list->list[prev & mask] != seq) {
361 prev = seq_list->list[prev & mask];
362 if (prev == L2CAP_SEQ_LIST_TAIL)
363 return L2CAP_SEQ_LIST_CLEAR;
366 /* Unlink the number from the list and clear it */
367 seq_list->list[prev & mask] = seq_list->list[seq & mask];
368 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
369 if (seq_list->tail == seq)
370 seq_list->tail = prev;
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
377 /* Remove the head in constant time */
378 return l2cap_seq_list_remove(seq_list, seq_list->head);
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
385 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
388 for (i = 0; i <= seq_list->mask; i++)
389 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
397 u16 mask = seq_list->mask;
399 /* All appends happen in constant time */
401 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
404 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 seq_list->head = seq;
407 seq_list->list[seq_list->tail & mask] = seq;
409 seq_list->tail = seq;
410 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
413 static void l2cap_chan_timeout(struct work_struct *work)
415 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
417 struct l2cap_conn *conn = chan->conn;
420 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
422 mutex_lock(&conn->chan_lock);
423 l2cap_chan_lock(chan);
425 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
426 reason = ECONNREFUSED;
427 else if (chan->state == BT_CONNECT &&
428 chan->sec_level != BT_SECURITY_SDP)
429 reason = ECONNREFUSED;
433 l2cap_chan_close(chan, reason);
435 l2cap_chan_unlock(chan);
437 chan->ops->close(chan);
438 mutex_unlock(&conn->chan_lock);
440 l2cap_chan_put(chan);
443 struct l2cap_chan *l2cap_chan_create(void)
445 struct l2cap_chan *chan;
447 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
451 mutex_init(&chan->lock);
453 write_lock(&chan_list_lock);
454 list_add(&chan->global_l, &chan_list);
455 write_unlock(&chan_list_lock);
457 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
459 chan->state = BT_OPEN;
461 kref_init(&chan->kref);
463 /* This flag is cleared in l2cap_chan_ready() */
464 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
466 BT_DBG("chan %p", chan);
471 static void l2cap_chan_destroy(struct kref *kref)
473 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
475 BT_DBG("chan %p", chan);
477 write_lock(&chan_list_lock);
478 list_del(&chan->global_l);
479 write_unlock(&chan_list_lock);
484 void l2cap_chan_hold(struct l2cap_chan *c)
486 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
491 void l2cap_chan_put(struct l2cap_chan *c)
493 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
495 kref_put(&c->kref, l2cap_chan_destroy);
498 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
500 chan->fcs = L2CAP_FCS_CRC16;
501 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
502 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
503 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
504 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
505 chan->sec_level = BT_SECURITY_LOW;
507 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
512 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
513 __le16_to_cpu(chan->psm), chan->dcid);
515 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
519 switch (chan->chan_type) {
520 case L2CAP_CHAN_CONN_ORIENTED:
521 if (conn->hcon->type == LE_LINK) {
523 chan->omtu = L2CAP_DEFAULT_MTU;
524 if (chan->dcid == L2CAP_CID_ATT)
525 chan->scid = L2CAP_CID_ATT;
527 chan->scid = l2cap_alloc_cid(conn);
529 /* Alloc CID for connection-oriented socket */
530 chan->scid = l2cap_alloc_cid(conn);
531 chan->omtu = L2CAP_DEFAULT_MTU;
535 case L2CAP_CHAN_CONN_LESS:
536 /* Connectionless socket */
537 chan->scid = L2CAP_CID_CONN_LESS;
538 chan->dcid = L2CAP_CID_CONN_LESS;
539 chan->omtu = L2CAP_DEFAULT_MTU;
542 case L2CAP_CHAN_CONN_FIX_A2MP:
543 chan->scid = L2CAP_CID_A2MP;
544 chan->dcid = L2CAP_CID_A2MP;
545 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
546 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
550 /* Raw socket can send/recv signalling messages only */
551 chan->scid = L2CAP_CID_SIGNALING;
552 chan->dcid = L2CAP_CID_SIGNALING;
553 chan->omtu = L2CAP_DEFAULT_MTU;
556 chan->local_id = L2CAP_BESTEFFORT_ID;
557 chan->local_stype = L2CAP_SERV_BESTEFFORT;
558 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
559 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
560 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
561 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
563 l2cap_chan_hold(chan);
565 hci_conn_hold(conn->hcon);
567 list_add(&chan->list, &conn->chan_l);
570 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
572 mutex_lock(&conn->chan_lock);
573 __l2cap_chan_add(conn, chan);
574 mutex_unlock(&conn->chan_lock);
577 void l2cap_chan_del(struct l2cap_chan *chan, int err)
579 struct l2cap_conn *conn = chan->conn;
581 __clear_chan_timer(chan);
583 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
586 struct amp_mgr *mgr = conn->hcon->amp_mgr;
587 /* Delete from channel list */
588 list_del(&chan->list);
590 l2cap_chan_put(chan);
594 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
595 hci_conn_drop(conn->hcon);
597 if (mgr && mgr->bredr_chan == chan)
598 mgr->bredr_chan = NULL;
601 if (chan->hs_hchan) {
602 struct hci_chan *hs_hchan = chan->hs_hchan;
604 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
605 amp_disconnect_logical_link(hs_hchan);
608 chan->ops->teardown(chan, err);
610 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
614 case L2CAP_MODE_BASIC:
617 case L2CAP_MODE_ERTM:
618 __clear_retrans_timer(chan);
619 __clear_monitor_timer(chan);
620 __clear_ack_timer(chan);
622 skb_queue_purge(&chan->srej_q);
624 l2cap_seq_list_free(&chan->srej_list);
625 l2cap_seq_list_free(&chan->retrans_list);
629 case L2CAP_MODE_STREAMING:
630 skb_queue_purge(&chan->tx_q);
637 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
639 struct l2cap_conn *conn = chan->conn;
641 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
643 switch (chan->state) {
645 chan->ops->teardown(chan, 0);
650 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
651 conn->hcon->type == ACL_LINK) {
652 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
653 l2cap_send_disconn_req(chan, reason);
655 l2cap_chan_del(chan, reason);
659 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
660 conn->hcon->type == ACL_LINK) {
661 struct l2cap_conn_rsp rsp;
664 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
665 result = L2CAP_CR_SEC_BLOCK;
667 result = L2CAP_CR_BAD_PSM;
669 l2cap_state_change(chan, BT_DISCONN);
671 rsp.scid = cpu_to_le16(chan->dcid);
672 rsp.dcid = cpu_to_le16(chan->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
675 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
679 l2cap_chan_del(chan, reason);
684 l2cap_chan_del(chan, reason);
688 chan->ops->teardown(chan, 0);
693 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
695 switch (chan->chan_type) {
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_DEDICATED_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_DEDICATED_BONDING;
703 return HCI_AT_NO_BONDING;
706 case L2CAP_CHAN_CONN_LESS:
707 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
708 if (chan->sec_level == BT_SECURITY_LOW)
709 chan->sec_level = BT_SECURITY_SDP;
711 if (chan->sec_level == BT_SECURITY_HIGH)
712 return HCI_AT_NO_BONDING_MITM;
714 return HCI_AT_NO_BONDING;
716 case L2CAP_CHAN_CONN_ORIENTED:
717 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
718 if (chan->sec_level == BT_SECURITY_LOW)
719 chan->sec_level = BT_SECURITY_SDP;
721 if (chan->sec_level == BT_SECURITY_HIGH)
722 return HCI_AT_NO_BONDING_MITM;
724 return HCI_AT_NO_BONDING;
728 switch (chan->sec_level) {
729 case BT_SECURITY_HIGH:
730 return HCI_AT_GENERAL_BONDING_MITM;
731 case BT_SECURITY_MEDIUM:
732 return HCI_AT_GENERAL_BONDING;
734 return HCI_AT_NO_BONDING;
740 /* Service level security */
741 int l2cap_chan_check_security(struct l2cap_chan *chan)
743 struct l2cap_conn *conn = chan->conn;
746 auth_type = l2cap_get_auth_type(chan);
748 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
751 static u8 l2cap_get_ident(struct l2cap_conn *conn)
755 /* Get next available identificator.
756 * 1 - 128 are used by kernel.
757 * 129 - 199 are reserved.
758 * 200 - 254 are used by utilities like l2ping, etc.
761 spin_lock(&conn->lock);
763 if (++conn->tx_ident > 128)
768 spin_unlock(&conn->lock);
773 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
776 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
779 BT_DBG("code 0x%2.2x", code);
784 if (lmp_no_flush_capable(conn->hcon->hdev))
785 flags = ACL_START_NO_FLUSH;
789 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
790 skb->priority = HCI_PRIO_MAX;
792 hci_send_acl(conn->hchan, skb, flags);
795 static bool __chan_is_moving(struct l2cap_chan *chan)
797 return chan->move_state != L2CAP_MOVE_STABLE &&
798 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
801 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
803 struct hci_conn *hcon = chan->conn->hcon;
806 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
809 if (chan->hs_hcon && !__chan_is_moving(chan)) {
811 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
818 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
819 lmp_no_flush_capable(hcon->hdev))
820 flags = ACL_START_NO_FLUSH;
824 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
825 hci_send_acl(chan->conn->hchan, skb, flags);
828 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
830 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
831 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
833 if (enh & L2CAP_CTRL_FRAME_TYPE) {
836 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
837 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
844 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
845 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
852 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
854 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
855 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
857 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
860 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
861 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
868 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
869 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
876 static inline void __unpack_control(struct l2cap_chan *chan,
879 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
880 __unpack_extended_control(get_unaligned_le32(skb->data),
881 &bt_cb(skb)->control);
882 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
884 __unpack_enhanced_control(get_unaligned_le16(skb->data),
885 &bt_cb(skb)->control);
886 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
890 static u32 __pack_extended_control(struct l2cap_ctrl *control)
894 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
897 if (control->sframe) {
898 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
899 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
900 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
902 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
903 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
909 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
913 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
914 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
916 if (control->sframe) {
917 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
918 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
919 packed |= L2CAP_CTRL_FRAME_TYPE;
921 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
922 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
928 static inline void __pack_control(struct l2cap_chan *chan,
929 struct l2cap_ctrl *control,
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
933 put_unaligned_le32(__pack_extended_control(control),
934 skb->data + L2CAP_HDR_SIZE);
936 put_unaligned_le16(__pack_enhanced_control(control),
937 skb->data + L2CAP_HDR_SIZE);
941 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
943 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
944 return L2CAP_EXT_HDR_SIZE;
946 return L2CAP_ENH_HDR_SIZE;
949 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
953 struct l2cap_hdr *lh;
954 int hlen = __ertm_hdr_size(chan);
956 if (chan->fcs == L2CAP_FCS_CRC16)
957 hlen += L2CAP_FCS_SIZE;
959 skb = bt_skb_alloc(hlen, GFP_KERNEL);
962 return ERR_PTR(-ENOMEM);
964 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
965 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
966 lh->cid = cpu_to_le16(chan->dcid);
968 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
969 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
971 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
973 if (chan->fcs == L2CAP_FCS_CRC16) {
974 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
975 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
978 skb->priority = HCI_PRIO_MAX;
982 static void l2cap_send_sframe(struct l2cap_chan *chan,
983 struct l2cap_ctrl *control)
988 BT_DBG("chan %p, control %p", chan, control);
990 if (!control->sframe)
993 if (__chan_is_moving(chan))
996 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1000 if (control->super == L2CAP_SUPER_RR)
1001 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1002 else if (control->super == L2CAP_SUPER_RNR)
1003 set_bit(CONN_RNR_SENT, &chan->conn_state);
1005 if (control->super != L2CAP_SUPER_SREJ) {
1006 chan->last_acked_seq = control->reqseq;
1007 __clear_ack_timer(chan);
1010 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1011 control->final, control->poll, control->super);
1013 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1014 control_field = __pack_extended_control(control);
1016 control_field = __pack_enhanced_control(control);
1018 skb = l2cap_create_sframe_pdu(chan, control_field);
1020 l2cap_do_send(chan, skb);
1023 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1025 struct l2cap_ctrl control;
1027 BT_DBG("chan %p, poll %d", chan, poll);
1029 memset(&control, 0, sizeof(control));
1031 control.poll = poll;
1033 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1034 control.super = L2CAP_SUPER_RNR;
1036 control.super = L2CAP_SUPER_RR;
1038 control.reqseq = chan->buffer_seq;
1039 l2cap_send_sframe(chan, &control);
1042 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1044 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1047 static bool __amp_capable(struct l2cap_chan *chan)
1049 struct l2cap_conn *conn = chan->conn;
1050 struct hci_dev *hdev;
1051 bool amp_available = false;
1053 if (!conn->hs_enabled)
1056 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1059 read_lock(&hci_dev_list_lock);
1060 list_for_each_entry(hdev, &hci_dev_list, list) {
1061 if (hdev->amp_type != AMP_TYPE_BREDR &&
1062 test_bit(HCI_UP, &hdev->flags)) {
1063 amp_available = true;
1067 read_unlock(&hci_dev_list_lock);
1069 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1070 return amp_available;
1075 static bool l2cap_check_efs(struct l2cap_chan *chan)
1077 /* Check EFS parameters */
1081 void l2cap_send_conn_req(struct l2cap_chan *chan)
1083 struct l2cap_conn *conn = chan->conn;
1084 struct l2cap_conn_req req;
1086 req.scid = cpu_to_le16(chan->scid);
1087 req.psm = chan->psm;
1089 chan->ident = l2cap_get_ident(conn);
1091 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1093 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1096 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1098 struct l2cap_create_chan_req req;
1099 req.scid = cpu_to_le16(chan->scid);
1100 req.psm = chan->psm;
1101 req.amp_id = amp_id;
1103 chan->ident = l2cap_get_ident(chan->conn);
1105 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1109 static void l2cap_move_setup(struct l2cap_chan *chan)
1111 struct sk_buff *skb;
1113 BT_DBG("chan %p", chan);
1115 if (chan->mode != L2CAP_MODE_ERTM)
1118 __clear_retrans_timer(chan);
1119 __clear_monitor_timer(chan);
1120 __clear_ack_timer(chan);
1122 chan->retry_count = 0;
1123 skb_queue_walk(&chan->tx_q, skb) {
1124 if (bt_cb(skb)->control.retries)
1125 bt_cb(skb)->control.retries = 1;
1130 chan->expected_tx_seq = chan->buffer_seq;
1132 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1133 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1134 l2cap_seq_list_clear(&chan->retrans_list);
1135 l2cap_seq_list_clear(&chan->srej_list);
1136 skb_queue_purge(&chan->srej_q);
1138 chan->tx_state = L2CAP_TX_STATE_XMIT;
1139 chan->rx_state = L2CAP_RX_STATE_MOVE;
1141 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1144 static void l2cap_move_done(struct l2cap_chan *chan)
1146 u8 move_role = chan->move_role;
1147 BT_DBG("chan %p", chan);
1149 chan->move_state = L2CAP_MOVE_STABLE;
1150 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1152 if (chan->mode != L2CAP_MODE_ERTM)
1155 switch (move_role) {
1156 case L2CAP_MOVE_ROLE_INITIATOR:
1157 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1158 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1160 case L2CAP_MOVE_ROLE_RESPONDER:
1161 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1166 static void l2cap_chan_ready(struct l2cap_chan *chan)
1168 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1169 chan->conf_state = 0;
1170 __clear_chan_timer(chan);
1172 chan->state = BT_CONNECTED;
1174 chan->ops->ready(chan);
1177 static void l2cap_start_connection(struct l2cap_chan *chan)
1179 if (__amp_capable(chan)) {
1180 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1181 a2mp_discover_amp(chan);
1183 l2cap_send_conn_req(chan);
1187 static void l2cap_do_start(struct l2cap_chan *chan)
1189 struct l2cap_conn *conn = chan->conn;
1191 if (conn->hcon->type == LE_LINK) {
1192 l2cap_chan_ready(chan);
1196 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1197 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1200 if (l2cap_chan_check_security(chan) &&
1201 __l2cap_no_conn_pending(chan)) {
1202 l2cap_start_connection(chan);
1205 struct l2cap_info_req req;
1206 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1208 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1209 conn->info_ident = l2cap_get_ident(conn);
1211 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1213 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1218 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1220 u32 local_feat_mask = l2cap_feat_mask;
1222 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1225 case L2CAP_MODE_ERTM:
1226 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1227 case L2CAP_MODE_STREAMING:
1228 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1234 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1236 struct l2cap_conn *conn = chan->conn;
1237 struct l2cap_disconn_req req;
1242 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1243 __clear_retrans_timer(chan);
1244 __clear_monitor_timer(chan);
1245 __clear_ack_timer(chan);
1248 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1249 l2cap_state_change(chan, BT_DISCONN);
1253 req.dcid = cpu_to_le16(chan->dcid);
1254 req.scid = cpu_to_le16(chan->scid);
1255 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1258 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1261 /* ---- L2CAP connections ---- */
1262 static void l2cap_conn_start(struct l2cap_conn *conn)
1264 struct l2cap_chan *chan, *tmp;
1266 BT_DBG("conn %p", conn);
1268 mutex_lock(&conn->chan_lock);
1270 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1271 l2cap_chan_lock(chan);
1273 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1274 l2cap_chan_unlock(chan);
1278 if (chan->state == BT_CONNECT) {
1279 if (!l2cap_chan_check_security(chan) ||
1280 !__l2cap_no_conn_pending(chan)) {
1281 l2cap_chan_unlock(chan);
1285 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1286 && test_bit(CONF_STATE2_DEVICE,
1287 &chan->conf_state)) {
1288 l2cap_chan_close(chan, ECONNRESET);
1289 l2cap_chan_unlock(chan);
1293 l2cap_start_connection(chan);
1295 } else if (chan->state == BT_CONNECT2) {
1296 struct l2cap_conn_rsp rsp;
1298 rsp.scid = cpu_to_le16(chan->dcid);
1299 rsp.dcid = cpu_to_le16(chan->scid);
1301 if (l2cap_chan_check_security(chan)) {
1302 struct sock *sk = chan->sk;
1305 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1306 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1307 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1308 chan->ops->defer(chan);
1311 __l2cap_state_change(chan, BT_CONFIG);
1312 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1313 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1317 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1318 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1321 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1324 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1325 rsp.result != L2CAP_CR_SUCCESS) {
1326 l2cap_chan_unlock(chan);
1330 set_bit(CONF_REQ_SENT, &chan->conf_state);
1331 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1332 l2cap_build_conf_req(chan, buf), buf);
1333 chan->num_conf_req++;
1336 l2cap_chan_unlock(chan);
1339 mutex_unlock(&conn->chan_lock);
1342 /* Find socket with cid and source/destination bdaddr.
1343 * Returns closest match, locked.
1345 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1349 struct l2cap_chan *c, *c1 = NULL;
1351 read_lock(&chan_list_lock);
1353 list_for_each_entry(c, &chan_list, global_l) {
1354 if (state && c->state != state)
1357 if (c->scid == cid) {
1358 int src_match, dst_match;
1359 int src_any, dst_any;
1362 src_match = !bacmp(&c->src, src);
1363 dst_match = !bacmp(&c->dst, dst);
1364 if (src_match && dst_match) {
1365 read_unlock(&chan_list_lock);
1370 src_any = !bacmp(&c->src, BDADDR_ANY);
1371 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1372 if ((src_match && dst_any) || (src_any && dst_match) ||
1373 (src_any && dst_any))
1378 read_unlock(&chan_list_lock);
1383 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1385 struct sock *parent;
1386 struct l2cap_chan *chan, *pchan;
1390 /* Check if we have socket listening on cid */
1391 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1392 &conn->hcon->src, &conn->hcon->dst);
1396 /* Client ATT sockets should override the server one */
1397 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1404 chan = pchan->ops->new_connection(pchan);
1408 chan->dcid = L2CAP_CID_ATT;
1410 bacpy(&chan->src, &conn->hcon->src);
1411 bacpy(&chan->dst, &conn->hcon->dst);
1412 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
1413 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
1415 __l2cap_chan_add(conn, chan);
1418 release_sock(parent);
1421 static void l2cap_conn_ready(struct l2cap_conn *conn)
1423 struct l2cap_chan *chan;
1424 struct hci_conn *hcon = conn->hcon;
1426 BT_DBG("conn %p", conn);
1428 /* For outgoing pairing which doesn't necessarily have an
1429 * associated socket (e.g. mgmt_pair_device).
1431 if (hcon->out && hcon->type == LE_LINK)
1432 smp_conn_security(hcon, hcon->pending_sec_level);
1434 mutex_lock(&conn->chan_lock);
1436 if (hcon->type == LE_LINK)
1437 l2cap_le_conn_ready(conn);
1439 list_for_each_entry(chan, &conn->chan_l, list) {
1441 l2cap_chan_lock(chan);
1443 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1444 l2cap_chan_unlock(chan);
1448 if (hcon->type == LE_LINK) {
1449 if (smp_conn_security(hcon, chan->sec_level))
1450 l2cap_chan_ready(chan);
1452 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1453 l2cap_chan_ready(chan);
1455 } else if (chan->state == BT_CONNECT) {
1456 l2cap_do_start(chan);
1459 l2cap_chan_unlock(chan);
1462 mutex_unlock(&conn->chan_lock);
1465 /* Notify sockets that we cannot guaranty reliability anymore */
1466 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1468 struct l2cap_chan *chan;
1470 BT_DBG("conn %p", conn);
1472 mutex_lock(&conn->chan_lock);
1474 list_for_each_entry(chan, &conn->chan_l, list) {
1475 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1476 l2cap_chan_set_err(chan, err);
1479 mutex_unlock(&conn->chan_lock);
1482 static void l2cap_info_timeout(struct work_struct *work)
1484 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1487 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1488 conn->info_ident = 0;
1490 l2cap_conn_start(conn);
1495 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1496 * callback is called during registration. The ->remove callback is called
1497 * during unregistration.
1498 * An l2cap_user object can either be explicitly unregistered or when the
1499 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1500 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1501 * External modules must own a reference to the l2cap_conn object if they intend
1502 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1503 * any time if they don't.
1506 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1508 struct hci_dev *hdev = conn->hcon->hdev;
1511 /* We need to check whether l2cap_conn is registered. If it is not, we
1512 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1513 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1514 * relies on the parent hci_conn object to be locked. This itself relies
1515 * on the hci_dev object to be locked. So we must lock the hci device
1520 if (user->list.next || user->list.prev) {
1525 /* conn->hchan is NULL after l2cap_conn_del() was called */
1531 ret = user->probe(conn, user);
1535 list_add(&user->list, &conn->users);
1539 hci_dev_unlock(hdev);
1542 EXPORT_SYMBOL(l2cap_register_user);
1544 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1546 struct hci_dev *hdev = conn->hcon->hdev;
1550 if (!user->list.next || !user->list.prev)
1553 list_del(&user->list);
1554 user->list.next = NULL;
1555 user->list.prev = NULL;
1556 user->remove(conn, user);
1559 hci_dev_unlock(hdev);
1561 EXPORT_SYMBOL(l2cap_unregister_user);
1563 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1565 struct l2cap_user *user;
1567 while (!list_empty(&conn->users)) {
1568 user = list_first_entry(&conn->users, struct l2cap_user, list);
1569 list_del(&user->list);
1570 user->list.next = NULL;
1571 user->list.prev = NULL;
1572 user->remove(conn, user);
1576 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1578 struct l2cap_conn *conn = hcon->l2cap_data;
1579 struct l2cap_chan *chan, *l;
1584 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1586 kfree_skb(conn->rx_skb);
1588 l2cap_unregister_all_users(conn);
1590 mutex_lock(&conn->chan_lock);
1593 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1594 l2cap_chan_hold(chan);
1595 l2cap_chan_lock(chan);
1597 l2cap_chan_del(chan, err);
1599 l2cap_chan_unlock(chan);
1601 chan->ops->close(chan);
1602 l2cap_chan_put(chan);
1605 mutex_unlock(&conn->chan_lock);
1607 hci_chan_del(conn->hchan);
1609 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1610 cancel_delayed_work_sync(&conn->info_timer);
1612 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1613 cancel_delayed_work_sync(&conn->security_timer);
1614 smp_chan_destroy(conn);
1617 hcon->l2cap_data = NULL;
1619 l2cap_conn_put(conn);
1622 static void security_timeout(struct work_struct *work)
1624 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1625 security_timer.work);
1627 BT_DBG("conn %p", conn);
1629 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1630 smp_chan_destroy(conn);
1631 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1635 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1637 struct l2cap_conn *conn = hcon->l2cap_data;
1638 struct hci_chan *hchan;
1643 hchan = hci_chan_create(hcon);
1647 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1649 hci_chan_del(hchan);
1653 kref_init(&conn->ref);
1654 hcon->l2cap_data = conn;
1656 hci_conn_get(conn->hcon);
1657 conn->hchan = hchan;
1659 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1661 switch (hcon->type) {
1663 if (hcon->hdev->le_mtu) {
1664 conn->mtu = hcon->hdev->le_mtu;
1669 conn->mtu = hcon->hdev->acl_mtu;
1673 conn->feat_mask = 0;
1675 if (hcon->type == ACL_LINK)
1676 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1677 &hcon->hdev->dev_flags);
1679 spin_lock_init(&conn->lock);
1680 mutex_init(&conn->chan_lock);
1682 INIT_LIST_HEAD(&conn->chan_l);
1683 INIT_LIST_HEAD(&conn->users);
1685 if (hcon->type == LE_LINK)
1686 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1688 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1690 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1695 static void l2cap_conn_free(struct kref *ref)
1697 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1699 hci_conn_put(conn->hcon);
1703 void l2cap_conn_get(struct l2cap_conn *conn)
1705 kref_get(&conn->ref);
1707 EXPORT_SYMBOL(l2cap_conn_get);
1709 void l2cap_conn_put(struct l2cap_conn *conn)
1711 kref_put(&conn->ref, l2cap_conn_free);
1713 EXPORT_SYMBOL(l2cap_conn_put);
1715 /* ---- Socket interface ---- */
1717 /* Find socket with psm and source / destination bdaddr.
1718 * Returns closest match.
1720 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1724 struct l2cap_chan *c, *c1 = NULL;
1726 read_lock(&chan_list_lock);
1728 list_for_each_entry(c, &chan_list, global_l) {
1729 if (state && c->state != state)
1732 if (c->psm == psm) {
1733 int src_match, dst_match;
1734 int src_any, dst_any;
1737 src_match = !bacmp(&c->src, src);
1738 dst_match = !bacmp(&c->dst, dst);
1739 if (src_match && dst_match) {
1740 read_unlock(&chan_list_lock);
1745 src_any = !bacmp(&c->src, BDADDR_ANY);
1746 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1747 if ((src_match && dst_any) || (src_any && dst_match) ||
1748 (src_any && dst_any))
1753 read_unlock(&chan_list_lock);
1758 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1759 bdaddr_t *dst, u8 dst_type)
1761 struct l2cap_conn *conn;
1762 struct hci_conn *hcon;
1763 struct hci_dev *hdev;
1767 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1768 dst_type, __le16_to_cpu(psm));
1770 hdev = hci_get_route(dst, &chan->src);
1772 return -EHOSTUNREACH;
1776 l2cap_chan_lock(chan);
1778 /* PSM must be odd and lsb of upper byte must be 0 */
1779 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1780 chan->chan_type != L2CAP_CHAN_RAW) {
1785 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1790 switch (chan->mode) {
1791 case L2CAP_MODE_BASIC:
1793 case L2CAP_MODE_ERTM:
1794 case L2CAP_MODE_STREAMING:
1803 switch (chan->state) {
1807 /* Already connecting */
1812 /* Already connected */
1826 /* Set destination address and psm */
1827 bacpy(&chan->dst, dst);
1828 chan->dst_type = dst_type;
1833 auth_type = l2cap_get_auth_type(chan);
1835 if (bdaddr_type_is_le(dst_type))
1836 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1837 chan->sec_level, auth_type);
1839 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1840 chan->sec_level, auth_type);
1843 err = PTR_ERR(hcon);
1847 conn = l2cap_conn_add(hcon);
1849 hci_conn_drop(hcon);
1854 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1855 hci_conn_drop(hcon);
1860 /* Update source addr of the socket */
1861 bacpy(&chan->src, &hcon->src);
1862 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1864 l2cap_chan_unlock(chan);
1865 l2cap_chan_add(conn, chan);
1866 l2cap_chan_lock(chan);
1868 /* l2cap_chan_add takes its own ref so we can drop this one */
1869 hci_conn_drop(hcon);
1871 l2cap_state_change(chan, BT_CONNECT);
1872 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1874 if (hcon->state == BT_CONNECTED) {
1875 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1876 __clear_chan_timer(chan);
1877 if (l2cap_chan_check_security(chan))
1878 l2cap_state_change(chan, BT_CONNECTED);
1880 l2cap_do_start(chan);
1886 l2cap_chan_unlock(chan);
1887 hci_dev_unlock(hdev);
1892 static void l2cap_monitor_timeout(struct work_struct *work)
1894 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1895 monitor_timer.work);
1897 BT_DBG("chan %p", chan);
1899 l2cap_chan_lock(chan);
1902 l2cap_chan_unlock(chan);
1903 l2cap_chan_put(chan);
1907 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1909 l2cap_chan_unlock(chan);
1910 l2cap_chan_put(chan);
1913 static void l2cap_retrans_timeout(struct work_struct *work)
1915 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1916 retrans_timer.work);
1918 BT_DBG("chan %p", chan);
1920 l2cap_chan_lock(chan);
1923 l2cap_chan_unlock(chan);
1924 l2cap_chan_put(chan);
1928 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1929 l2cap_chan_unlock(chan);
1930 l2cap_chan_put(chan);
1933 static void l2cap_streaming_send(struct l2cap_chan *chan,
1934 struct sk_buff_head *skbs)
1936 struct sk_buff *skb;
1937 struct l2cap_ctrl *control;
1939 BT_DBG("chan %p, skbs %p", chan, skbs);
1941 if (__chan_is_moving(chan))
1944 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1946 while (!skb_queue_empty(&chan->tx_q)) {
1948 skb = skb_dequeue(&chan->tx_q);
1950 bt_cb(skb)->control.retries = 1;
1951 control = &bt_cb(skb)->control;
1953 control->reqseq = 0;
1954 control->txseq = chan->next_tx_seq;
1956 __pack_control(chan, control, skb);
1958 if (chan->fcs == L2CAP_FCS_CRC16) {
1959 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1960 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1963 l2cap_do_send(chan, skb);
1965 BT_DBG("Sent txseq %u", control->txseq);
1967 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1968 chan->frames_sent++;
1972 static int l2cap_ertm_send(struct l2cap_chan *chan)
1974 struct sk_buff *skb, *tx_skb;
1975 struct l2cap_ctrl *control;
1978 BT_DBG("chan %p", chan);
1980 if (chan->state != BT_CONNECTED)
1983 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1986 if (__chan_is_moving(chan))
1989 while (chan->tx_send_head &&
1990 chan->unacked_frames < chan->remote_tx_win &&
1991 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1993 skb = chan->tx_send_head;
1995 bt_cb(skb)->control.retries = 1;
1996 control = &bt_cb(skb)->control;
1998 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2001 control->reqseq = chan->buffer_seq;
2002 chan->last_acked_seq = chan->buffer_seq;
2003 control->txseq = chan->next_tx_seq;
2005 __pack_control(chan, control, skb);
2007 if (chan->fcs == L2CAP_FCS_CRC16) {
2008 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2009 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2012 /* Clone after data has been modified. Data is assumed to be
2013 read-only (for locking purposes) on cloned sk_buffs.
2015 tx_skb = skb_clone(skb, GFP_KERNEL);
2020 __set_retrans_timer(chan);
2022 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2023 chan->unacked_frames++;
2024 chan->frames_sent++;
2027 if (skb_queue_is_last(&chan->tx_q, skb))
2028 chan->tx_send_head = NULL;
2030 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2032 l2cap_do_send(chan, tx_skb);
2033 BT_DBG("Sent txseq %u", control->txseq);
2036 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2037 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2042 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2044 struct l2cap_ctrl control;
2045 struct sk_buff *skb;
2046 struct sk_buff *tx_skb;
2049 BT_DBG("chan %p", chan);
2051 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2054 if (__chan_is_moving(chan))
2057 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2058 seq = l2cap_seq_list_pop(&chan->retrans_list);
2060 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2062 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2067 bt_cb(skb)->control.retries++;
2068 control = bt_cb(skb)->control;
2070 if (chan->max_tx != 0 &&
2071 bt_cb(skb)->control.retries > chan->max_tx) {
2072 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2073 l2cap_send_disconn_req(chan, ECONNRESET);
2074 l2cap_seq_list_clear(&chan->retrans_list);
2078 control.reqseq = chan->buffer_seq;
2079 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2084 if (skb_cloned(skb)) {
2085 /* Cloned sk_buffs are read-only, so we need a
2088 tx_skb = skb_copy(skb, GFP_KERNEL);
2090 tx_skb = skb_clone(skb, GFP_KERNEL);
2094 l2cap_seq_list_clear(&chan->retrans_list);
2098 /* Update skb contents */
2099 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2100 put_unaligned_le32(__pack_extended_control(&control),
2101 tx_skb->data + L2CAP_HDR_SIZE);
2103 put_unaligned_le16(__pack_enhanced_control(&control),
2104 tx_skb->data + L2CAP_HDR_SIZE);
2107 if (chan->fcs == L2CAP_FCS_CRC16) {
2108 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2109 put_unaligned_le16(fcs, skb_put(tx_skb,
2113 l2cap_do_send(chan, tx_skb);
2115 BT_DBG("Resent txseq %d", control.txseq);
2117 chan->last_acked_seq = chan->buffer_seq;
2121 static void l2cap_retransmit(struct l2cap_chan *chan,
2122 struct l2cap_ctrl *control)
2124 BT_DBG("chan %p, control %p", chan, control);
2126 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2127 l2cap_ertm_resend(chan);
2130 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2131 struct l2cap_ctrl *control)
2133 struct sk_buff *skb;
2135 BT_DBG("chan %p, control %p", chan, control);
2138 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2140 l2cap_seq_list_clear(&chan->retrans_list);
2142 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2145 if (chan->unacked_frames) {
2146 skb_queue_walk(&chan->tx_q, skb) {
2147 if (bt_cb(skb)->control.txseq == control->reqseq ||
2148 skb == chan->tx_send_head)
2152 skb_queue_walk_from(&chan->tx_q, skb) {
2153 if (skb == chan->tx_send_head)
2156 l2cap_seq_list_append(&chan->retrans_list,
2157 bt_cb(skb)->control.txseq);
2160 l2cap_ertm_resend(chan);
2164 static void l2cap_send_ack(struct l2cap_chan *chan)
2166 struct l2cap_ctrl control;
2167 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2168 chan->last_acked_seq);
2171 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2172 chan, chan->last_acked_seq, chan->buffer_seq);
2174 memset(&control, 0, sizeof(control));
2177 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2178 chan->rx_state == L2CAP_RX_STATE_RECV) {
2179 __clear_ack_timer(chan);
2180 control.super = L2CAP_SUPER_RNR;
2181 control.reqseq = chan->buffer_seq;
2182 l2cap_send_sframe(chan, &control);
2184 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2185 l2cap_ertm_send(chan);
2186 /* If any i-frames were sent, they included an ack */
2187 if (chan->buffer_seq == chan->last_acked_seq)
2191 /* Ack now if the window is 3/4ths full.
2192 * Calculate without mul or div
2194 threshold = chan->ack_win;
2195 threshold += threshold << 1;
2198 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2201 if (frames_to_ack >= threshold) {
2202 __clear_ack_timer(chan);
2203 control.super = L2CAP_SUPER_RR;
2204 control.reqseq = chan->buffer_seq;
2205 l2cap_send_sframe(chan, &control);
2210 __set_ack_timer(chan);
2214 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2215 struct msghdr *msg, int len,
2216 int count, struct sk_buff *skb)
2218 struct l2cap_conn *conn = chan->conn;
2219 struct sk_buff **frag;
2222 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2228 /* Continuation fragments (no L2CAP header) */
2229 frag = &skb_shinfo(skb)->frag_list;
2231 struct sk_buff *tmp;
2233 count = min_t(unsigned int, conn->mtu, len);
2235 tmp = chan->ops->alloc_skb(chan, count,
2236 msg->msg_flags & MSG_DONTWAIT);
2238 return PTR_ERR(tmp);
2242 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2245 (*frag)->priority = skb->priority;
2250 skb->len += (*frag)->len;
2251 skb->data_len += (*frag)->len;
2253 frag = &(*frag)->next;
2259 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2260 struct msghdr *msg, size_t len,
2263 struct l2cap_conn *conn = chan->conn;
2264 struct sk_buff *skb;
2265 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2266 struct l2cap_hdr *lh;
2268 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2269 __le16_to_cpu(chan->psm), len, priority);
2271 count = min_t(unsigned int, (conn->mtu - hlen), len);
2273 skb = chan->ops->alloc_skb(chan, count + hlen,
2274 msg->msg_flags & MSG_DONTWAIT);
2278 skb->priority = priority;
2280 /* Create L2CAP header */
2281 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2282 lh->cid = cpu_to_le16(chan->dcid);
2283 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2284 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2286 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2287 if (unlikely(err < 0)) {
2289 return ERR_PTR(err);
2294 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2295 struct msghdr *msg, size_t len,
2298 struct l2cap_conn *conn = chan->conn;
2299 struct sk_buff *skb;
2301 struct l2cap_hdr *lh;
2303 BT_DBG("chan %p len %zu", chan, len);
2305 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2307 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2308 msg->msg_flags & MSG_DONTWAIT);
2312 skb->priority = priority;
2314 /* Create L2CAP header */
2315 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2316 lh->cid = cpu_to_le16(chan->dcid);
2317 lh->len = cpu_to_le16(len);
2319 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2320 if (unlikely(err < 0)) {
2322 return ERR_PTR(err);
2327 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2328 struct msghdr *msg, size_t len,
2331 struct l2cap_conn *conn = chan->conn;
2332 struct sk_buff *skb;
2333 int err, count, hlen;
2334 struct l2cap_hdr *lh;
2336 BT_DBG("chan %p len %zu", chan, len);
2339 return ERR_PTR(-ENOTCONN);
2341 hlen = __ertm_hdr_size(chan);
2344 hlen += L2CAP_SDULEN_SIZE;
2346 if (chan->fcs == L2CAP_FCS_CRC16)
2347 hlen += L2CAP_FCS_SIZE;
2349 count = min_t(unsigned int, (conn->mtu - hlen), len);
2351 skb = chan->ops->alloc_skb(chan, count + hlen,
2352 msg->msg_flags & MSG_DONTWAIT);
2356 /* Create L2CAP header */
2357 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2358 lh->cid = cpu_to_le16(chan->dcid);
2359 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2361 /* Control header is populated later */
2362 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2363 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2365 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2368 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2370 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2371 if (unlikely(err < 0)) {
2373 return ERR_PTR(err);
2376 bt_cb(skb)->control.fcs = chan->fcs;
2377 bt_cb(skb)->control.retries = 0;
2381 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2382 struct sk_buff_head *seg_queue,
2383 struct msghdr *msg, size_t len)
2385 struct sk_buff *skb;
2390 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2392 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2393 * so fragmented skbs are not used. The HCI layer's handling
2394 * of fragmented skbs is not compatible with ERTM's queueing.
2397 /* PDU size is derived from the HCI MTU */
2398 pdu_len = chan->conn->mtu;
2400 /* Constrain PDU size for BR/EDR connections */
2402 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2404 /* Adjust for largest possible L2CAP overhead. */
2406 pdu_len -= L2CAP_FCS_SIZE;
2408 pdu_len -= __ertm_hdr_size(chan);
2410 /* Remote device may have requested smaller PDUs */
2411 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2413 if (len <= pdu_len) {
2414 sar = L2CAP_SAR_UNSEGMENTED;
2418 sar = L2CAP_SAR_START;
2420 pdu_len -= L2CAP_SDULEN_SIZE;
2424 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2427 __skb_queue_purge(seg_queue);
2428 return PTR_ERR(skb);
2431 bt_cb(skb)->control.sar = sar;
2432 __skb_queue_tail(seg_queue, skb);
2437 pdu_len += L2CAP_SDULEN_SIZE;
2440 if (len <= pdu_len) {
2441 sar = L2CAP_SAR_END;
2444 sar = L2CAP_SAR_CONTINUE;
2451 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2454 struct sk_buff *skb;
2456 struct sk_buff_head seg_queue;
2458 /* Connectionless channel */
2459 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2460 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2462 return PTR_ERR(skb);
2464 l2cap_do_send(chan, skb);
2468 switch (chan->mode) {
2469 case L2CAP_MODE_BASIC:
2470 /* Check outgoing MTU */
2471 if (len > chan->omtu)
2474 /* Create a basic PDU */
2475 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2477 return PTR_ERR(skb);
2479 l2cap_do_send(chan, skb);
2483 case L2CAP_MODE_ERTM:
2484 case L2CAP_MODE_STREAMING:
2485 /* Check outgoing MTU */
2486 if (len > chan->omtu) {
2491 __skb_queue_head_init(&seg_queue);
2493 /* Do segmentation before calling in to the state machine,
2494 * since it's possible to block while waiting for memory
2497 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2499 /* The channel could have been closed while segmenting,
2500 * check that it is still connected.
2502 if (chan->state != BT_CONNECTED) {
2503 __skb_queue_purge(&seg_queue);
2510 if (chan->mode == L2CAP_MODE_ERTM)
2511 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2513 l2cap_streaming_send(chan, &seg_queue);
2517 /* If the skbs were not queued for sending, they'll still be in
2518 * seg_queue and need to be purged.
2520 __skb_queue_purge(&seg_queue);
2524 BT_DBG("bad state %1.1x", chan->mode);
2531 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2533 struct l2cap_ctrl control;
2536 BT_DBG("chan %p, txseq %u", chan, txseq);
2538 memset(&control, 0, sizeof(control));
2540 control.super = L2CAP_SUPER_SREJ;
2542 for (seq = chan->expected_tx_seq; seq != txseq;
2543 seq = __next_seq(chan, seq)) {
2544 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2545 control.reqseq = seq;
2546 l2cap_send_sframe(chan, &control);
2547 l2cap_seq_list_append(&chan->srej_list, seq);
2551 chan->expected_tx_seq = __next_seq(chan, txseq);
2554 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2556 struct l2cap_ctrl control;
2558 BT_DBG("chan %p", chan);
2560 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2563 memset(&control, 0, sizeof(control));
2565 control.super = L2CAP_SUPER_SREJ;
2566 control.reqseq = chan->srej_list.tail;
2567 l2cap_send_sframe(chan, &control);
2570 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2572 struct l2cap_ctrl control;
2576 BT_DBG("chan %p, txseq %u", chan, txseq);
2578 memset(&control, 0, sizeof(control));
2580 control.super = L2CAP_SUPER_SREJ;
2582 /* Capture initial list head to allow only one pass through the list. */
2583 initial_head = chan->srej_list.head;
2586 seq = l2cap_seq_list_pop(&chan->srej_list);
2587 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2590 control.reqseq = seq;
2591 l2cap_send_sframe(chan, &control);
2592 l2cap_seq_list_append(&chan->srej_list, seq);
2593 } while (chan->srej_list.head != initial_head);
2596 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2598 struct sk_buff *acked_skb;
2601 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2603 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2606 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2607 chan->expected_ack_seq, chan->unacked_frames);
2609 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2610 ackseq = __next_seq(chan, ackseq)) {
2612 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2614 skb_unlink(acked_skb, &chan->tx_q);
2615 kfree_skb(acked_skb);
2616 chan->unacked_frames--;
2620 chan->expected_ack_seq = reqseq;
2622 if (chan->unacked_frames == 0)
2623 __clear_retrans_timer(chan);
2625 BT_DBG("unacked_frames %u", chan->unacked_frames);
2628 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2630 BT_DBG("chan %p", chan);
2632 chan->expected_tx_seq = chan->buffer_seq;
2633 l2cap_seq_list_clear(&chan->srej_list);
2634 skb_queue_purge(&chan->srej_q);
2635 chan->rx_state = L2CAP_RX_STATE_RECV;
2638 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2639 struct l2cap_ctrl *control,
2640 struct sk_buff_head *skbs, u8 event)
2642 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2646 case L2CAP_EV_DATA_REQUEST:
2647 if (chan->tx_send_head == NULL)
2648 chan->tx_send_head = skb_peek(skbs);
2650 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2651 l2cap_ertm_send(chan);
2653 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2654 BT_DBG("Enter LOCAL_BUSY");
2655 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2657 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2658 /* The SREJ_SENT state must be aborted if we are to
2659 * enter the LOCAL_BUSY state.
2661 l2cap_abort_rx_srej_sent(chan);
2664 l2cap_send_ack(chan);
2667 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2668 BT_DBG("Exit LOCAL_BUSY");
2669 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2671 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2672 struct l2cap_ctrl local_control;
2674 memset(&local_control, 0, sizeof(local_control));
2675 local_control.sframe = 1;
2676 local_control.super = L2CAP_SUPER_RR;
2677 local_control.poll = 1;
2678 local_control.reqseq = chan->buffer_seq;
2679 l2cap_send_sframe(chan, &local_control);
2681 chan->retry_count = 1;
2682 __set_monitor_timer(chan);
2683 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2686 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2687 l2cap_process_reqseq(chan, control->reqseq);
2689 case L2CAP_EV_EXPLICIT_POLL:
2690 l2cap_send_rr_or_rnr(chan, 1);
2691 chan->retry_count = 1;
2692 __set_monitor_timer(chan);
2693 __clear_ack_timer(chan);
2694 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2696 case L2CAP_EV_RETRANS_TO:
2697 l2cap_send_rr_or_rnr(chan, 1);
2698 chan->retry_count = 1;
2699 __set_monitor_timer(chan);
2700 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2702 case L2CAP_EV_RECV_FBIT:
2703 /* Nothing to process */
2710 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2711 struct l2cap_ctrl *control,
2712 struct sk_buff_head *skbs, u8 event)
2714 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2718 case L2CAP_EV_DATA_REQUEST:
2719 if (chan->tx_send_head == NULL)
2720 chan->tx_send_head = skb_peek(skbs);
2721 /* Queue data, but don't send. */
2722 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2724 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2725 BT_DBG("Enter LOCAL_BUSY");
2726 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2728 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2729 /* The SREJ_SENT state must be aborted if we are to
2730 * enter the LOCAL_BUSY state.
2732 l2cap_abort_rx_srej_sent(chan);
2735 l2cap_send_ack(chan);
2738 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2739 BT_DBG("Exit LOCAL_BUSY");
2740 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2742 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2743 struct l2cap_ctrl local_control;
2744 memset(&local_control, 0, sizeof(local_control));
2745 local_control.sframe = 1;
2746 local_control.super = L2CAP_SUPER_RR;
2747 local_control.poll = 1;
2748 local_control.reqseq = chan->buffer_seq;
2749 l2cap_send_sframe(chan, &local_control);
2751 chan->retry_count = 1;
2752 __set_monitor_timer(chan);
2753 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2756 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2757 l2cap_process_reqseq(chan, control->reqseq);
2761 case L2CAP_EV_RECV_FBIT:
2762 if (control && control->final) {
2763 __clear_monitor_timer(chan);
2764 if (chan->unacked_frames > 0)
2765 __set_retrans_timer(chan);
2766 chan->retry_count = 0;
2767 chan->tx_state = L2CAP_TX_STATE_XMIT;
2768 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2771 case L2CAP_EV_EXPLICIT_POLL:
2774 case L2CAP_EV_MONITOR_TO:
2775 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2776 l2cap_send_rr_or_rnr(chan, 1);
2777 __set_monitor_timer(chan);
2778 chan->retry_count++;
2780 l2cap_send_disconn_req(chan, ECONNABORTED);
2788 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2789 struct sk_buff_head *skbs, u8 event)
2791 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2792 chan, control, skbs, event, chan->tx_state);
2794 switch (chan->tx_state) {
2795 case L2CAP_TX_STATE_XMIT:
2796 l2cap_tx_state_xmit(chan, control, skbs, event);
2798 case L2CAP_TX_STATE_WAIT_F:
2799 l2cap_tx_state_wait_f(chan, control, skbs, event);
2807 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2808 struct l2cap_ctrl *control)
2810 BT_DBG("chan %p, control %p", chan, control);
2811 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2814 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2815 struct l2cap_ctrl *control)
2817 BT_DBG("chan %p, control %p", chan, control);
2818 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2821 /* Copy frame to all raw sockets on that connection */
2822 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2824 struct sk_buff *nskb;
2825 struct l2cap_chan *chan;
2827 BT_DBG("conn %p", conn);
2829 mutex_lock(&conn->chan_lock);
2831 list_for_each_entry(chan, &conn->chan_l, list) {
2832 struct sock *sk = chan->sk;
2833 if (chan->chan_type != L2CAP_CHAN_RAW)
2836 /* Don't send frame to the socket it came from */
2839 nskb = skb_clone(skb, GFP_KERNEL);
2843 if (chan->ops->recv(chan, nskb))
2847 mutex_unlock(&conn->chan_lock);
2850 /* ---- L2CAP signalling commands ---- */
2851 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2852 u8 ident, u16 dlen, void *data)
2854 struct sk_buff *skb, **frag;
2855 struct l2cap_cmd_hdr *cmd;
2856 struct l2cap_hdr *lh;
2859 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2860 conn, code, ident, dlen);
2862 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2865 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2866 count = min_t(unsigned int, conn->mtu, len);
2868 skb = bt_skb_alloc(count, GFP_KERNEL);
2872 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2873 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2875 if (conn->hcon->type == LE_LINK)
2876 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2878 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2880 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2883 cmd->len = cpu_to_le16(dlen);
2886 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2887 memcpy(skb_put(skb, count), data, count);
2893 /* Continuation fragments (no L2CAP header) */
2894 frag = &skb_shinfo(skb)->frag_list;
2896 count = min_t(unsigned int, conn->mtu, len);
2898 *frag = bt_skb_alloc(count, GFP_KERNEL);
2902 memcpy(skb_put(*frag, count), data, count);
2907 frag = &(*frag)->next;
2917 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2920 struct l2cap_conf_opt *opt = *ptr;
2923 len = L2CAP_CONF_OPT_SIZE + opt->len;
2931 *val = *((u8 *) opt->val);
2935 *val = get_unaligned_le16(opt->val);
2939 *val = get_unaligned_le32(opt->val);
2943 *val = (unsigned long) opt->val;
2947 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2951 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2953 struct l2cap_conf_opt *opt = *ptr;
2955 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2962 *((u8 *) opt->val) = val;
2966 put_unaligned_le16(val, opt->val);
2970 put_unaligned_le32(val, opt->val);
2974 memcpy(opt->val, (void *) val, len);
2978 *ptr += L2CAP_CONF_OPT_SIZE + len;
2981 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2983 struct l2cap_conf_efs efs;
2985 switch (chan->mode) {
2986 case L2CAP_MODE_ERTM:
2987 efs.id = chan->local_id;
2988 efs.stype = chan->local_stype;
2989 efs.msdu = cpu_to_le16(chan->local_msdu);
2990 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2991 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2992 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2995 case L2CAP_MODE_STREAMING:
2997 efs.stype = L2CAP_SERV_BESTEFFORT;
2998 efs.msdu = cpu_to_le16(chan->local_msdu);
2999 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3008 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3009 (unsigned long) &efs);
3012 static void l2cap_ack_timeout(struct work_struct *work)
3014 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3018 BT_DBG("chan %p", chan);
3020 l2cap_chan_lock(chan);
3022 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3023 chan->last_acked_seq);
3026 l2cap_send_rr_or_rnr(chan, 0);
3028 l2cap_chan_unlock(chan);
3029 l2cap_chan_put(chan);
3032 int l2cap_ertm_init(struct l2cap_chan *chan)
3036 chan->next_tx_seq = 0;
3037 chan->expected_tx_seq = 0;
3038 chan->expected_ack_seq = 0;
3039 chan->unacked_frames = 0;
3040 chan->buffer_seq = 0;
3041 chan->frames_sent = 0;
3042 chan->last_acked_seq = 0;
3044 chan->sdu_last_frag = NULL;
3047 skb_queue_head_init(&chan->tx_q);
3049 chan->local_amp_id = AMP_ID_BREDR;
3050 chan->move_id = AMP_ID_BREDR;
3051 chan->move_state = L2CAP_MOVE_STABLE;
3052 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3054 if (chan->mode != L2CAP_MODE_ERTM)
3057 chan->rx_state = L2CAP_RX_STATE_RECV;
3058 chan->tx_state = L2CAP_TX_STATE_XMIT;
3060 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3061 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3062 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3064 skb_queue_head_init(&chan->srej_q);
3066 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3070 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3072 l2cap_seq_list_free(&chan->srej_list);
3077 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3080 case L2CAP_MODE_STREAMING:
3081 case L2CAP_MODE_ERTM:
3082 if (l2cap_mode_supported(mode, remote_feat_mask))
3086 return L2CAP_MODE_BASIC;
3090 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3092 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3095 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3097 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3100 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3101 struct l2cap_conf_rfc *rfc)
3103 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3104 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3106 /* Class 1 devices have must have ERTM timeouts
3107 * exceeding the Link Supervision Timeout. The
3108 * default Link Supervision Timeout for AMP
3109 * controllers is 10 seconds.
3111 * Class 1 devices use 0xffffffff for their
3112 * best-effort flush timeout, so the clamping logic
3113 * will result in a timeout that meets the above
3114 * requirement. ERTM timeouts are 16-bit values, so
3115 * the maximum timeout is 65.535 seconds.
3118 /* Convert timeout to milliseconds and round */
3119 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3121 /* This is the recommended formula for class 2 devices
3122 * that start ERTM timers when packets are sent to the
3125 ertm_to = 3 * ertm_to + 500;
3127 if (ertm_to > 0xffff)
3130 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3131 rfc->monitor_timeout = rfc->retrans_timeout;
3133 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3134 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3138 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3140 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3141 __l2cap_ews_supported(chan->conn)) {
3142 /* use extended control field */
3143 set_bit(FLAG_EXT_CTRL, &chan->flags);
3144 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3146 chan->tx_win = min_t(u16, chan->tx_win,
3147 L2CAP_DEFAULT_TX_WINDOW);
3148 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3150 chan->ack_win = chan->tx_win;
3153 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3155 struct l2cap_conf_req *req = data;
3156 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3157 void *ptr = req->data;
3160 BT_DBG("chan %p", chan);
3162 if (chan->num_conf_req || chan->num_conf_rsp)
3165 switch (chan->mode) {
3166 case L2CAP_MODE_STREAMING:
3167 case L2CAP_MODE_ERTM:
3168 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3171 if (__l2cap_efs_supported(chan->conn))
3172 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3176 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3181 if (chan->imtu != L2CAP_DEFAULT_MTU)
3182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3184 switch (chan->mode) {
3185 case L2CAP_MODE_BASIC:
3186 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3187 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3190 rfc.mode = L2CAP_MODE_BASIC;
3192 rfc.max_transmit = 0;
3193 rfc.retrans_timeout = 0;
3194 rfc.monitor_timeout = 0;
3195 rfc.max_pdu_size = 0;
3197 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3198 (unsigned long) &rfc);
3201 case L2CAP_MODE_ERTM:
3202 rfc.mode = L2CAP_MODE_ERTM;
3203 rfc.max_transmit = chan->max_tx;
3205 __l2cap_set_ertm_timeouts(chan, &rfc);
3207 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3208 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3210 rfc.max_pdu_size = cpu_to_le16(size);
3212 l2cap_txwin_setup(chan);
3214 rfc.txwin_size = min_t(u16, chan->tx_win,
3215 L2CAP_DEFAULT_TX_WINDOW);
3217 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3218 (unsigned long) &rfc);
3220 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3221 l2cap_add_opt_efs(&ptr, chan);
3223 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3227 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3228 if (chan->fcs == L2CAP_FCS_NONE ||
3229 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3230 chan->fcs = L2CAP_FCS_NONE;
3231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3236 case L2CAP_MODE_STREAMING:
3237 l2cap_txwin_setup(chan);
3238 rfc.mode = L2CAP_MODE_STREAMING;
3240 rfc.max_transmit = 0;
3241 rfc.retrans_timeout = 0;
3242 rfc.monitor_timeout = 0;
3244 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3245 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3247 rfc.max_pdu_size = cpu_to_le16(size);
3249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3250 (unsigned long) &rfc);
3252 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3253 l2cap_add_opt_efs(&ptr, chan);
3255 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3256 if (chan->fcs == L2CAP_FCS_NONE ||
3257 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3258 chan->fcs = L2CAP_FCS_NONE;
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3265 req->dcid = cpu_to_le16(chan->dcid);
3266 req->flags = __constant_cpu_to_le16(0);
3271 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3273 struct l2cap_conf_rsp *rsp = data;
3274 void *ptr = rsp->data;
3275 void *req = chan->conf_req;
3276 int len = chan->conf_len;
3277 int type, hint, olen;
3279 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3280 struct l2cap_conf_efs efs;
3282 u16 mtu = L2CAP_DEFAULT_MTU;
3283 u16 result = L2CAP_CONF_SUCCESS;
3286 BT_DBG("chan %p", chan);
3288 while (len >= L2CAP_CONF_OPT_SIZE) {
3289 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3291 hint = type & L2CAP_CONF_HINT;
3292 type &= L2CAP_CONF_MASK;
3295 case L2CAP_CONF_MTU:
3299 case L2CAP_CONF_FLUSH_TO:
3300 chan->flush_to = val;
3303 case L2CAP_CONF_QOS:
3306 case L2CAP_CONF_RFC:
3307 if (olen == sizeof(rfc))
3308 memcpy(&rfc, (void *) val, olen);
3311 case L2CAP_CONF_FCS:
3312 if (val == L2CAP_FCS_NONE)
3313 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3316 case L2CAP_CONF_EFS:
3318 if (olen == sizeof(efs))
3319 memcpy(&efs, (void *) val, olen);
3322 case L2CAP_CONF_EWS:
3323 if (!chan->conn->hs_enabled)
3324 return -ECONNREFUSED;
3326 set_bit(FLAG_EXT_CTRL, &chan->flags);
3327 set_bit(CONF_EWS_RECV, &chan->conf_state);
3328 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3329 chan->remote_tx_win = val;
3336 result = L2CAP_CONF_UNKNOWN;
3337 *((u8 *) ptr++) = type;
3342 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3345 switch (chan->mode) {
3346 case L2CAP_MODE_STREAMING:
3347 case L2CAP_MODE_ERTM:
3348 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3349 chan->mode = l2cap_select_mode(rfc.mode,
3350 chan->conn->feat_mask);
3355 if (__l2cap_efs_supported(chan->conn))
3356 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3358 return -ECONNREFUSED;
3361 if (chan->mode != rfc.mode)
3362 return -ECONNREFUSED;
3368 if (chan->mode != rfc.mode) {
3369 result = L2CAP_CONF_UNACCEPT;
3370 rfc.mode = chan->mode;
3372 if (chan->num_conf_rsp == 1)
3373 return -ECONNREFUSED;
3375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3376 (unsigned long) &rfc);
3379 if (result == L2CAP_CONF_SUCCESS) {
3380 /* Configure output options and let the other side know
3381 * which ones we don't like. */
3383 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3384 result = L2CAP_CONF_UNACCEPT;
3387 set_bit(CONF_MTU_DONE, &chan->conf_state);
3389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3392 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3393 efs.stype != L2CAP_SERV_NOTRAFIC &&
3394 efs.stype != chan->local_stype) {
3396 result = L2CAP_CONF_UNACCEPT;
3398 if (chan->num_conf_req >= 1)
3399 return -ECONNREFUSED;
3401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3403 (unsigned long) &efs);
3405 /* Send PENDING Conf Rsp */
3406 result = L2CAP_CONF_PENDING;
3407 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3412 case L2CAP_MODE_BASIC:
3413 chan->fcs = L2CAP_FCS_NONE;
3414 set_bit(CONF_MODE_DONE, &chan->conf_state);
3417 case L2CAP_MODE_ERTM:
3418 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3419 chan->remote_tx_win = rfc.txwin_size;
3421 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3423 chan->remote_max_tx = rfc.max_transmit;
3425 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3426 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3427 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3428 rfc.max_pdu_size = cpu_to_le16(size);
3429 chan->remote_mps = size;
3431 __l2cap_set_ertm_timeouts(chan, &rfc);
3433 set_bit(CONF_MODE_DONE, &chan->conf_state);
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3436 sizeof(rfc), (unsigned long) &rfc);
3438 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3439 chan->remote_id = efs.id;
3440 chan->remote_stype = efs.stype;
3441 chan->remote_msdu = le16_to_cpu(efs.msdu);
3442 chan->remote_flush_to =
3443 le32_to_cpu(efs.flush_to);
3444 chan->remote_acc_lat =
3445 le32_to_cpu(efs.acc_lat);
3446 chan->remote_sdu_itime =
3447 le32_to_cpu(efs.sdu_itime);
3448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3450 (unsigned long) &efs);
3454 case L2CAP_MODE_STREAMING:
3455 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3456 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3457 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3458 rfc.max_pdu_size = cpu_to_le16(size);
3459 chan->remote_mps = size;
3461 set_bit(CONF_MODE_DONE, &chan->conf_state);
3463 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3464 (unsigned long) &rfc);
3469 result = L2CAP_CONF_UNACCEPT;
3471 memset(&rfc, 0, sizeof(rfc));
3472 rfc.mode = chan->mode;
3475 if (result == L2CAP_CONF_SUCCESS)
3476 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3478 rsp->scid = cpu_to_le16(chan->dcid);
3479 rsp->result = cpu_to_le16(result);
3480 rsp->flags = __constant_cpu_to_le16(0);
3485 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3486 void *data, u16 *result)
3488 struct l2cap_conf_req *req = data;
3489 void *ptr = req->data;
3492 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3493 struct l2cap_conf_efs efs;
3495 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3497 while (len >= L2CAP_CONF_OPT_SIZE) {
3498 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3501 case L2CAP_CONF_MTU:
3502 if (val < L2CAP_DEFAULT_MIN_MTU) {
3503 *result = L2CAP_CONF_UNACCEPT;
3504 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3510 case L2CAP_CONF_FLUSH_TO:
3511 chan->flush_to = val;
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3516 case L2CAP_CONF_RFC:
3517 if (olen == sizeof(rfc))
3518 memcpy(&rfc, (void *)val, olen);
3520 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3521 rfc.mode != chan->mode)
3522 return -ECONNREFUSED;
3526 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3527 sizeof(rfc), (unsigned long) &rfc);
3530 case L2CAP_CONF_EWS:
3531 chan->ack_win = min_t(u16, val, chan->ack_win);
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3536 case L2CAP_CONF_EFS:
3537 if (olen == sizeof(efs))
3538 memcpy(&efs, (void *)val, olen);
3540 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3541 efs.stype != L2CAP_SERV_NOTRAFIC &&
3542 efs.stype != chan->local_stype)
3543 return -ECONNREFUSED;
3545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3546 (unsigned long) &efs);
3549 case L2CAP_CONF_FCS:
3550 if (*result == L2CAP_CONF_PENDING)
3551 if (val == L2CAP_FCS_NONE)
3552 set_bit(CONF_RECV_NO_FCS,
3558 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3559 return -ECONNREFUSED;
3561 chan->mode = rfc.mode;
3563 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3565 case L2CAP_MODE_ERTM:
3566 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3567 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3568 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3569 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3570 chan->ack_win = min_t(u16, chan->ack_win,
3573 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3574 chan->local_msdu = le16_to_cpu(efs.msdu);
3575 chan->local_sdu_itime =
3576 le32_to_cpu(efs.sdu_itime);
3577 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3578 chan->local_flush_to =
3579 le32_to_cpu(efs.flush_to);
3583 case L2CAP_MODE_STREAMING:
3584 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3588 req->dcid = cpu_to_le16(chan->dcid);
3589 req->flags = __constant_cpu_to_le16(0);
3594 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3595 u16 result, u16 flags)
3597 struct l2cap_conf_rsp *rsp = data;
3598 void *ptr = rsp->data;
3600 BT_DBG("chan %p", chan);
3602 rsp->scid = cpu_to_le16(chan->dcid);
3603 rsp->result = cpu_to_le16(result);
3604 rsp->flags = cpu_to_le16(flags);
3609 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3611 struct l2cap_conn_rsp rsp;
3612 struct l2cap_conn *conn = chan->conn;
3616 rsp.scid = cpu_to_le16(chan->dcid);
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3619 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3622 rsp_code = L2CAP_CREATE_CHAN_RSP;
3624 rsp_code = L2CAP_CONN_RSP;
3626 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3628 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3630 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3633 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3634 l2cap_build_conf_req(chan, buf), buf);
3635 chan->num_conf_req++;
3638 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3642 /* Use sane default values in case a misbehaving remote device
3643 * did not send an RFC or extended window size option.
3645 u16 txwin_ext = chan->ack_win;
3646 struct l2cap_conf_rfc rfc = {
3648 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3649 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3650 .max_pdu_size = cpu_to_le16(chan->imtu),
3651 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3654 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3656 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3659 while (len >= L2CAP_CONF_OPT_SIZE) {
3660 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3663 case L2CAP_CONF_RFC:
3664 if (olen == sizeof(rfc))
3665 memcpy(&rfc, (void *)val, olen);
3667 case L2CAP_CONF_EWS:
3674 case L2CAP_MODE_ERTM:
3675 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3676 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3677 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3678 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3679 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3681 chan->ack_win = min_t(u16, chan->ack_win,
3684 case L2CAP_MODE_STREAMING:
3685 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3689 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3690 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3693 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3695 if (cmd_len < sizeof(*rej))
3698 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3701 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3702 cmd->ident == conn->info_ident) {
3703 cancel_delayed_work(&conn->info_timer);
3705 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3706 conn->info_ident = 0;
3708 l2cap_conn_start(conn);
3714 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3715 struct l2cap_cmd_hdr *cmd,
3716 u8 *data, u8 rsp_code, u8 amp_id)
3718 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3719 struct l2cap_conn_rsp rsp;
3720 struct l2cap_chan *chan = NULL, *pchan;
3721 struct sock *parent, *sk = NULL;
3722 int result, status = L2CAP_CS_NO_INFO;
3724 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3725 __le16 psm = req->psm;
3727 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3729 /* Check if we have socket listening on psm */
3730 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3733 result = L2CAP_CR_BAD_PSM;
3739 mutex_lock(&conn->chan_lock);
3742 /* Check if the ACL is secure enough (if not SDP) */
3743 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3744 !hci_conn_check_link_mode(conn->hcon)) {
3745 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3746 result = L2CAP_CR_SEC_BLOCK;
3750 result = L2CAP_CR_NO_MEM;
3752 /* Check if we already have channel with that dcid */
3753 if (__l2cap_get_chan_by_dcid(conn, scid))
3756 chan = pchan->ops->new_connection(pchan);
3762 /* For certain devices (ex: HID mouse), support for authentication,
3763 * pairing and bonding is optional. For such devices, inorder to avoid
3764 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3765 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3767 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3769 bacpy(&chan->src, &conn->hcon->src);
3770 bacpy(&chan->dst, &conn->hcon->dst);
3771 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3772 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3775 chan->local_amp_id = amp_id;
3777 __l2cap_chan_add(conn, chan);
3781 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3783 chan->ident = cmd->ident;
3785 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3786 if (l2cap_chan_check_security(chan)) {
3787 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3788 __l2cap_state_change(chan, BT_CONNECT2);
3789 result = L2CAP_CR_PEND;
3790 status = L2CAP_CS_AUTHOR_PEND;
3791 chan->ops->defer(chan);
3793 /* Force pending result for AMP controllers.
3794 * The connection will succeed after the
3795 * physical link is up.
3797 if (amp_id == AMP_ID_BREDR) {
3798 __l2cap_state_change(chan, BT_CONFIG);
3799 result = L2CAP_CR_SUCCESS;
3801 __l2cap_state_change(chan, BT_CONNECT2);
3802 result = L2CAP_CR_PEND;
3804 status = L2CAP_CS_NO_INFO;
3807 __l2cap_state_change(chan, BT_CONNECT2);
3808 result = L2CAP_CR_PEND;
3809 status = L2CAP_CS_AUTHEN_PEND;
3812 __l2cap_state_change(chan, BT_CONNECT2);
3813 result = L2CAP_CR_PEND;
3814 status = L2CAP_CS_NO_INFO;
3818 release_sock(parent);
3819 mutex_unlock(&conn->chan_lock);
3822 rsp.scid = cpu_to_le16(scid);
3823 rsp.dcid = cpu_to_le16(dcid);
3824 rsp.result = cpu_to_le16(result);
3825 rsp.status = cpu_to_le16(status);
3826 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3828 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3829 struct l2cap_info_req info;
3830 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3832 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3833 conn->info_ident = l2cap_get_ident(conn);
3835 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3837 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3838 sizeof(info), &info);
3841 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3842 result == L2CAP_CR_SUCCESS) {
3844 set_bit(CONF_REQ_SENT, &chan->conf_state);
3845 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3846 l2cap_build_conf_req(chan, buf), buf);
3847 chan->num_conf_req++;
3853 static int l2cap_connect_req(struct l2cap_conn *conn,
3854 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3856 struct hci_dev *hdev = conn->hcon->hdev;
3857 struct hci_conn *hcon = conn->hcon;
3859 if (cmd_len < sizeof(struct l2cap_conn_req))
3863 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3864 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3865 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3866 hcon->dst_type, 0, NULL, 0,
3868 hci_dev_unlock(hdev);
3870 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3874 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3875 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3878 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3879 u16 scid, dcid, result, status;
3880 struct l2cap_chan *chan;
3884 if (cmd_len < sizeof(*rsp))
3887 scid = __le16_to_cpu(rsp->scid);
3888 dcid = __le16_to_cpu(rsp->dcid);
3889 result = __le16_to_cpu(rsp->result);
3890 status = __le16_to_cpu(rsp->status);
3892 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3893 dcid, scid, result, status);
3895 mutex_lock(&conn->chan_lock);
3898 chan = __l2cap_get_chan_by_scid(conn, scid);
3904 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3913 l2cap_chan_lock(chan);
3916 case L2CAP_CR_SUCCESS:
3917 l2cap_state_change(chan, BT_CONFIG);
3920 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3922 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3925 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3926 l2cap_build_conf_req(chan, req), req);
3927 chan->num_conf_req++;
3931 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3935 l2cap_chan_del(chan, ECONNREFUSED);
3939 l2cap_chan_unlock(chan);
3942 mutex_unlock(&conn->chan_lock);
3947 static inline void set_default_fcs(struct l2cap_chan *chan)
3949 /* FCS is enabled only in ERTM or streaming mode, if one or both
3952 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3953 chan->fcs = L2CAP_FCS_NONE;
3954 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3955 chan->fcs = L2CAP_FCS_CRC16;
3958 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3959 u8 ident, u16 flags)
3961 struct l2cap_conn *conn = chan->conn;
3963 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3966 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3967 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3969 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3970 l2cap_build_conf_rsp(chan, data,
3971 L2CAP_CONF_SUCCESS, flags), data);
3974 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3977 struct l2cap_cmd_rej_cid rej;
3979 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3980 rej.scid = __cpu_to_le16(scid);
3981 rej.dcid = __cpu_to_le16(dcid);
3983 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3986 static inline int l2cap_config_req(struct l2cap_conn *conn,
3987 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3990 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3993 struct l2cap_chan *chan;
3996 if (cmd_len < sizeof(*req))
3999 dcid = __le16_to_cpu(req->dcid);
4000 flags = __le16_to_cpu(req->flags);
4002 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4004 chan = l2cap_get_chan_by_scid(conn, dcid);
4006 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4010 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4011 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4016 /* Reject if config buffer is too small. */
4017 len = cmd_len - sizeof(*req);
4018 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4019 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4020 l2cap_build_conf_rsp(chan, rsp,
4021 L2CAP_CONF_REJECT, flags), rsp);
4026 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4027 chan->conf_len += len;
4029 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4030 /* Incomplete config. Send empty response. */
4031 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4032 l2cap_build_conf_rsp(chan, rsp,
4033 L2CAP_CONF_SUCCESS, flags), rsp);
4037 /* Complete config. */
4038 len = l2cap_parse_conf_req(chan, rsp);
4040 l2cap_send_disconn_req(chan, ECONNRESET);
4044 chan->ident = cmd->ident;
4045 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4046 chan->num_conf_rsp++;
4048 /* Reset config buffer. */
4051 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4054 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4055 set_default_fcs(chan);
4057 if (chan->mode == L2CAP_MODE_ERTM ||
4058 chan->mode == L2CAP_MODE_STREAMING)
4059 err = l2cap_ertm_init(chan);
4062 l2cap_send_disconn_req(chan, -err);
4064 l2cap_chan_ready(chan);
4069 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4071 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4072 l2cap_build_conf_req(chan, buf), buf);
4073 chan->num_conf_req++;
4076 /* Got Conf Rsp PENDING from remote side and asume we sent
4077 Conf Rsp PENDING in the code above */
4078 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4079 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4081 /* check compatibility */
4083 /* Send rsp for BR/EDR channel */
4085 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4087 chan->ident = cmd->ident;
4091 l2cap_chan_unlock(chan);
4095 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4096 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4099 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4100 u16 scid, flags, result;
4101 struct l2cap_chan *chan;
4102 int len = cmd_len - sizeof(*rsp);
4105 if (cmd_len < sizeof(*rsp))
4108 scid = __le16_to_cpu(rsp->scid);
4109 flags = __le16_to_cpu(rsp->flags);
4110 result = __le16_to_cpu(rsp->result);
4112 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4115 chan = l2cap_get_chan_by_scid(conn, scid);
4120 case L2CAP_CONF_SUCCESS:
4121 l2cap_conf_rfc_get(chan, rsp->data, len);
4122 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4125 case L2CAP_CONF_PENDING:
4126 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4128 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4131 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4134 l2cap_send_disconn_req(chan, ECONNRESET);
4138 if (!chan->hs_hcon) {
4139 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4142 if (l2cap_check_efs(chan)) {
4143 amp_create_logical_link(chan);
4144 chan->ident = cmd->ident;
4150 case L2CAP_CONF_UNACCEPT:
4151 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4154 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4155 l2cap_send_disconn_req(chan, ECONNRESET);
4159 /* throw out any old stored conf requests */
4160 result = L2CAP_CONF_SUCCESS;
4161 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4164 l2cap_send_disconn_req(chan, ECONNRESET);
4168 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4169 L2CAP_CONF_REQ, len, req);
4170 chan->num_conf_req++;
4171 if (result != L2CAP_CONF_SUCCESS)
4177 l2cap_chan_set_err(chan, ECONNRESET);
4179 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4180 l2cap_send_disconn_req(chan, ECONNRESET);
4184 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4187 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4189 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4190 set_default_fcs(chan);
4192 if (chan->mode == L2CAP_MODE_ERTM ||
4193 chan->mode == L2CAP_MODE_STREAMING)
4194 err = l2cap_ertm_init(chan);
4197 l2cap_send_disconn_req(chan, -err);
4199 l2cap_chan_ready(chan);
4203 l2cap_chan_unlock(chan);
4207 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4208 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4211 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4212 struct l2cap_disconn_rsp rsp;
4214 struct l2cap_chan *chan;
4216 if (cmd_len != sizeof(*req))
4219 scid = __le16_to_cpu(req->scid);
4220 dcid = __le16_to_cpu(req->dcid);
4222 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4224 mutex_lock(&conn->chan_lock);
4226 chan = __l2cap_get_chan_by_scid(conn, dcid);
4228 mutex_unlock(&conn->chan_lock);
4229 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4233 l2cap_chan_lock(chan);
4235 rsp.dcid = cpu_to_le16(chan->scid);
4236 rsp.scid = cpu_to_le16(chan->dcid);
4237 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4239 chan->ops->set_shutdown(chan);
4241 l2cap_chan_hold(chan);
4242 l2cap_chan_del(chan, ECONNRESET);
4244 l2cap_chan_unlock(chan);
4246 chan->ops->close(chan);
4247 l2cap_chan_put(chan);
4249 mutex_unlock(&conn->chan_lock);
4254 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4255 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4258 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4260 struct l2cap_chan *chan;
4262 if (cmd_len != sizeof(*rsp))
4265 scid = __le16_to_cpu(rsp->scid);
4266 dcid = __le16_to_cpu(rsp->dcid);
4268 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4270 mutex_lock(&conn->chan_lock);
4272 chan = __l2cap_get_chan_by_scid(conn, scid);
4274 mutex_unlock(&conn->chan_lock);
4278 l2cap_chan_lock(chan);
4280 l2cap_chan_hold(chan);
4281 l2cap_chan_del(chan, 0);
4283 l2cap_chan_unlock(chan);
4285 chan->ops->close(chan);
4286 l2cap_chan_put(chan);
4288 mutex_unlock(&conn->chan_lock);
4293 static inline int l2cap_information_req(struct l2cap_conn *conn,
4294 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4297 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4300 if (cmd_len != sizeof(*req))
4303 type = __le16_to_cpu(req->type);
4305 BT_DBG("type 0x%4.4x", type);
4307 if (type == L2CAP_IT_FEAT_MASK) {
4309 u32 feat_mask = l2cap_feat_mask;
4310 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4311 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4312 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4314 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4316 if (conn->hs_enabled)
4317 feat_mask |= L2CAP_FEAT_EXT_FLOW
4318 | L2CAP_FEAT_EXT_WINDOW;
4320 put_unaligned_le32(feat_mask, rsp->data);
4321 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4323 } else if (type == L2CAP_IT_FIXED_CHAN) {
4325 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4327 if (conn->hs_enabled)
4328 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4330 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4332 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4333 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4334 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4335 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4338 struct l2cap_info_rsp rsp;
4339 rsp.type = cpu_to_le16(type);
4340 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4341 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4348 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4349 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4352 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4355 if (cmd_len < sizeof(*rsp))
4358 type = __le16_to_cpu(rsp->type);
4359 result = __le16_to_cpu(rsp->result);
4361 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4363 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4364 if (cmd->ident != conn->info_ident ||
4365 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4368 cancel_delayed_work(&conn->info_timer);
4370 if (result != L2CAP_IR_SUCCESS) {
4371 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4372 conn->info_ident = 0;
4374 l2cap_conn_start(conn);
4380 case L2CAP_IT_FEAT_MASK:
4381 conn->feat_mask = get_unaligned_le32(rsp->data);
4383 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4384 struct l2cap_info_req req;
4385 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4387 conn->info_ident = l2cap_get_ident(conn);
4389 l2cap_send_cmd(conn, conn->info_ident,
4390 L2CAP_INFO_REQ, sizeof(req), &req);
4392 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4393 conn->info_ident = 0;
4395 l2cap_conn_start(conn);
4399 case L2CAP_IT_FIXED_CHAN:
4400 conn->fixed_chan_mask = rsp->data[0];
4401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4402 conn->info_ident = 0;
4404 l2cap_conn_start(conn);
4411 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4412 struct l2cap_cmd_hdr *cmd,
4413 u16 cmd_len, void *data)
4415 struct l2cap_create_chan_req *req = data;
4416 struct l2cap_create_chan_rsp rsp;
4417 struct l2cap_chan *chan;
4418 struct hci_dev *hdev;
4421 if (cmd_len != sizeof(*req))
4424 if (!conn->hs_enabled)
4427 psm = le16_to_cpu(req->psm);
4428 scid = le16_to_cpu(req->scid);
4430 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4432 /* For controller id 0 make BR/EDR connection */
4433 if (req->amp_id == AMP_ID_BREDR) {
4434 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4439 /* Validate AMP controller id */
4440 hdev = hci_dev_get(req->amp_id);
4444 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4449 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4452 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4453 struct hci_conn *hs_hcon;
4455 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4459 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4464 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4466 mgr->bredr_chan = chan;
4467 chan->hs_hcon = hs_hcon;
4468 chan->fcs = L2CAP_FCS_NONE;
4469 conn->mtu = hdev->block_mtu;
4478 rsp.scid = cpu_to_le16(scid);
4479 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4480 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4482 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4488 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4490 struct l2cap_move_chan_req req;
4493 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4495 ident = l2cap_get_ident(chan->conn);
4496 chan->ident = ident;
4498 req.icid = cpu_to_le16(chan->scid);
4499 req.dest_amp_id = dest_amp_id;
4501 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4504 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4507 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4509 struct l2cap_move_chan_rsp rsp;
4511 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4513 rsp.icid = cpu_to_le16(chan->dcid);
4514 rsp.result = cpu_to_le16(result);
4516 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4520 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4522 struct l2cap_move_chan_cfm cfm;
4524 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4526 chan->ident = l2cap_get_ident(chan->conn);
4528 cfm.icid = cpu_to_le16(chan->scid);
4529 cfm.result = cpu_to_le16(result);
4531 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4534 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4537 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4539 struct l2cap_move_chan_cfm cfm;
4541 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4543 cfm.icid = cpu_to_le16(icid);
4544 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4546 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4550 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4553 struct l2cap_move_chan_cfm_rsp rsp;
4555 BT_DBG("icid 0x%4.4x", icid);
4557 rsp.icid = cpu_to_le16(icid);
4558 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4561 static void __release_logical_link(struct l2cap_chan *chan)
4563 chan->hs_hchan = NULL;
4564 chan->hs_hcon = NULL;
4566 /* Placeholder - release the logical link */
4569 static void l2cap_logical_fail(struct l2cap_chan *chan)
4571 /* Logical link setup failed */
4572 if (chan->state != BT_CONNECTED) {
4573 /* Create channel failure, disconnect */
4574 l2cap_send_disconn_req(chan, ECONNRESET);
4578 switch (chan->move_role) {
4579 case L2CAP_MOVE_ROLE_RESPONDER:
4580 l2cap_move_done(chan);
4581 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4583 case L2CAP_MOVE_ROLE_INITIATOR:
4584 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4585 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4586 /* Remote has only sent pending or
4587 * success responses, clean up
4589 l2cap_move_done(chan);
4592 /* Other amp move states imply that the move
4593 * has already aborted
4595 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4600 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4601 struct hci_chan *hchan)
4603 struct l2cap_conf_rsp rsp;
4605 chan->hs_hchan = hchan;
4606 chan->hs_hcon->l2cap_data = chan->conn;
4608 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4610 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4613 set_default_fcs(chan);
4615 err = l2cap_ertm_init(chan);
4617 l2cap_send_disconn_req(chan, -err);
4619 l2cap_chan_ready(chan);
4623 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4624 struct hci_chan *hchan)
4626 chan->hs_hcon = hchan->conn;
4627 chan->hs_hcon->l2cap_data = chan->conn;
4629 BT_DBG("move_state %d", chan->move_state);
4631 switch (chan->move_state) {
4632 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4633 /* Move confirm will be sent after a success
4634 * response is received
4636 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4638 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4639 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4640 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4641 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4642 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4643 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4644 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4645 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4646 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4650 /* Move was not in expected state, free the channel */
4651 __release_logical_link(chan);
4653 chan->move_state = L2CAP_MOVE_STABLE;
4657 /* Call with chan locked */
4658 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4661 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4664 l2cap_logical_fail(chan);
4665 __release_logical_link(chan);
4669 if (chan->state != BT_CONNECTED) {
4670 /* Ignore logical link if channel is on BR/EDR */
4671 if (chan->local_amp_id != AMP_ID_BREDR)
4672 l2cap_logical_finish_create(chan, hchan);
4674 l2cap_logical_finish_move(chan, hchan);
4678 void l2cap_move_start(struct l2cap_chan *chan)
4680 BT_DBG("chan %p", chan);
4682 if (chan->local_amp_id == AMP_ID_BREDR) {
4683 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4685 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4686 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4687 /* Placeholder - start physical link setup */
4689 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4690 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4692 l2cap_move_setup(chan);
4693 l2cap_send_move_chan_req(chan, 0);
4697 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4698 u8 local_amp_id, u8 remote_amp_id)
4700 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4701 local_amp_id, remote_amp_id);
4703 chan->fcs = L2CAP_FCS_NONE;
4705 /* Outgoing channel on AMP */
4706 if (chan->state == BT_CONNECT) {
4707 if (result == L2CAP_CR_SUCCESS) {
4708 chan->local_amp_id = local_amp_id;
4709 l2cap_send_create_chan_req(chan, remote_amp_id);
4711 /* Revert to BR/EDR connect */
4712 l2cap_send_conn_req(chan);
4718 /* Incoming channel on AMP */
4719 if (__l2cap_no_conn_pending(chan)) {
4720 struct l2cap_conn_rsp rsp;
4722 rsp.scid = cpu_to_le16(chan->dcid);
4723 rsp.dcid = cpu_to_le16(chan->scid);
4725 if (result == L2CAP_CR_SUCCESS) {
4726 /* Send successful response */
4727 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4728 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4730 /* Send negative response */
4731 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4732 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4735 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4738 if (result == L2CAP_CR_SUCCESS) {
4739 __l2cap_state_change(chan, BT_CONFIG);
4740 set_bit(CONF_REQ_SENT, &chan->conf_state);
4741 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4743 l2cap_build_conf_req(chan, buf), buf);
4744 chan->num_conf_req++;
4749 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4752 l2cap_move_setup(chan);
4753 chan->move_id = local_amp_id;
4754 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4756 l2cap_send_move_chan_req(chan, remote_amp_id);
4759 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4761 struct hci_chan *hchan = NULL;
4763 /* Placeholder - get hci_chan for logical link */
4766 if (hchan->state == BT_CONNECTED) {
4767 /* Logical link is ready to go */
4768 chan->hs_hcon = hchan->conn;
4769 chan->hs_hcon->l2cap_data = chan->conn;
4770 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4771 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4773 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4775 /* Wait for logical link to be ready */
4776 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4779 /* Logical link not available */
4780 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4784 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4786 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4788 if (result == -EINVAL)
4789 rsp_result = L2CAP_MR_BAD_ID;
4791 rsp_result = L2CAP_MR_NOT_ALLOWED;
4793 l2cap_send_move_chan_rsp(chan, rsp_result);
4796 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4797 chan->move_state = L2CAP_MOVE_STABLE;
4799 /* Restart data transmission */
4800 l2cap_ertm_send(chan);
4803 /* Invoke with locked chan */
4804 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4806 u8 local_amp_id = chan->local_amp_id;
4807 u8 remote_amp_id = chan->remote_amp_id;
4809 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4810 chan, result, local_amp_id, remote_amp_id);
4812 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4813 l2cap_chan_unlock(chan);
4817 if (chan->state != BT_CONNECTED) {
4818 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4819 } else if (result != L2CAP_MR_SUCCESS) {
4820 l2cap_do_move_cancel(chan, result);
4822 switch (chan->move_role) {
4823 case L2CAP_MOVE_ROLE_INITIATOR:
4824 l2cap_do_move_initiate(chan, local_amp_id,
4827 case L2CAP_MOVE_ROLE_RESPONDER:
4828 l2cap_do_move_respond(chan, result);
4831 l2cap_do_move_cancel(chan, result);
4837 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4838 struct l2cap_cmd_hdr *cmd,
4839 u16 cmd_len, void *data)
4841 struct l2cap_move_chan_req *req = data;
4842 struct l2cap_move_chan_rsp rsp;
4843 struct l2cap_chan *chan;
4845 u16 result = L2CAP_MR_NOT_ALLOWED;
4847 if (cmd_len != sizeof(*req))
4850 icid = le16_to_cpu(req->icid);
4852 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4854 if (!conn->hs_enabled)
4857 chan = l2cap_get_chan_by_dcid(conn, icid);
4859 rsp.icid = cpu_to_le16(icid);
4860 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4861 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4866 chan->ident = cmd->ident;
4868 if (chan->scid < L2CAP_CID_DYN_START ||
4869 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4870 (chan->mode != L2CAP_MODE_ERTM &&
4871 chan->mode != L2CAP_MODE_STREAMING)) {
4872 result = L2CAP_MR_NOT_ALLOWED;
4873 goto send_move_response;
4876 if (chan->local_amp_id == req->dest_amp_id) {
4877 result = L2CAP_MR_SAME_ID;
4878 goto send_move_response;
4881 if (req->dest_amp_id != AMP_ID_BREDR) {
4882 struct hci_dev *hdev;
4883 hdev = hci_dev_get(req->dest_amp_id);
4884 if (!hdev || hdev->dev_type != HCI_AMP ||
4885 !test_bit(HCI_UP, &hdev->flags)) {
4889 result = L2CAP_MR_BAD_ID;
4890 goto send_move_response;
4895 /* Detect a move collision. Only send a collision response
4896 * if this side has "lost", otherwise proceed with the move.
4897 * The winner has the larger bd_addr.
4899 if ((__chan_is_moving(chan) ||
4900 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4901 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4902 result = L2CAP_MR_COLLISION;
4903 goto send_move_response;
4906 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4907 l2cap_move_setup(chan);
4908 chan->move_id = req->dest_amp_id;
4911 if (req->dest_amp_id == AMP_ID_BREDR) {
4912 /* Moving to BR/EDR */
4913 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4914 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4915 result = L2CAP_MR_PEND;
4917 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4918 result = L2CAP_MR_SUCCESS;
4921 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4922 /* Placeholder - uncomment when amp functions are available */
4923 /*amp_accept_physical(chan, req->dest_amp_id);*/
4924 result = L2CAP_MR_PEND;
4928 l2cap_send_move_chan_rsp(chan, result);
4930 l2cap_chan_unlock(chan);
4935 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4937 struct l2cap_chan *chan;
4938 struct hci_chan *hchan = NULL;
4940 chan = l2cap_get_chan_by_scid(conn, icid);
4942 l2cap_send_move_chan_cfm_icid(conn, icid);
4946 __clear_chan_timer(chan);
4947 if (result == L2CAP_MR_PEND)
4948 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4950 switch (chan->move_state) {
4951 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4952 /* Move confirm will be sent when logical link
4955 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4957 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4958 if (result == L2CAP_MR_PEND) {
4960 } else if (test_bit(CONN_LOCAL_BUSY,
4961 &chan->conn_state)) {
4962 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4964 /* Logical link is up or moving to BR/EDR,
4967 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4968 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4971 case L2CAP_MOVE_WAIT_RSP:
4973 if (result == L2CAP_MR_SUCCESS) {
4974 /* Remote is ready, send confirm immediately
4975 * after logical link is ready
4977 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4979 /* Both logical link and move success
4980 * are required to confirm
4982 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4985 /* Placeholder - get hci_chan for logical link */
4987 /* Logical link not available */
4988 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4992 /* If the logical link is not yet connected, do not
4993 * send confirmation.
4995 if (hchan->state != BT_CONNECTED)
4998 /* Logical link is already ready to go */
5000 chan->hs_hcon = hchan->conn;
5001 chan->hs_hcon->l2cap_data = chan->conn;
5003 if (result == L2CAP_MR_SUCCESS) {
5004 /* Can confirm now */
5005 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5007 /* Now only need move success
5010 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5013 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5016 /* Any other amp move state means the move failed. */
5017 chan->move_id = chan->local_amp_id;
5018 l2cap_move_done(chan);
5019 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5022 l2cap_chan_unlock(chan);
5025 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5028 struct l2cap_chan *chan;
5030 chan = l2cap_get_chan_by_ident(conn, ident);
5032 /* Could not locate channel, icid is best guess */
5033 l2cap_send_move_chan_cfm_icid(conn, icid);
5037 __clear_chan_timer(chan);
5039 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5040 if (result == L2CAP_MR_COLLISION) {
5041 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5043 /* Cleanup - cancel move */
5044 chan->move_id = chan->local_amp_id;
5045 l2cap_move_done(chan);
5049 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5051 l2cap_chan_unlock(chan);
5054 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5055 struct l2cap_cmd_hdr *cmd,
5056 u16 cmd_len, void *data)
5058 struct l2cap_move_chan_rsp *rsp = data;
5061 if (cmd_len != sizeof(*rsp))
5064 icid = le16_to_cpu(rsp->icid);
5065 result = le16_to_cpu(rsp->result);
5067 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5069 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5070 l2cap_move_continue(conn, icid, result);
5072 l2cap_move_fail(conn, cmd->ident, icid, result);
5077 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5078 struct l2cap_cmd_hdr *cmd,
5079 u16 cmd_len, void *data)
5081 struct l2cap_move_chan_cfm *cfm = data;
5082 struct l2cap_chan *chan;
5085 if (cmd_len != sizeof(*cfm))
5088 icid = le16_to_cpu(cfm->icid);
5089 result = le16_to_cpu(cfm->result);
5091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5093 chan = l2cap_get_chan_by_dcid(conn, icid);
5095 /* Spec requires a response even if the icid was not found */
5096 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5100 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5101 if (result == L2CAP_MC_CONFIRMED) {
5102 chan->local_amp_id = chan->move_id;
5103 if (chan->local_amp_id == AMP_ID_BREDR)
5104 __release_logical_link(chan);
5106 chan->move_id = chan->local_amp_id;
5109 l2cap_move_done(chan);
5112 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5114 l2cap_chan_unlock(chan);
5119 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5120 struct l2cap_cmd_hdr *cmd,
5121 u16 cmd_len, void *data)
5123 struct l2cap_move_chan_cfm_rsp *rsp = data;
5124 struct l2cap_chan *chan;
5127 if (cmd_len != sizeof(*rsp))
5130 icid = le16_to_cpu(rsp->icid);
5132 BT_DBG("icid 0x%4.4x", icid);
5134 chan = l2cap_get_chan_by_scid(conn, icid);
5138 __clear_chan_timer(chan);
5140 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5141 chan->local_amp_id = chan->move_id;
5143 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5144 __release_logical_link(chan);
5146 l2cap_move_done(chan);
5149 l2cap_chan_unlock(chan);
5154 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5159 if (min > max || min < 6 || max > 3200)
5162 if (to_multiplier < 10 || to_multiplier > 3200)
5165 if (max >= to_multiplier * 8)
5168 max_latency = (to_multiplier * 8 / max) - 1;
5169 if (latency > 499 || latency > max_latency)
5175 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5176 struct l2cap_cmd_hdr *cmd,
5179 struct hci_conn *hcon = conn->hcon;
5180 struct l2cap_conn_param_update_req *req;
5181 struct l2cap_conn_param_update_rsp rsp;
5182 u16 min, max, latency, to_multiplier, cmd_len;
5185 if (!(hcon->link_mode & HCI_LM_MASTER))
5188 cmd_len = __le16_to_cpu(cmd->len);
5189 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5192 req = (struct l2cap_conn_param_update_req *) data;
5193 min = __le16_to_cpu(req->min);
5194 max = __le16_to_cpu(req->max);
5195 latency = __le16_to_cpu(req->latency);
5196 to_multiplier = __le16_to_cpu(req->to_multiplier);
5198 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5199 min, max, latency, to_multiplier);
5201 memset(&rsp, 0, sizeof(rsp));
5203 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5205 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5207 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5209 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5213 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5218 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5219 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5224 switch (cmd->code) {
5225 case L2CAP_COMMAND_REJ:
5226 l2cap_command_rej(conn, cmd, cmd_len, data);
5229 case L2CAP_CONN_REQ:
5230 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5233 case L2CAP_CONN_RSP:
5234 case L2CAP_CREATE_CHAN_RSP:
5235 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5238 case L2CAP_CONF_REQ:
5239 err = l2cap_config_req(conn, cmd, cmd_len, data);
5242 case L2CAP_CONF_RSP:
5243 l2cap_config_rsp(conn, cmd, cmd_len, data);
5246 case L2CAP_DISCONN_REQ:
5247 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5250 case L2CAP_DISCONN_RSP:
5251 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5254 case L2CAP_ECHO_REQ:
5255 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5258 case L2CAP_ECHO_RSP:
5261 case L2CAP_INFO_REQ:
5262 err = l2cap_information_req(conn, cmd, cmd_len, data);
5265 case L2CAP_INFO_RSP:
5266 l2cap_information_rsp(conn, cmd, cmd_len, data);
5269 case L2CAP_CREATE_CHAN_REQ:
5270 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5273 case L2CAP_MOVE_CHAN_REQ:
5274 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5277 case L2CAP_MOVE_CHAN_RSP:
5278 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5281 case L2CAP_MOVE_CHAN_CFM:
5282 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5285 case L2CAP_MOVE_CHAN_CFM_RSP:
5286 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5290 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5298 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5299 struct l2cap_cmd_hdr *cmd, u8 *data)
5301 switch (cmd->code) {
5302 case L2CAP_COMMAND_REJ:
5305 case L2CAP_CONN_PARAM_UPDATE_REQ:
5306 return l2cap_conn_param_update_req(conn, cmd, data);
5308 case L2CAP_CONN_PARAM_UPDATE_RSP:
5312 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5317 static __le16 l2cap_err_to_reason(int err)
5323 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5327 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5328 struct sk_buff *skb)
5330 struct hci_conn *hcon = conn->hcon;
5331 struct l2cap_cmd_hdr *cmd;
5335 if (hcon->type != LE_LINK)
5338 if (skb->len < L2CAP_CMD_HDR_SIZE)
5341 cmd = (void *) skb->data;
5342 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5344 len = le16_to_cpu(cmd->len);
5346 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5348 if (len != skb->len || !cmd->ident) {
5349 BT_DBG("corrupted command");
5353 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5355 struct l2cap_cmd_rej_unk rej;
5357 BT_ERR("Wrong link type (%d)", err);
5359 rej.reason = l2cap_err_to_reason(err);
5360 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5368 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5369 struct sk_buff *skb)
5371 struct hci_conn *hcon = conn->hcon;
5372 u8 *data = skb->data;
5374 struct l2cap_cmd_hdr cmd;
5377 l2cap_raw_recv(conn, skb);
5379 if (hcon->type != ACL_LINK)
5382 while (len >= L2CAP_CMD_HDR_SIZE) {
5384 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5385 data += L2CAP_CMD_HDR_SIZE;
5386 len -= L2CAP_CMD_HDR_SIZE;
5388 cmd_len = le16_to_cpu(cmd.len);
5390 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5393 if (cmd_len > len || !cmd.ident) {
5394 BT_DBG("corrupted command");
5398 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5400 struct l2cap_cmd_rej_unk rej;
5402 BT_ERR("Wrong link type (%d)", err);
5404 rej.reason = l2cap_err_to_reason(err);
5405 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5417 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5419 u16 our_fcs, rcv_fcs;
5422 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5423 hdr_size = L2CAP_EXT_HDR_SIZE;
5425 hdr_size = L2CAP_ENH_HDR_SIZE;
5427 if (chan->fcs == L2CAP_FCS_CRC16) {
5428 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5429 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5430 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5432 if (our_fcs != rcv_fcs)
5438 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5440 struct l2cap_ctrl control;
5442 BT_DBG("chan %p", chan);
5444 memset(&control, 0, sizeof(control));
5447 control.reqseq = chan->buffer_seq;
5448 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5450 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5451 control.super = L2CAP_SUPER_RNR;
5452 l2cap_send_sframe(chan, &control);
5455 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5456 chan->unacked_frames > 0)
5457 __set_retrans_timer(chan);
5459 /* Send pending iframes */
5460 l2cap_ertm_send(chan);
5462 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5463 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5464 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5467 control.super = L2CAP_SUPER_RR;
5468 l2cap_send_sframe(chan, &control);
5472 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5473 struct sk_buff **last_frag)
5475 /* skb->len reflects data in skb as well as all fragments
5476 * skb->data_len reflects only data in fragments
5478 if (!skb_has_frag_list(skb))
5479 skb_shinfo(skb)->frag_list = new_frag;
5481 new_frag->next = NULL;
5483 (*last_frag)->next = new_frag;
5484 *last_frag = new_frag;
5486 skb->len += new_frag->len;
5487 skb->data_len += new_frag->len;
5488 skb->truesize += new_frag->truesize;
5491 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5492 struct l2cap_ctrl *control)
5496 switch (control->sar) {
5497 case L2CAP_SAR_UNSEGMENTED:
5501 err = chan->ops->recv(chan, skb);
5504 case L2CAP_SAR_START:
5508 chan->sdu_len = get_unaligned_le16(skb->data);
5509 skb_pull(skb, L2CAP_SDULEN_SIZE);
5511 if (chan->sdu_len > chan->imtu) {
5516 if (skb->len >= chan->sdu_len)
5520 chan->sdu_last_frag = skb;
5526 case L2CAP_SAR_CONTINUE:
5530 append_skb_frag(chan->sdu, skb,
5531 &chan->sdu_last_frag);
5534 if (chan->sdu->len >= chan->sdu_len)
5544 append_skb_frag(chan->sdu, skb,
5545 &chan->sdu_last_frag);
5548 if (chan->sdu->len != chan->sdu_len)
5551 err = chan->ops->recv(chan, chan->sdu);
5554 /* Reassembly complete */
5556 chan->sdu_last_frag = NULL;
5564 kfree_skb(chan->sdu);
5566 chan->sdu_last_frag = NULL;
5573 static int l2cap_resegment(struct l2cap_chan *chan)
5579 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5583 if (chan->mode != L2CAP_MODE_ERTM)
5586 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5587 l2cap_tx(chan, NULL, NULL, event);
5590 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5593 /* Pass sequential frames to l2cap_reassemble_sdu()
5594 * until a gap is encountered.
5597 BT_DBG("chan %p", chan);
5599 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5600 struct sk_buff *skb;
5601 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5602 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5604 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5609 skb_unlink(skb, &chan->srej_q);
5610 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5611 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5616 if (skb_queue_empty(&chan->srej_q)) {
5617 chan->rx_state = L2CAP_RX_STATE_RECV;
5618 l2cap_send_ack(chan);
5624 static void l2cap_handle_srej(struct l2cap_chan *chan,
5625 struct l2cap_ctrl *control)
5627 struct sk_buff *skb;
5629 BT_DBG("chan %p, control %p", chan, control);
5631 if (control->reqseq == chan->next_tx_seq) {
5632 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5633 l2cap_send_disconn_req(chan, ECONNRESET);
5637 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5640 BT_DBG("Seq %d not available for retransmission",
5645 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5646 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5647 l2cap_send_disconn_req(chan, ECONNRESET);
5651 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5653 if (control->poll) {
5654 l2cap_pass_to_tx(chan, control);
5656 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5657 l2cap_retransmit(chan, control);
5658 l2cap_ertm_send(chan);
5660 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5661 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5662 chan->srej_save_reqseq = control->reqseq;
5665 l2cap_pass_to_tx_fbit(chan, control);
5667 if (control->final) {
5668 if (chan->srej_save_reqseq != control->reqseq ||
5669 !test_and_clear_bit(CONN_SREJ_ACT,
5671 l2cap_retransmit(chan, control);
5673 l2cap_retransmit(chan, control);
5674 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5675 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5676 chan->srej_save_reqseq = control->reqseq;
5682 static void l2cap_handle_rej(struct l2cap_chan *chan,
5683 struct l2cap_ctrl *control)
5685 struct sk_buff *skb;
5687 BT_DBG("chan %p, control %p", chan, control);
5689 if (control->reqseq == chan->next_tx_seq) {
5690 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5691 l2cap_send_disconn_req(chan, ECONNRESET);
5695 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5697 if (chan->max_tx && skb &&
5698 bt_cb(skb)->control.retries >= chan->max_tx) {
5699 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5700 l2cap_send_disconn_req(chan, ECONNRESET);
5704 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5706 l2cap_pass_to_tx(chan, control);
5708 if (control->final) {
5709 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5710 l2cap_retransmit_all(chan, control);
5712 l2cap_retransmit_all(chan, control);
5713 l2cap_ertm_send(chan);
5714 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5715 set_bit(CONN_REJ_ACT, &chan->conn_state);
5719 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5721 BT_DBG("chan %p, txseq %d", chan, txseq);
5723 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5724 chan->expected_tx_seq);
5726 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5727 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5729 /* See notes below regarding "double poll" and
5732 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5733 BT_DBG("Invalid/Ignore - after SREJ");
5734 return L2CAP_TXSEQ_INVALID_IGNORE;
5736 BT_DBG("Invalid - in window after SREJ sent");
5737 return L2CAP_TXSEQ_INVALID;
5741 if (chan->srej_list.head == txseq) {
5742 BT_DBG("Expected SREJ");
5743 return L2CAP_TXSEQ_EXPECTED_SREJ;
5746 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5747 BT_DBG("Duplicate SREJ - txseq already stored");
5748 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5751 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5752 BT_DBG("Unexpected SREJ - not requested");
5753 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5757 if (chan->expected_tx_seq == txseq) {
5758 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5760 BT_DBG("Invalid - txseq outside tx window");
5761 return L2CAP_TXSEQ_INVALID;
5764 return L2CAP_TXSEQ_EXPECTED;
5768 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5769 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5770 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5771 return L2CAP_TXSEQ_DUPLICATE;
5774 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5775 /* A source of invalid packets is a "double poll" condition,
5776 * where delays cause us to send multiple poll packets. If
5777 * the remote stack receives and processes both polls,
5778 * sequence numbers can wrap around in such a way that a
5779 * resent frame has a sequence number that looks like new data
5780 * with a sequence gap. This would trigger an erroneous SREJ
5783 * Fortunately, this is impossible with a tx window that's
5784 * less than half of the maximum sequence number, which allows
5785 * invalid frames to be safely ignored.
5787 * With tx window sizes greater than half of the tx window
5788 * maximum, the frame is invalid and cannot be ignored. This
5789 * causes a disconnect.
5792 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5793 BT_DBG("Invalid/Ignore - txseq outside tx window");
5794 return L2CAP_TXSEQ_INVALID_IGNORE;
5796 BT_DBG("Invalid - txseq outside tx window");
5797 return L2CAP_TXSEQ_INVALID;
5800 BT_DBG("Unexpected - txseq indicates missing frames");
5801 return L2CAP_TXSEQ_UNEXPECTED;
5805 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5806 struct l2cap_ctrl *control,
5807 struct sk_buff *skb, u8 event)
5810 bool skb_in_use = false;
5812 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5816 case L2CAP_EV_RECV_IFRAME:
5817 switch (l2cap_classify_txseq(chan, control->txseq)) {
5818 case L2CAP_TXSEQ_EXPECTED:
5819 l2cap_pass_to_tx(chan, control);
5821 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5822 BT_DBG("Busy, discarding expected seq %d",
5827 chan->expected_tx_seq = __next_seq(chan,
5830 chan->buffer_seq = chan->expected_tx_seq;
5833 err = l2cap_reassemble_sdu(chan, skb, control);
5837 if (control->final) {
5838 if (!test_and_clear_bit(CONN_REJ_ACT,
5839 &chan->conn_state)) {
5841 l2cap_retransmit_all(chan, control);
5842 l2cap_ertm_send(chan);
5846 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5847 l2cap_send_ack(chan);
5849 case L2CAP_TXSEQ_UNEXPECTED:
5850 l2cap_pass_to_tx(chan, control);
5852 /* Can't issue SREJ frames in the local busy state.
5853 * Drop this frame, it will be seen as missing
5854 * when local busy is exited.
5856 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5857 BT_DBG("Busy, discarding unexpected seq %d",
5862 /* There was a gap in the sequence, so an SREJ
5863 * must be sent for each missing frame. The
5864 * current frame is stored for later use.
5866 skb_queue_tail(&chan->srej_q, skb);
5868 BT_DBG("Queued %p (queue len %d)", skb,
5869 skb_queue_len(&chan->srej_q));
5871 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5872 l2cap_seq_list_clear(&chan->srej_list);
5873 l2cap_send_srej(chan, control->txseq);
5875 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5877 case L2CAP_TXSEQ_DUPLICATE:
5878 l2cap_pass_to_tx(chan, control);
5880 case L2CAP_TXSEQ_INVALID_IGNORE:
5882 case L2CAP_TXSEQ_INVALID:
5884 l2cap_send_disconn_req(chan, ECONNRESET);
5888 case L2CAP_EV_RECV_RR:
5889 l2cap_pass_to_tx(chan, control);
5890 if (control->final) {
5891 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5893 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5894 !__chan_is_moving(chan)) {
5896 l2cap_retransmit_all(chan, control);
5899 l2cap_ertm_send(chan);
5900 } else if (control->poll) {
5901 l2cap_send_i_or_rr_or_rnr(chan);
5903 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5904 &chan->conn_state) &&
5905 chan->unacked_frames)
5906 __set_retrans_timer(chan);
5908 l2cap_ertm_send(chan);
5911 case L2CAP_EV_RECV_RNR:
5912 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5913 l2cap_pass_to_tx(chan, control);
5914 if (control && control->poll) {
5915 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5916 l2cap_send_rr_or_rnr(chan, 0);
5918 __clear_retrans_timer(chan);
5919 l2cap_seq_list_clear(&chan->retrans_list);
5921 case L2CAP_EV_RECV_REJ:
5922 l2cap_handle_rej(chan, control);
5924 case L2CAP_EV_RECV_SREJ:
5925 l2cap_handle_srej(chan, control);
5931 if (skb && !skb_in_use) {
5932 BT_DBG("Freeing %p", skb);
5939 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5940 struct l2cap_ctrl *control,
5941 struct sk_buff *skb, u8 event)
5944 u16 txseq = control->txseq;
5945 bool skb_in_use = false;
5947 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5951 case L2CAP_EV_RECV_IFRAME:
5952 switch (l2cap_classify_txseq(chan, txseq)) {
5953 case L2CAP_TXSEQ_EXPECTED:
5954 /* Keep frame for reassembly later */
5955 l2cap_pass_to_tx(chan, control);
5956 skb_queue_tail(&chan->srej_q, skb);
5958 BT_DBG("Queued %p (queue len %d)", skb,
5959 skb_queue_len(&chan->srej_q));
5961 chan->expected_tx_seq = __next_seq(chan, txseq);
5963 case L2CAP_TXSEQ_EXPECTED_SREJ:
5964 l2cap_seq_list_pop(&chan->srej_list);
5966 l2cap_pass_to_tx(chan, control);
5967 skb_queue_tail(&chan->srej_q, skb);
5969 BT_DBG("Queued %p (queue len %d)", skb,
5970 skb_queue_len(&chan->srej_q));
5972 err = l2cap_rx_queued_iframes(chan);
5977 case L2CAP_TXSEQ_UNEXPECTED:
5978 /* Got a frame that can't be reassembled yet.
5979 * Save it for later, and send SREJs to cover
5980 * the missing frames.
5982 skb_queue_tail(&chan->srej_q, skb);
5984 BT_DBG("Queued %p (queue len %d)", skb,
5985 skb_queue_len(&chan->srej_q));
5987 l2cap_pass_to_tx(chan, control);
5988 l2cap_send_srej(chan, control->txseq);
5990 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5991 /* This frame was requested with an SREJ, but
5992 * some expected retransmitted frames are
5993 * missing. Request retransmission of missing
5996 skb_queue_tail(&chan->srej_q, skb);
5998 BT_DBG("Queued %p (queue len %d)", skb,
5999 skb_queue_len(&chan->srej_q));
6001 l2cap_pass_to_tx(chan, control);
6002 l2cap_send_srej_list(chan, control->txseq);
6004 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6005 /* We've already queued this frame. Drop this copy. */
6006 l2cap_pass_to_tx(chan, control);
6008 case L2CAP_TXSEQ_DUPLICATE:
6009 /* Expecting a later sequence number, so this frame
6010 * was already received. Ignore it completely.
6013 case L2CAP_TXSEQ_INVALID_IGNORE:
6015 case L2CAP_TXSEQ_INVALID:
6017 l2cap_send_disconn_req(chan, ECONNRESET);
6021 case L2CAP_EV_RECV_RR:
6022 l2cap_pass_to_tx(chan, control);
6023 if (control->final) {
6024 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6026 if (!test_and_clear_bit(CONN_REJ_ACT,
6027 &chan->conn_state)) {
6029 l2cap_retransmit_all(chan, control);
6032 l2cap_ertm_send(chan);
6033 } else if (control->poll) {
6034 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6035 &chan->conn_state) &&
6036 chan->unacked_frames) {
6037 __set_retrans_timer(chan);
6040 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6041 l2cap_send_srej_tail(chan);
6043 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6044 &chan->conn_state) &&
6045 chan->unacked_frames)
6046 __set_retrans_timer(chan);
6048 l2cap_send_ack(chan);
6051 case L2CAP_EV_RECV_RNR:
6052 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6053 l2cap_pass_to_tx(chan, control);
6054 if (control->poll) {
6055 l2cap_send_srej_tail(chan);
6057 struct l2cap_ctrl rr_control;
6058 memset(&rr_control, 0, sizeof(rr_control));
6059 rr_control.sframe = 1;
6060 rr_control.super = L2CAP_SUPER_RR;
6061 rr_control.reqseq = chan->buffer_seq;
6062 l2cap_send_sframe(chan, &rr_control);
6066 case L2CAP_EV_RECV_REJ:
6067 l2cap_handle_rej(chan, control);
6069 case L2CAP_EV_RECV_SREJ:
6070 l2cap_handle_srej(chan, control);
6074 if (skb && !skb_in_use) {
6075 BT_DBG("Freeing %p", skb);
6082 static int l2cap_finish_move(struct l2cap_chan *chan)
6084 BT_DBG("chan %p", chan);
6086 chan->rx_state = L2CAP_RX_STATE_RECV;
6089 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6091 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6093 return l2cap_resegment(chan);
6096 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6097 struct l2cap_ctrl *control,
6098 struct sk_buff *skb, u8 event)
6102 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6108 l2cap_process_reqseq(chan, control->reqseq);
6110 if (!skb_queue_empty(&chan->tx_q))
6111 chan->tx_send_head = skb_peek(&chan->tx_q);
6113 chan->tx_send_head = NULL;
6115 /* Rewind next_tx_seq to the point expected
6118 chan->next_tx_seq = control->reqseq;
6119 chan->unacked_frames = 0;
6121 err = l2cap_finish_move(chan);
6125 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6126 l2cap_send_i_or_rr_or_rnr(chan);
6128 if (event == L2CAP_EV_RECV_IFRAME)
6131 return l2cap_rx_state_recv(chan, control, NULL, event);
6134 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6135 struct l2cap_ctrl *control,
6136 struct sk_buff *skb, u8 event)
6140 if (!control->final)
6143 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6145 chan->rx_state = L2CAP_RX_STATE_RECV;
6146 l2cap_process_reqseq(chan, control->reqseq);
6148 if (!skb_queue_empty(&chan->tx_q))
6149 chan->tx_send_head = skb_peek(&chan->tx_q);
6151 chan->tx_send_head = NULL;
6153 /* Rewind next_tx_seq to the point expected
6156 chan->next_tx_seq = control->reqseq;
6157 chan->unacked_frames = 0;
6160 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6162 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6164 err = l2cap_resegment(chan);
6167 err = l2cap_rx_state_recv(chan, control, skb, event);
6172 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6174 /* Make sure reqseq is for a packet that has been sent but not acked */
6177 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6178 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6181 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6182 struct sk_buff *skb, u8 event)
6186 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6187 control, skb, event, chan->rx_state);
6189 if (__valid_reqseq(chan, control->reqseq)) {
6190 switch (chan->rx_state) {
6191 case L2CAP_RX_STATE_RECV:
6192 err = l2cap_rx_state_recv(chan, control, skb, event);
6194 case L2CAP_RX_STATE_SREJ_SENT:
6195 err = l2cap_rx_state_srej_sent(chan, control, skb,
6198 case L2CAP_RX_STATE_WAIT_P:
6199 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6201 case L2CAP_RX_STATE_WAIT_F:
6202 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6209 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6210 control->reqseq, chan->next_tx_seq,
6211 chan->expected_ack_seq);
6212 l2cap_send_disconn_req(chan, ECONNRESET);
6218 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6219 struct sk_buff *skb)
6223 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6226 if (l2cap_classify_txseq(chan, control->txseq) ==
6227 L2CAP_TXSEQ_EXPECTED) {
6228 l2cap_pass_to_tx(chan, control);
6230 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6231 __next_seq(chan, chan->buffer_seq));
6233 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6235 l2cap_reassemble_sdu(chan, skb, control);
6238 kfree_skb(chan->sdu);
6241 chan->sdu_last_frag = NULL;
6245 BT_DBG("Freeing %p", skb);
6250 chan->last_acked_seq = control->txseq;
6251 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6256 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6258 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6262 __unpack_control(chan, skb);
6267 * We can just drop the corrupted I-frame here.
6268 * Receiver will miss it and start proper recovery
6269 * procedures and ask for retransmission.
6271 if (l2cap_check_fcs(chan, skb))
6274 if (!control->sframe && control->sar == L2CAP_SAR_START)
6275 len -= L2CAP_SDULEN_SIZE;
6277 if (chan->fcs == L2CAP_FCS_CRC16)
6278 len -= L2CAP_FCS_SIZE;
6280 if (len > chan->mps) {
6281 l2cap_send_disconn_req(chan, ECONNRESET);
6285 if (!control->sframe) {
6288 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6289 control->sar, control->reqseq, control->final,
6292 /* Validate F-bit - F=0 always valid, F=1 only
6293 * valid in TX WAIT_F
6295 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6298 if (chan->mode != L2CAP_MODE_STREAMING) {
6299 event = L2CAP_EV_RECV_IFRAME;
6300 err = l2cap_rx(chan, control, skb, event);
6302 err = l2cap_stream_rx(chan, control, skb);
6306 l2cap_send_disconn_req(chan, ECONNRESET);
6308 const u8 rx_func_to_event[4] = {
6309 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6310 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6313 /* Only I-frames are expected in streaming mode */
6314 if (chan->mode == L2CAP_MODE_STREAMING)
6317 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6318 control->reqseq, control->final, control->poll,
6322 BT_ERR("Trailing bytes: %d in sframe", len);
6323 l2cap_send_disconn_req(chan, ECONNRESET);
6327 /* Validate F and P bits */
6328 if (control->final && (control->poll ||
6329 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6332 event = rx_func_to_event[control->super];
6333 if (l2cap_rx(chan, control, skb, event))
6334 l2cap_send_disconn_req(chan, ECONNRESET);
6344 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6345 struct sk_buff *skb)
6347 struct l2cap_chan *chan;
6349 chan = l2cap_get_chan_by_scid(conn, cid);
6351 if (cid == L2CAP_CID_A2MP) {
6352 chan = a2mp_channel_create(conn, skb);
6358 l2cap_chan_lock(chan);
6360 BT_DBG("unknown cid 0x%4.4x", cid);
6361 /* Drop packet and return */
6367 BT_DBG("chan %p, len %d", chan, skb->len);
6369 if (chan->state != BT_CONNECTED)
6372 switch (chan->mode) {
6373 case L2CAP_MODE_BASIC:
6374 /* If socket recv buffers overflows we drop data here
6375 * which is *bad* because L2CAP has to be reliable.
6376 * But we don't have any other choice. L2CAP doesn't
6377 * provide flow control mechanism. */
6379 if (chan->imtu < skb->len)
6382 if (!chan->ops->recv(chan, skb))
6386 case L2CAP_MODE_ERTM:
6387 case L2CAP_MODE_STREAMING:
6388 l2cap_data_rcv(chan, skb);
6392 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6400 l2cap_chan_unlock(chan);
6403 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6404 struct sk_buff *skb)
6406 struct hci_conn *hcon = conn->hcon;
6407 struct l2cap_chan *chan;
6409 if (hcon->type != ACL_LINK)
6412 chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src,
6417 BT_DBG("chan %p, len %d", chan, skb->len);
6419 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6422 if (chan->imtu < skb->len)
6425 /* Store remote BD_ADDR and PSM for msg_name */
6426 bacpy(&bt_cb(skb)->bdaddr, &conn->hcon->dst);
6427 bt_cb(skb)->psm = psm;
6429 if (!chan->ops->recv(chan, skb))
6436 static void l2cap_att_channel(struct l2cap_conn *conn,
6437 struct sk_buff *skb)
6439 struct hci_conn *hcon = conn->hcon;
6440 struct l2cap_chan *chan;
6442 if (hcon->type != LE_LINK)
6445 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6446 &conn->hcon->src, &conn->hcon->dst);
6450 BT_DBG("chan %p, len %d", chan, skb->len);
6452 if (chan->imtu < skb->len)
6455 if (!chan->ops->recv(chan, skb))
6462 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6464 struct l2cap_hdr *lh = (void *) skb->data;
6468 skb_pull(skb, L2CAP_HDR_SIZE);
6469 cid = __le16_to_cpu(lh->cid);
6470 len = __le16_to_cpu(lh->len);
6472 if (len != skb->len) {
6477 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6480 case L2CAP_CID_SIGNALING:
6481 l2cap_sig_channel(conn, skb);
6484 case L2CAP_CID_CONN_LESS:
6485 psm = get_unaligned((__le16 *) skb->data);
6486 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6487 l2cap_conless_channel(conn, psm, skb);
6491 l2cap_att_channel(conn, skb);
6494 case L2CAP_CID_LE_SIGNALING:
6495 l2cap_le_sig_channel(conn, skb);
6499 if (smp_sig_channel(conn, skb))
6500 l2cap_conn_del(conn->hcon, EACCES);
6504 l2cap_data_channel(conn, cid, skb);
6509 /* ---- L2CAP interface with lower layer (HCI) ---- */
6511 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6513 int exact = 0, lm1 = 0, lm2 = 0;
6514 struct l2cap_chan *c;
6516 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6518 /* Find listening sockets and check their link_mode */
6519 read_lock(&chan_list_lock);
6520 list_for_each_entry(c, &chan_list, global_l) {
6521 if (c->state != BT_LISTEN)
6524 if (!bacmp(&c->src, &hdev->bdaddr)) {
6525 lm1 |= HCI_LM_ACCEPT;
6526 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6527 lm1 |= HCI_LM_MASTER;
6529 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6530 lm2 |= HCI_LM_ACCEPT;
6531 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6532 lm2 |= HCI_LM_MASTER;
6535 read_unlock(&chan_list_lock);
6537 return exact ? lm1 : lm2;
6540 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6542 struct l2cap_conn *conn;
6544 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6547 conn = l2cap_conn_add(hcon);
6549 l2cap_conn_ready(conn);
6551 l2cap_conn_del(hcon, bt_to_errno(status));
6555 int l2cap_disconn_ind(struct hci_conn *hcon)
6557 struct l2cap_conn *conn = hcon->l2cap_data;
6559 BT_DBG("hcon %p", hcon);
6562 return HCI_ERROR_REMOTE_USER_TERM;
6563 return conn->disc_reason;
6566 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6568 BT_DBG("hcon %p reason %d", hcon, reason);
6570 l2cap_conn_del(hcon, bt_to_errno(reason));
6573 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6575 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6578 if (encrypt == 0x00) {
6579 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6580 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6581 } else if (chan->sec_level == BT_SECURITY_HIGH)
6582 l2cap_chan_close(chan, ECONNREFUSED);
6584 if (chan->sec_level == BT_SECURITY_MEDIUM)
6585 __clear_chan_timer(chan);
6589 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6591 struct l2cap_conn *conn = hcon->l2cap_data;
6592 struct l2cap_chan *chan;
6597 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6599 if (hcon->type == LE_LINK) {
6600 if (!status && encrypt)
6601 smp_distribute_keys(conn, 0);
6602 cancel_delayed_work(&conn->security_timer);
6605 mutex_lock(&conn->chan_lock);
6607 list_for_each_entry(chan, &conn->chan_l, list) {
6608 l2cap_chan_lock(chan);
6610 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6611 state_to_string(chan->state));
6613 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6614 l2cap_chan_unlock(chan);
6618 if (chan->scid == L2CAP_CID_ATT) {
6619 if (!status && encrypt) {
6620 chan->sec_level = hcon->sec_level;
6621 l2cap_chan_ready(chan);
6624 l2cap_chan_unlock(chan);
6628 if (!__l2cap_no_conn_pending(chan)) {
6629 l2cap_chan_unlock(chan);
6633 if (!status && (chan->state == BT_CONNECTED ||
6634 chan->state == BT_CONFIG)) {
6635 chan->ops->resume(chan);
6636 l2cap_check_encryption(chan, encrypt);
6637 l2cap_chan_unlock(chan);
6641 if (chan->state == BT_CONNECT) {
6643 l2cap_start_connection(chan);
6645 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6647 } else if (chan->state == BT_CONNECT2) {
6648 struct sock *sk = chan->sk;
6649 struct l2cap_conn_rsp rsp;
6655 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6656 res = L2CAP_CR_PEND;
6657 stat = L2CAP_CS_AUTHOR_PEND;
6658 chan->ops->defer(chan);
6660 __l2cap_state_change(chan, BT_CONFIG);
6661 res = L2CAP_CR_SUCCESS;
6662 stat = L2CAP_CS_NO_INFO;
6665 __l2cap_state_change(chan, BT_DISCONN);
6666 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6667 res = L2CAP_CR_SEC_BLOCK;
6668 stat = L2CAP_CS_NO_INFO;
6673 rsp.scid = cpu_to_le16(chan->dcid);
6674 rsp.dcid = cpu_to_le16(chan->scid);
6675 rsp.result = cpu_to_le16(res);
6676 rsp.status = cpu_to_le16(stat);
6677 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6680 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6681 res == L2CAP_CR_SUCCESS) {
6683 set_bit(CONF_REQ_SENT, &chan->conf_state);
6684 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6686 l2cap_build_conf_req(chan, buf),
6688 chan->num_conf_req++;
6692 l2cap_chan_unlock(chan);
6695 mutex_unlock(&conn->chan_lock);
6700 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6702 struct l2cap_conn *conn = hcon->l2cap_data;
6703 struct l2cap_hdr *hdr;
6706 /* For AMP controller do not create l2cap conn */
6707 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6711 conn = l2cap_conn_add(hcon);
6716 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6720 case ACL_START_NO_FLUSH:
6723 BT_ERR("Unexpected start frame (len %d)", skb->len);
6724 kfree_skb(conn->rx_skb);
6725 conn->rx_skb = NULL;
6727 l2cap_conn_unreliable(conn, ECOMM);
6730 /* Start fragment always begin with Basic L2CAP header */
6731 if (skb->len < L2CAP_HDR_SIZE) {
6732 BT_ERR("Frame is too short (len %d)", skb->len);
6733 l2cap_conn_unreliable(conn, ECOMM);
6737 hdr = (struct l2cap_hdr *) skb->data;
6738 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6740 if (len == skb->len) {
6741 /* Complete frame received */
6742 l2cap_recv_frame(conn, skb);
6746 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6748 if (skb->len > len) {
6749 BT_ERR("Frame is too long (len %d, expected len %d)",
6751 l2cap_conn_unreliable(conn, ECOMM);
6755 /* Allocate skb for the complete frame (with header) */
6756 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6760 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6762 conn->rx_len = len - skb->len;
6766 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6768 if (!conn->rx_len) {
6769 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6770 l2cap_conn_unreliable(conn, ECOMM);
6774 if (skb->len > conn->rx_len) {
6775 BT_ERR("Fragment is too long (len %d, expected %d)",
6776 skb->len, conn->rx_len);
6777 kfree_skb(conn->rx_skb);
6778 conn->rx_skb = NULL;
6780 l2cap_conn_unreliable(conn, ECOMM);
6784 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6786 conn->rx_len -= skb->len;
6788 if (!conn->rx_len) {
6789 /* Complete frame received. l2cap_recv_frame
6790 * takes ownership of the skb so set the global
6791 * rx_skb pointer to NULL first.
6793 struct sk_buff *rx_skb = conn->rx_skb;
6794 conn->rx_skb = NULL;
6795 l2cap_recv_frame(conn, rx_skb);
6805 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6807 struct l2cap_chan *c;
6809 read_lock(&chan_list_lock);
6811 list_for_each_entry(c, &chan_list, global_l) {
6812 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6814 c->state, __le16_to_cpu(c->psm),
6815 c->scid, c->dcid, c->imtu, c->omtu,
6816 c->sec_level, c->mode);
6819 read_unlock(&chan_list_lock);
6824 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6826 return single_open(file, l2cap_debugfs_show, inode->i_private);
6829 static const struct file_operations l2cap_debugfs_fops = {
6830 .open = l2cap_debugfs_open,
6832 .llseek = seq_lseek,
6833 .release = single_release,
6836 static struct dentry *l2cap_debugfs;
6838 int __init l2cap_init(void)
6842 err = l2cap_init_sockets();
6847 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6848 NULL, &l2cap_debugfs_fops);
6850 BT_ERR("Failed to create L2CAP debug file");
6856 void l2cap_exit(void)
6858 debugfs_remove(l2cap_debugfs);
6859 l2cap_cleanup_sockets();
6862 module_param(disable_ertm, bool, 0644);
6863 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");