2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI LE address type to the exported bdaddr type constant.
 * NOTE(review): extraction fragment — the non-LE (BR/EDR) return path and
 * closing braces are not visible in this view.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Channel lookup helpers.
 * The __-prefixed variants walk conn->chan_l without taking any lock;
 * the plain variants wrap them in conn->chan_lock (visible below), so the
 * __ variants are evidently meant to run with that lock already held.
 * NOTE(review): extraction fragment — function bodies are incomplete
 * (missing braces, returns and locking of the found channel).
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Lookup by command identifier (chan->ident), same locking pattern. */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Global (cross-connection) lookup by source PSM + source address;
 * callers below hold chan_list_lock around this walk. */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to @psm for source address @src. A non-zero @psm is rejected
 * if already taken (duplicate check via __l2cap_global_chan_by_addr); for
 * psm == 0 a free dynamic PSM is picked from the odd values in
 * 0x1001..0x10ff. Serialized by chan_list_lock.
 * NOTE(review): extraction fragment — error returns and braces missing.
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd: step by 2 starting at 0x1001. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Assign a fixed SCID to @chan under chan_list_lock. */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Return the first dynamic CID on @conn not already used as an SCID. */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Channel state transition helpers. All of them funnel into the
 * chan->ops->state_change() callback; the variants taking @err also pass
 * the error through (state left unchanged in set_err, per the visible
 * call with chan->state).
 * NOTE(review): extraction fragment — socket locking around the
 * callbacks is not visible here.
 */
226 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 struct sock *sk = chan->sk;
240 __l2cap_state_change(chan, state);
244 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 struct sock *sk = chan->sk;
251 chan->ops->state_change(chan, chan->state, err);
/* Report @err on the channel without changing its state. */
255 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
257 struct sock *sk = chan->sk;
260 chan->ops->state_change(chan, chan->state, err);
/* ERTM retransmission/monitor timer management.
 * The retransmission timer is only armed when the monitor timer is not
 * pending and a timeout is configured; arming the monitor timer first
 * cancels any pending retransmission timer.
 */
264 static void __set_retrans_timer(struct l2cap_chan *chan)
266 if (!delayed_work_pending(&chan->monitor_timer) &&
267 chan->retrans_timeout) {
268 l2cap_set_timer(chan, &chan->retrans_timer,
269 msecs_to_jiffies(chan->retrans_timeout));
273 static void __set_monitor_timer(struct l2cap_chan *chan)
275 __clear_retrans_timer(chan);
276 if (chan->monitor_timeout) {
277 l2cap_set_timer(chan, &chan->monitor_timer,
278 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose control.txseq matches @seq. */
282 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
287 skb_queue_walk(head, skb) {
288 if (bt_cb(skb)->control.txseq == seq)
295 /* ---- L2CAP sequence number lists ---- */
297 /* For ERTM, ordered lists of sequence numbers must be tracked for
298 * SREJ requests that are received and for frames that are to be
299 * retransmitted. These seq_list functions implement a singly-linked
300 * list in an array, where membership in the list can also be checked
301 * in constant time. Items can also be added to the tail of the list
302 and removed from the head in constant time, without further memory allocation.
/* Singly-linked list of ERTM sequence numbers stored in a flat array.
 * list[i & mask] holds the successor of sequence number i;
 * L2CAP_SEQ_LIST_CLEAR marks "not a member", L2CAP_SEQ_LIST_TAIL marks
 * the last element. head/tail are CLEAR when the list is empty.
 * NOTE(review): extraction fragment — allocation-failure check and some
 * returns are not visible.
 */
306 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
308 size_t alloc_size, i;
310 /* Allocated size is a power of 2 to map sequence numbers
311 * (which may be up to 14 bits) in to a smaller array that is
312 * sized for the negotiated ERTM transmit windows.
314 alloc_size = roundup_pow_of_two(size);
316 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
320 seq_list->mask = alloc_size - 1;
321 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
322 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
323 for (i = 0; i < alloc_size; i++)
324 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
329 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
331 kfree(seq_list->list);
334 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
337 /* Constant-time check for list membership */
338 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list. Head removal is O(1); removing an interior
 * element walks predecessor links, so it is O(n) in the worst case. */
341 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
343 u16 mask = seq_list->mask;
345 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
346 /* In case someone tries to pop the head of an empty list */
347 return L2CAP_SEQ_LIST_CLEAR;
348 } else if (seq_list->head == seq) {
349 /* Head can be removed in constant time */
350 seq_list->head = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
354 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
355 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
358 /* Walk the list to find the sequence number */
359 u16 prev = seq_list->head;
360 while (seq_list->list[prev & mask] != seq) {
361 prev = seq_list->list[prev & mask];
362 if (prev == L2CAP_SEQ_LIST_TAIL)
363 return L2CAP_SEQ_LIST_CLEAR;
366 /* Unlink the number from the list and clear it */
367 seq_list->list[prev & mask] = seq_list->list[seq & mask];
368 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
369 if (seq_list->tail == seq)
370 seq_list->tail = prev;
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
377 /* Remove the head in constant time */
378 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset to empty; no-op if already empty (head == CLEAR). */
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
385 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
388 for (i = 0; i <= seq_list->mask; i++)
389 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); duplicates are ignored (membership
 * slot already non-CLEAR). */
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
397 u16 mask = seq_list->mask;
399 /* All appends happen in constant time */
401 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
404 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 seq_list->head = seq;
407 seq_list->list[seq_list->tail & mask] = seq;
409 seq_list->tail = seq;
410 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: picks a close reason based
 * on the channel state (ECONNREFUSED for connected/config/security-pending
 * connect states) and closes the channel under conn->chan_lock + the
 * channel lock, then drops the timer's channel reference.
 * NOTE(review): extraction fragment — the default reason and
 * container_of field are not visible.
 */
413 static void l2cap_chan_timeout(struct work_struct *work)
415 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
417 struct l2cap_conn *conn = chan->conn;
420 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
422 mutex_lock(&conn->chan_lock);
423 l2cap_chan_lock(chan);
425 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
426 reason = ECONNREFUSED;
427 else if (chan->state == BT_CONNECT &&
428 chan->sec_level != BT_SECURITY_SDP)
429 reason = ECONNREFUSED;
433 l2cap_chan_close(chan, reason);
435 l2cap_chan_unlock(chan);
437 chan->ops->close(chan);
438 mutex_unlock(&conn->chan_lock);
/* Balances the reference held while the delayed work was pending. */
440 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed, linked into the global
 * chan_list, timer work initialised, state BT_OPEN, refcount 1.
 * NOTE(review): extraction fragment — NULL-check after kzalloc and the
 * return statement are not visible.
 */
443 struct l2cap_chan *l2cap_chan_create(void)
445 struct l2cap_chan *chan;
447 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
451 mutex_init(&chan->lock);
453 write_lock(&chan_list_lock);
454 list_add(&chan->global_l, &chan_list);
455 write_unlock(&chan_list_lock);
457 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
459 chan->state = BT_OPEN;
461 kref_init(&chan->kref);
463 /* This flag is cleared in l2cap_chan_ready() */
464 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
466 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free. */
471 static void l2cap_chan_destroy(struct kref *kref)
473 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
475 BT_DBG("chan %p", chan);
477 write_lock(&chan_list_lock);
478 list_del(&chan->global_l);
479 write_unlock(&chan_list_lock);
/* Reference counting wrappers around chan->kref. */
484 void l2cap_chan_hold(struct l2cap_chan *c)
486 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
491 void l2cap_chan_put(struct l2cap_chan *c)
493 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
495 kref_put(&c->kref, l2cap_chan_destroy);
/* Install the default ERTM/security parameters on a fresh channel:
 * CRC16 FCS, default max transmit count and TX windows, low security,
 * and force-active ACL scheduling.
 */
498 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
500 chan->fcs = L2CAP_FCS_CRC16;
501 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
502 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
503 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
504 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
505 chan->sec_level = BT_SECURITY_LOW;
507 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign SCID/DCID and MTUs according to the
 * channel type (connection-oriented, connectionless, fixed A2MP, or raw
 * signalling), set default EFS parameters, take channel and hci_conn
 * references, and link the channel into conn->chan_l.
 * Caller holds conn->chan_lock (see l2cap_chan_add below).
 * NOTE(review): extraction fragment — break statements and the else
 * branch structure are not visible.
 */
510 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
512 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
513 __le16_to_cpu(chan->psm), chan->dcid);
515 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
519 switch (chan->chan_type) {
520 case L2CAP_CHAN_CONN_ORIENTED:
521 if (conn->hcon->type == LE_LINK) {
523 chan->omtu = L2CAP_DEFAULT_MTU;
524 if (chan->dcid == L2CAP_CID_ATT)
525 chan->scid = L2CAP_CID_ATT;
527 chan->scid = l2cap_alloc_cid(conn);
529 /* Alloc CID for connection-oriented socket */
530 chan->scid = l2cap_alloc_cid(conn);
531 chan->omtu = L2CAP_DEFAULT_MTU;
535 case L2CAP_CHAN_CONN_LESS:
536 /* Connectionless socket */
537 chan->scid = L2CAP_CID_CONN_LESS;
538 chan->dcid = L2CAP_CID_CONN_LESS;
539 chan->omtu = L2CAP_DEFAULT_MTU;
542 case L2CAP_CHAN_CONN_FIX_A2MP:
543 chan->scid = L2CAP_CID_A2MP;
544 chan->dcid = L2CAP_CID_A2MP;
545 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
546 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
550 /* Raw socket can send/recv signalling messages only */
551 chan->scid = L2CAP_CID_SIGNALING;
552 chan->dcid = L2CAP_CID_SIGNALING;
553 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort EFS defaults for all channel types. */
556 chan->local_id = L2CAP_BESTEFFORT_ID;
557 chan->local_stype = L2CAP_SERV_BESTEFFORT;
558 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
559 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
560 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
561 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
563 l2cap_chan_hold(chan);
565 hci_conn_hold(conn->hcon);
567 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: take conn->chan_lock around __l2cap_chan_add(). */
570 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
572 mutex_lock(&conn->chan_lock);
573 __l2cap_chan_add(conn, chan);
574 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink from
 * conn->chan_l, drop the references taken in __l2cap_chan_add (channel
 * ref always, hci_conn ref unless fixed A2MP), disconnect any logical
 * AMP link, notify the owner via ops->teardown, and purge mode-specific
 * queues (ERTM timers + SREJ/retrans state, streaming TX queue).
 * NOTE(review): extraction fragment — the "if (conn)" guard, returns
 * and switch scaffolding are not visible.
 */
577 void l2cap_chan_del(struct l2cap_chan *chan, int err)
579 struct l2cap_conn *conn = chan->conn;
581 __clear_chan_timer(chan);
583 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
586 struct amp_mgr *mgr = conn->hcon->amp_mgr;
587 /* Delete from channel list */
588 list_del(&chan->list);
590 l2cap_chan_put(chan);
594 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
595 hci_conn_drop(conn->hcon);
597 if (mgr && mgr->bredr_chan == chan)
598 mgr->bredr_chan = NULL;
601 if (chan->hs_hchan) {
602 struct hci_chan *hs_hchan = chan->hs_hchan;
604 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
605 amp_disconnect_logical_link(hs_hchan);
608 chan->ops->teardown(chan, err);
610 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
614 case L2CAP_MODE_BASIC:
617 case L2CAP_MODE_ERTM:
618 __clear_retrans_timer(chan);
619 __clear_monitor_timer(chan);
620 __clear_ack_timer(chan);
622 skb_queue_purge(&chan->srej_q);
624 l2cap_seq_list_free(&chan->srej_list);
625 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup in the original layout —
 * TODO confirm against full source */
629 case L2CAP_MODE_STREAMING:
630 skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state:
 *  - connected/config on ACL: send a Disconnect Request and arm the
 *    channel timer; otherwise delete immediately;
 *  - CONNECT2 on ACL: answer the pending Connect Request with a
 *    rejection (SEC_BLOCK when setup was deferred, else BAD_PSM) before
 *    deleting;
 *  - listening / other states: teardown only.
 * NOTE(review): extraction fragment — the case labels and closing braces
 * between branches are not visible.
 */
637 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
639 struct l2cap_conn *conn = chan->conn;
641 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
643 switch (chan->state) {
645 chan->ops->teardown(chan, 0);
650 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
651 conn->hcon->type == ACL_LINK) {
652 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
653 l2cap_send_disconn_req(chan, reason);
655 l2cap_chan_del(chan, reason);
659 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
660 conn->hcon->type == ACL_LINK) {
661 struct l2cap_conn_rsp rsp;
664 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
665 result = L2CAP_CR_SEC_BLOCK;
667 result = L2CAP_CR_BAD_PSM;
669 l2cap_state_change(chan, BT_DISCONN);
671 rsp.scid = cpu_to_le16(chan->dcid);
672 rsp.dcid = cpu_to_le16(chan->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
675 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
679 l2cap_chan_del(chan, reason);
684 l2cap_chan_del(chan, reason);
688 chan->ops->teardown(chan, 0);
/* Translate the channel type + requested security level into an HCI
 * authentication requirement. SDP-bound channels (PSM 0x0001 / 3DSP) are
 * downgraded from LOW to the SDP pseudo-level and never request bonding.
 * NOTE(review): extraction fragment — case labels for the first and last
 * switch arms are not visible.
 */
693 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
695 switch (chan->chan_type) {
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_DEDICATED_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_DEDICATED_BONDING;
703 return HCI_AT_NO_BONDING;
706 case L2CAP_CHAN_CONN_LESS:
707 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
708 if (chan->sec_level == BT_SECURITY_LOW)
709 chan->sec_level = BT_SECURITY_SDP;
711 if (chan->sec_level == BT_SECURITY_HIGH)
712 return HCI_AT_NO_BONDING_MITM;
714 return HCI_AT_NO_BONDING;
716 case L2CAP_CHAN_CONN_ORIENTED:
717 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
718 if (chan->sec_level == BT_SECURITY_LOW)
719 chan->sec_level = BT_SECURITY_SDP;
721 if (chan->sec_level == BT_SECURITY_HIGH)
722 return HCI_AT_NO_BONDING_MITM;
724 return HCI_AT_NO_BONDING;
728 switch (chan->sec_level) {
729 case BT_SECURITY_HIGH:
730 return HCI_AT_GENERAL_BONDING_MITM;
731 case BT_SECURITY_MEDIUM:
732 return HCI_AT_GENERAL_BONDING;
734 return HCI_AT_NO_BONDING;
740 /* Service level security */
741 int l2cap_chan_check_security(struct l2cap_chan *chan)
743 struct l2cap_conn *conn = chan->conn;
746 auth_type = l2cap_get_auth_type(chan);
748 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range under conn->lock.
 */
751 static u8 l2cap_get_ident(struct l2cap_conn *conn)
755 /* Get next available identificator.
756 * 1 - 128 are used by kernel.
757 * 129 - 199 are reserved.
758 * 200 - 254 are used by utilities like l2ping, etc.
761 spin_lock(&conn->lock);
763 if (++conn->tx_ident > 128)
768 spin_unlock(&conn->lock);
/* Build a signalling command PDU and push it on the ACL link; uses the
 * no-flush ACL start flag when the controller supports it, and forces
 * active mode at maximum priority.
 * NOTE(review): fragment — the NULL-skb early return is not visible. */
773 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
776 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
779 BT_DBG("code 0x%2.2x", code);
784 if (lmp_no_flush_capable(conn->hcon->hdev))
785 flags = ACL_START_NO_FLUSH;
789 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
790 skb->priority = HCI_PRIO_MAX;
792 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE). */
795 static bool __chan_is_moving(struct l2cap_chan *chan)
797 return chan->move_state != L2CAP_MOVE_STABLE &&
798 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb on the channel: over the high-speed AMP link when one is
 * attached and no move is in flight, otherwise over the BR/EDR ACL link
 * with flush/force-active flags derived from the channel flags. */
801 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
803 struct hci_conn *hcon = chan->conn->hcon;
806 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
809 if (chan->hs_hcon && !__chan_is_moving(chan)) {
811 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
818 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
819 lmp_no_flush_capable(hcon->hdev))
820 flags = ACL_START_NO_FLUSH;
824 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
825 hci_send_acl(chan->conn->hchan, skb, flags);
/* ERTM control field marshalling. Two wire formats exist: the 16-bit
 * enhanced control field and the 32-bit extended control field (used when
 * FLAG_EXT_CTRL is set). Each has an S-frame form (poll/supervise bits)
 * and an I-frame form (SAR/txseq bits), selected by the FRAME_TYPE bit.
 */
828 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
830 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
831 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
833 if (enh & L2CAP_CTRL_FRAME_TYPE) {
836 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
837 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
844 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
845 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
852 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
854 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
855 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
857 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
860 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
861 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
868 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
869 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Parse the control field at the head of @skb into bt_cb and consume it,
 * choosing the format from FLAG_EXT_CTRL. */
876 static inline void __unpack_control(struct l2cap_chan *chan,
879 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
880 __unpack_extended_control(get_unaligned_le32(skb->data),
881 &bt_cb(skb)->control);
882 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
884 __unpack_enhanced_control(get_unaligned_le16(skb->data),
885 &bt_cb(skb)->control);
886 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
890 static u32 __pack_extended_control(struct l2cap_ctrl *control)
894 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
897 if (control->sframe) {
898 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
899 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
900 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
902 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
903 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
909 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
913 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
914 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
916 if (control->sframe) {
917 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
918 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
919 packed |= L2CAP_CTRL_FRAME_TYPE;
921 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
922 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write @control into @skb just after the basic L2CAP header. */
928 static inline void __pack_control(struct l2cap_chan *chan,
929 struct l2cap_ctrl *control,
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
933 put_unaligned_le32(__pack_extended_control(control),
934 skb->data + L2CAP_HDR_SIZE);
936 put_unaligned_le16(__pack_enhanced_control(control),
937 skb->data + L2CAP_HDR_SIZE);
/* Header size of an ERTM PDU: extended or enhanced control variant. */
941 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
943 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
944 return L2CAP_EXT_HDR_SIZE;
946 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header + packed control field + optional
 * CRC16 FCS over the frame so far. Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
949 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
953 struct l2cap_hdr *lh;
954 int hlen = __ertm_hdr_size(chan);
956 if (chan->fcs == L2CAP_FCS_CRC16)
957 hlen += L2CAP_FCS_SIZE;
959 skb = bt_skb_alloc(hlen, GFP_KERNEL);
962 return ERR_PTR(-ENOMEM);
964 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
965 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
966 lh->cid = cpu_to_le16(chan->dcid);
968 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
969 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
971 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
973 if (chan->fcs == L2CAP_FCS_CRC16) {
974 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
975 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
978 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by @control: maintains the F-bit, RNR-sent
 * state, and the last acked sequence number / ack timer for non-SREJ
 * frames, then packs and transmits the PDU.
 * NOTE(review): fragment — early returns for moving channels and IS_ERR
 * skb handling are not fully visible. */
982 static void l2cap_send_sframe(struct l2cap_chan *chan,
983 struct l2cap_ctrl *control)
988 BT_DBG("chan %p, control %p", chan, control);
990 if (!control->sframe)
993 if (__chan_is_moving(chan))
996 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1000 if (control->super == L2CAP_SUPER_RR)
1001 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1002 else if (control->super == L2CAP_SUPER_RNR)
1003 set_bit(CONN_RNR_SENT, &chan->conn_state);
1005 if (control->super != L2CAP_SUPER_SREJ) {
1006 chan->last_acked_seq = control->reqseq;
1007 __clear_ack_timer(chan);
1010 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1011 control->final, control->poll, control->super);
1013 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1014 control_field = __pack_extended_control(control);
1016 control_field = __pack_enhanced_control(control);
1018 skb = l2cap_create_sframe_pdu(chan, control_field);
1020 l2cap_do_send(chan, skb);
/* Send an RR (or RNR when locally busy) S-frame acknowledging
 * buffer_seq, with the poll bit as requested. */
1023 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1025 struct l2cap_ctrl control;
1027 BT_DBG("chan %p, poll %d", chan, poll);
1029 memset(&control, 0, sizeof(control));
1031 control.poll = poll;
1033 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1034 control.super = L2CAP_SUPER_RNR;
1036 control.super = L2CAP_SUPER_RR;
1038 control.reqseq = chan->buffer_seq;
1039 l2cap_send_sframe(chan, &control);
/* True when no Connect Request is outstanding for this channel. */
1042 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1044 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on / moved to an AMP
 * controller: high-speed must be enabled, the peer must advertise the
 * A2MP fixed channel, at least one non-BR/EDR AMP controller must be up,
 * and the channel policy must prefer AMP.
 * NOTE(review): fragment — the early "return false" lines are missing.
 */
1047 static bool __amp_capable(struct l2cap_chan *chan)
1049 struct l2cap_conn *conn = chan->conn;
1050 struct hci_dev *hdev;
1051 bool amp_available = false;
1053 if (!conn->hs_enabled)
1056 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1059 read_lock(&hci_dev_list_lock);
1060 list_for_each_entry(hdev, &hci_dev_list, list) {
1061 if (hdev->amp_type != AMP_TYPE_BREDR &&
1062 test_bit(HCI_UP, &hdev->flags)) {
1063 amp_available = true;
1067 read_unlock(&hci_dev_list_lock);
1069 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1070 return amp_available;
1075 static bool l2cap_check_efs(struct l2cap_chan *chan)
1077 /* Check EFS parameters */
/* Send an L2CAP Connect Request for @chan, recording the command ident
 * and marking the connect as pending. */
1081 void l2cap_send_conn_req(struct l2cap_chan *chan)
1083 struct l2cap_conn *conn = chan->conn;
1084 struct l2cap_conn_req req;
1086 req.scid = cpu_to_le16(chan->scid);
1087 req.psm = chan->psm;
1089 chan->ident = l2cap_get_ident(conn);
1091 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1093 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an A2MP Create Channel Request targeting AMP controller @amp_id. */
1096 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1098 struct l2cap_create_chan_req req;
1099 req.scid = cpu_to_le16(chan->scid);
1100 req.psm = chan->psm;
1101 req.amp_id = amp_id;
1103 chan->ident = l2cap_get_ident(chan->conn);
1105 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, mark
 * queued frames for (re)transmission, reset sequence/SREJ state, and
 * park the FSMs in XMIT/MOVE with the remote treated as busy.
 * Non-ERTM channels need no preparation.
 */
1109 static void l2cap_move_setup(struct l2cap_chan *chan)
1111 struct sk_buff *skb;
1113 BT_DBG("chan %p", chan);
1115 if (chan->mode != L2CAP_MODE_ERTM)
1118 __clear_retrans_timer(chan);
1119 __clear_monitor_timer(chan);
1120 __clear_ack_timer(chan);
1122 chan->retry_count = 0;
1123 skb_queue_walk(&chan->tx_q, skb) {
1124 if (bt_cb(skb)->control.retries)
1125 bt_cb(skb)->control.retries = 1;
1130 chan->expected_tx_seq = chan->buffer_seq;
1132 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1133 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1134 l2cap_seq_list_clear(&chan->retrans_list);
1135 l2cap_seq_list_clear(&chan->srej_list);
1136 skb_queue_purge(&chan->srej_q);
1138 chan->tx_state = L2CAP_TX_STATE_XMIT;
1139 chan->rx_state = L2CAP_RX_STATE_MOVE;
1141 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a move: return to STABLE/no-role, then (ERTM only) resume the
 * receive FSM — initiator polls the peer and waits for F, responder
 * waits for P. */
1144 static void l2cap_move_done(struct l2cap_chan *chan)
1146 u8 move_role = chan->move_role;
1147 BT_DBG("chan %p", chan);
1149 chan->move_state = L2CAP_MOVE_STABLE;
1150 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1152 if (chan->mode != L2CAP_MODE_ERTM)
1155 switch (move_role) {
1156 case L2CAP_MOVE_ROLE_INITIATOR:
1157 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1158 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1160 case L2CAP_MOVE_ROLE_RESPONDER:
1161 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark @chan connected: clear all configuration flags (including
 * CONF_NOT_COMPLETE set at creation), cancel the channel timer and
 * notify the owner. */
1166 static void l2cap_chan_ready(struct l2cap_chan *chan)
1168 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1169 chan->conf_state = 0;
1170 __clear_chan_timer(chan);
1172 chan->state = BT_CONNECTED;
1174 chan->ops->ready(chan);
/* Start channel setup: discover AMP controllers first when the channel
 * is AMP-capable, otherwise fall back to a plain Connect Request. */
1177 static void l2cap_start_connection(struct l2cap_chan *chan)
1179 if (__amp_capable(chan)) {
1180 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1181 a2mp_discover_amp(chan);
1183 l2cap_send_conn_req(chan);
/* Drive channel setup on its connection: LE channels are ready at once;
 * on BR/EDR the feature-mask information exchange must complete first,
 * then the connection starts once security passes and no connect is
 * already pending. Sends the Information Request if none was sent yet.
 */
1187 static void l2cap_do_start(struct l2cap_chan *chan)
1189 struct l2cap_conn *conn = chan->conn;
1191 if (conn->hcon->type == LE_LINK) {
1192 l2cap_chan_ready(chan);
1196 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1197 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1200 if (l2cap_chan_check_security(chan) &&
1201 __l2cap_no_conn_pending(chan)) {
1202 l2cap_start_connection(chan);
1205 struct l2cap_info_req req;
1206 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1208 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1209 conn->info_ident = l2cap_get_ident(conn);
1211 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1213 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* ERTM/streaming are supported locally by default; a mode is usable only
 * if both the remote feature mask and the local mask allow it. */
1218 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1220 u32 local_feat_mask = l2cap_feat_mask;
1222 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1225 case L2CAP_MODE_ERTM:
1226 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1227 case L2CAP_MODE_STREAMING:
1228 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (stopping ERTM timers first on a
 * connected ERTM channel) and move it to BT_DISCONN reporting @err.
 * A2MP fixed channels only change state — no request is sent for them.
 */
1234 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1236 struct l2cap_conn *conn = chan->conn;
1237 struct l2cap_disconn_req req;
1242 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1243 __clear_retrans_timer(chan);
1244 __clear_monitor_timer(chan);
1245 __clear_ack_timer(chan);
1248 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1249 l2cap_state_change(chan, BT_DISCONN);
1253 req.dcid = cpu_to_le16(chan->dcid);
1254 req.scid = cpu_to_le16(chan->scid);
1255 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1258 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1261 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on @conn forward:
 *  - BT_CONNECT channels are started (or closed if their mode is
 *    unsupported and the device is state-2 configured);
 *  - BT_CONNECT2 channels get their pending Connect Response sent
 *    (success, authorization-pending for deferred setup, or
 *    authentication-pending when security is not yet satisfied),
 *    followed by the first Configure Request on success.
 * Runs under conn->chan_lock with each channel individually locked.
 */
1262 static void l2cap_conn_start(struct l2cap_conn *conn)
1264 struct l2cap_chan *chan, *tmp;
1266 BT_DBG("conn %p", conn);
1268 mutex_lock(&conn->chan_lock);
1270 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1271 l2cap_chan_lock(chan);
1273 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1274 l2cap_chan_unlock(chan);
1278 if (chan->state == BT_CONNECT) {
1279 if (!l2cap_chan_check_security(chan) ||
1280 !__l2cap_no_conn_pending(chan)) {
1281 l2cap_chan_unlock(chan);
1285 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1286 && test_bit(CONF_STATE2_DEVICE,
1287 &chan->conf_state)) {
1288 l2cap_chan_close(chan, ECONNRESET);
1289 l2cap_chan_unlock(chan);
1293 l2cap_start_connection(chan);
1295 } else if (chan->state == BT_CONNECT2) {
1296 struct l2cap_conn_rsp rsp;
1298 rsp.scid = cpu_to_le16(chan->dcid);
1299 rsp.dcid = cpu_to_le16(chan->scid);
1301 if (l2cap_chan_check_security(chan)) {
1302 struct sock *sk = chan->sk;
1305 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1306 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1307 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1308 chan->ops->defer(chan);
1311 __l2cap_state_change(chan, BT_CONFIG);
1312 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1313 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1317 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1318 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1321 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1324 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1325 rsp.result != L2CAP_CR_SUCCESS) {
1326 l2cap_chan_unlock(chan);
1330 set_bit(CONF_REQ_SENT, &chan->conf_state);
1331 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1332 l2cap_build_conf_req(chan, buf), buf);
1333 chan->num_conf_req++;
1336 l2cap_chan_unlock(chan);
1339 mutex_unlock(&conn->chan_lock);
1342 /* Find socket with cid and source/destination bdaddr.
1343 * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best candidate
 * with BDADDR_ANY wildcards on either side is remembered in c1.
 * NOTE(review): fragment — parameter list and the c1 assignment /
 * return are not visible. */
1345 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1349 struct l2cap_chan *c, *c1 = NULL;
1351 read_lock(&chan_list_lock);
1353 list_for_each_entry(c, &chan_list, global_l) {
1354 if (state && c->state != state)
1357 if (c->scid == cid) {
1358 int src_match, dst_match;
1359 int src_any, dst_any;
1362 src_match = !bacmp(&c->src, src);
1363 dst_match = !bacmp(&c->dst, dst);
1364 if (src_match && dst_match) {
1365 read_unlock(&chan_list_lock);
1370 src_any = !bacmp(&c->src, BDADDR_ANY);
1371 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1372 if ((src_match && dst_any) || (src_any && dst_match) ||
1373 (src_any && dst_any))
1378 read_unlock(&chan_list_lock);
/* On an LE link becoming ready: if a server is listening on the ATT
 * fixed channel (and no client ATT channel already exists on this
 * connection), spawn a child channel bound to the connection's
 * addresses and attach it.
 * NOTE(review): fragment — parent socket locking and the early-return
 * error paths are not visible.
 */
1383 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1385 struct sock *parent;
1386 struct l2cap_chan *chan, *pchan;
1390 /* Check if we have socket listening on cid */
1391 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1392 &conn->hcon->src, &conn->hcon->dst);
1396 /* Client ATT sockets should override the server one */
1397 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1404 chan = pchan->ops->new_connection(pchan);
1408 chan->dcid = L2CAP_CID_ATT;
1410 bacpy(&chan->src, &conn->hcon->src);
1411 bacpy(&chan->dst, &conn->hcon->dst);
1412 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
1413 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
1415 __l2cap_chan_add(conn, chan);
1418 release_sock(parent);
/* Link-level connection completed: trigger outgoing LE pairing if
 * needed, set up the LE ATT channel, then walk all channels — LE
 * channels become ready once SMP security passes, non-connection-
 * oriented channels go straight to BT_CONNECTED, and BT_CONNECT
 * channels continue setup via l2cap_do_start().
 */
1421 static void l2cap_conn_ready(struct l2cap_conn *conn)
1423 struct l2cap_chan *chan;
1424 struct hci_conn *hcon = conn->hcon;
1426 BT_DBG("conn %p", conn);
1428 /* For outgoing pairing which doesn't necessarily have an
1429 * associated socket (e.g. mgmt_pair_device).
1431 if (hcon->out && hcon->type == LE_LINK)
1432 smp_conn_security(hcon, hcon->pending_sec_level);
1434 mutex_lock(&conn->chan_lock);
1436 if (hcon->type == LE_LINK)
1437 l2cap_le_conn_ready(conn);
1439 list_for_each_entry(chan, &conn->chan_l, list) {
1441 l2cap_chan_lock(chan);
1443 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1444 l2cap_chan_unlock(chan);
1448 if (hcon->type == LE_LINK) {
1449 if (smp_conn_security(hcon, chan->sec_level))
1450 l2cap_chan_ready(chan);
1452 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1453 struct sock *sk = chan->sk;
1454 __clear_chan_timer(chan);
1456 __l2cap_state_change(chan, BT_CONNECTED);
1457 sk->sk_state_change(sk);
1460 } else if (chan->state == BT_CONNECT) {
1461 l2cap_do_start(chan);
1464 l2cap_chan_unlock(chan);
1467 mutex_unlock(&conn->chan_lock);
1470 /* Notify sockets that we cannot guarantee reliability anymore */
1471 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1473 struct l2cap_chan *chan;
1475 BT_DBG("conn %p", conn);
1477 mutex_lock(&conn->chan_lock);
/* only channels that demanded reliable delivery get the error */
1479 list_for_each_entry(chan, &conn->chan_l, list) {
1480 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1481 l2cap_chan_set_err(chan, err);
1484 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler: the info request (feature mask) timed out.
 * Mark the exchange as done anyway and kick off pending connections.
 */
1487 static void l2cap_info_timeout(struct work_struct *work)
1489 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1492 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1493 conn->info_ident = 0;
1495 l2cap_conn_start(conn);
1500 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1501 * callback is called during registration. The ->remove callback is called
1502 * during unregistration.
1503 * An l2cap_user object can either be explicitly unregistered, or it is
1504 * unregistered when the underlying l2cap_conn object is deleted. This
1505 * guarantees that l2cap->hcon, l2cap->hchan, etc. are valid as long as the
1506 * remove callback hasn't been called.
1507 * External modules must own a reference to the l2cap_conn object if they intend
1508 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1509 * any time if they don't.
/* Register an external l2cap_user on @conn and invoke its ->probe()
 * callback. Returns 0 on success or a negative errno. A user with a
 * non-empty list linkage is already registered and is rejected.
 * Serialised via the hci_dev lock (see comment below).
 */
1511 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1513 struct hci_dev *hdev = conn->hcon->hdev;
1516 /* We need to check whether l2cap_conn is registered. If it is not, we
1517 * must not register the l2cap_user. l2cap_conn_del() unregisters
1518 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1519 * relies on the parent hci_conn object to be locked. This itself relies
1520 * on the hci_dev object to be locked. So we must lock the hci device
/* non-NULL list pointers mean the user is already on a list */
1525 if (user->list.next || user->list.prev) {
1530 /* conn->hchan is NULL after l2cap_conn_del() was called */
1536 ret = user->probe(conn, user);
1540 list_add(&user->list, &conn->users);
1544 hci_dev_unlock(hdev);
1547 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user and call its
 * ->remove() callback. The list pointers are cleared so a later
 * l2cap_register_user() sees the user as unregistered.
 */
1549 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1551 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers: never registered (or already removed) — bail */
1555 if (!user->list.next || !user->list.prev)
1558 list_del(&user->list);
1559 user->list.next = NULL;
1560 user->list.prev = NULL;
1561 user->remove(conn, user);
1564 hci_dev_unlock(hdev);
1566 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from @conn, invoking each
 * ->remove() callback. Used when the connection itself goes away.
 */
1568 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1570 struct l2cap_user *user;
1572 while (!list_empty(&conn->users)) {
1573 user = list_first_entry(&conn->users, struct l2cap_user, list);
1574 list_del(&user->list);
1575 user->list.next = NULL;
1576 user->list.prev = NULL;
1577 user->remove(conn, user);
/* Tear down the l2cap_conn attached to @hcon: notify users, close and
 * release every channel with error @err, cancel pending timers, drop
 * the hci_chan, detach from the hci_conn and release our reference.
 */
1581 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1583 struct l2cap_conn *conn = hcon->l2cap_data;
1584 struct l2cap_chan *chan, *l;
1589 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* free any partially reassembled frame */
1591 kfree_skb(conn->rx_skb);
1593 l2cap_unregister_all_users(conn);
1595 mutex_lock(&conn->chan_lock);
/* _safe variant: chan is unlinked/freed while we iterate */
1598 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* hold a ref so ops->close() can run after l2cap_chan_del() */
1599 l2cap_chan_hold(chan);
1600 l2cap_chan_lock(chan);
1602 l2cap_chan_del(chan, err);
1604 l2cap_chan_unlock(chan);
1606 chan->ops->close(chan);
1607 l2cap_chan_put(chan);
1610 mutex_unlock(&conn->chan_lock);
1612 hci_chan_del(conn->hchan);
1614 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1615 cancel_delayed_work_sync(&conn->info_timer);
/* LE links with pending SMP also need their security timer stopped */
1617 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1618 cancel_delayed_work_sync(&conn->security_timer);
1619 smp_chan_destroy(conn);
1622 hcon->l2cap_data = NULL;
1624 l2cap_conn_put(conn);
/* Delayed-work handler: SMP security procedure timed out on an LE
 * link — destroy the SMP context and tear the connection down with
 * ETIMEDOUT. The test_and_clear guards against racing completion.
 */
1627 static void security_timeout(struct work_struct *work)
1629 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1630 security_timer.work);
1632 BT_DBG("conn %p", conn);
1634 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1635 smp_chan_destroy(conn);
1636 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the
 * hci_chan and conn object, initialise refcount, locks, channel/user
 * lists and the per-link-type timer, and pick the MTU from the
 * controller (LE MTU for LE links, ACL MTU otherwise).
 */
1640 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1642 struct l2cap_conn *conn = hcon->l2cap_data;
1643 struct hci_chan *hchan;
1648 hchan = hci_chan_create(hcon);
1652 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* allocation failed: undo the hci_chan before returning (elided) */
1654 hci_chan_del(hchan);
1658 kref_init(&conn->ref);
1659 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn for its whole lifetime */
1661 hci_conn_get(conn->hcon);
1662 conn->hchan = hchan;
1664 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1666 switch (hcon->type) {
1668 if (hcon->hdev->le_mtu) {
1669 conn->mtu = hcon->hdev->le_mtu;
1674 conn->mtu = hcon->hdev->acl_mtu;
1678 conn->feat_mask = 0;
1680 if (hcon->type == ACL_LINK)
1681 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1682 &hcon->hdev->dev_flags);
1684 spin_lock_init(&conn->lock);
1685 mutex_init(&conn->chan_lock);
1687 INIT_LIST_HEAD(&conn->chan_l);
1688 INIT_LIST_HEAD(&conn->users);
/* LE uses the SMP security timer; BR/EDR uses the info-req timer */
1690 if (hcon->type == LE_LINK)
1691 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1693 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1695 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (the conn itself is freed in the elided lines).
 */
1700 static void l2cap_conn_free(struct kref *ref)
1702 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1704 hci_conn_put(conn->hcon);
/* Take a reference on @conn for external users. */
1708 void l2cap_conn_get(struct l2cap_conn *conn)
1710 kref_get(&conn->ref);
1712 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees the conn via l2cap_conn_free() on last put. */
1714 void l2cap_conn_put(struct l2cap_conn *conn)
1716 kref_put(&conn->ref, l2cap_conn_free);
1718 EXPORT_SYMBOL(l2cap_conn_put);
1720 /* ---- Socket interface ---- */
1722 /* Find socket with psm and source / destination bdaddr.
1723 * Returns closest match.
1725 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1729 struct l2cap_chan *c, *c1 = NULL;
1731 read_lock(&chan_list_lock);
1733 list_for_each_entry(c, &chan_list, global_l) {
/* optional state filter, same as l2cap_global_chan_by_scid() */
1734 if (state && c->state != state)
1737 if (c->psm == psm) {
1738 int src_match, dst_match;
1739 int src_any, dst_any;
/* bacmp() == 0 means the addresses are equal */
1742 src_match = !bacmp(&c->src, src);
1743 dst_match = !bacmp(&c->dst, dst);
1744 if (src_match && dst_match) {
/* exact match wins immediately */
1745 read_unlock(&chan_list_lock);
/* fall back to the closest wildcard (BDADDR_ANY) match */
1750 src_any = !bacmp(&c->src, BDADDR_ANY);
1751 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1752 if ((src_match && dst_any) || (src_any && dst_match) ||
1753 (src_any && dst_any))
1758 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst (identified
 * by PSM and/or fixed CID). Validates PSM/mode/state, resolves the
 * route, creates the HCI link (LE or ACL depending on @dst_type),
 * attaches the channel to the l2cap_conn, and either starts the L2CAP
 * connect procedure now (link already up) or arms the channel timer.
 * Returns 0 or a negative errno.
 */
1763 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1764 bdaddr_t *dst, u8 dst_type)
1766 struct l2cap_conn *conn;
1767 struct hci_conn *hcon;
1768 struct hci_dev *hdev;
1772 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1773 dst_type, __le16_to_cpu(psm));
1775 hdev = hci_get_route(dst, &chan->src);
1777 return -EHOSTUNREACH;
1781 l2cap_chan_lock(chan);
1783 /* PSM must be odd and lsb of upper byte must be 0 */
1784 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1785 chan->chan_type != L2CAP_CHAN_RAW) {
/* connection-oriented channels need either a PSM or a CID */
1790 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1795 switch (chan->mode) {
1796 case L2CAP_MODE_BASIC:
1798 case L2CAP_MODE_ERTM:
1799 case L2CAP_MODE_STREAMING:
1808 switch (chan->state) {
1812 /* Already connecting */
1817 /* Already connected */
1831 /* Set destination address and psm */
1832 bacpy(&chan->dst, dst);
1833 chan->dst_type = dst_type;
1838 auth_type = l2cap_get_auth_type(chan);
1840 if (bdaddr_type_is_le(dst_type))
1841 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1842 chan->sec_level, auth_type);
1844 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1845 chan->sec_level, auth_type);
1848 err = PTR_ERR(hcon);
1852 conn = l2cap_conn_add(hcon);
1854 hci_conn_drop(hcon);
/* refuse a second channel on the same fixed destination CID */
1859 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1860 hci_conn_drop(hcon);
1865 /* Update source addr of the socket */
1866 bacpy(&chan->src, &hcon->src);
1867 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes conn->chan_lock, so drop the chan lock
 * around it to preserve lock ordering
 */
1869 l2cap_chan_unlock(chan);
1870 l2cap_chan_add(conn, chan);
1871 l2cap_chan_lock(chan);
1873 /* l2cap_chan_add takes its own ref so we can drop this one */
1874 hci_conn_drop(hcon);
1876 l2cap_state_change(chan, BT_CONNECT);
1877 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1879 if (hcon->state == BT_CONNECTED) {
1880 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1881 __clear_chan_timer(chan);
1882 if (l2cap_chan_check_security(chan))
1883 l2cap_state_change(chan, BT_CONNECTED);
1885 l2cap_do_start(chan);
1891 l2cap_chan_unlock(chan);
1892 hci_dev_unlock(hdev);
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine. The early unlock/put path (elided condition) handles
 * a channel that is no longer in a state needing monitoring.
 */
1897 static void l2cap_monitor_timeout(struct work_struct *work)
1899 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1900 monitor_timer.work);
1902 BT_DBG("chan %p", chan);
1904 l2cap_chan_lock(chan);
1907 l2cap_chan_unlock(chan);
1908 l2cap_chan_put(chan);
1912 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1914 l2cap_chan_unlock(chan);
1915 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. Mirrors l2cap_monitor_timeout() in structure.
 */
1918 static void l2cap_retrans_timeout(struct work_struct *work)
1920 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1921 retrans_timer.work);
1923 BT_DBG("chan %p", chan);
1925 l2cap_chan_lock(chan);
1928 l2cap_chan_unlock(chan);
1929 l2cap_chan_put(chan);
1933 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1934 l2cap_chan_unlock(chan);
1935 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the channel TX queue and
 * send every queued frame immediately (no acknowledgements, no
 * retransmission). Each I-frame gets a sequence number, packed
 * control field and, if negotiated, a trailing CRC16 FCS.
 */
1938 static void l2cap_streaming_send(struct l2cap_chan *chan,
1939 struct sk_buff_head *skbs)
1941 struct sk_buff *skb;
1942 struct l2cap_ctrl *control;
1944 BT_DBG("chan %p, skbs %p", chan, skbs);
/* hold off while an AMP channel move is in progress */
1946 if (__chan_is_moving(chan))
1949 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1951 while (!skb_queue_empty(&chan->tx_q)) {
1953 skb = skb_dequeue(&chan->tx_q);
1955 bt_cb(skb)->control.retries = 1;
1956 control = &bt_cb(skb)->control;
1958 control->reqseq = 0;
1959 control->txseq = chan->next_tx_seq;
1961 __pack_control(chan, control, skb);
1963 if (chan->fcs == L2CAP_FCS_CRC16) {
1964 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1965 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1968 l2cap_do_send(chan, skb);
1970 BT_DBG("Sent txseq %u", control->txseq);
1972 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1973 chan->frames_sent++;
/* ERTM transmit: send I-frames from tx_send_head while the remote TX
 * window has room and the TX state machine is in XMIT. Frames stay on
 * tx_q for possible retransmission — a clone is handed to the lower
 * layer. Returns the number of frames sent (per the elided return).
 */
1977 static int l2cap_ertm_send(struct l2cap_chan *chan)
1979 struct sk_buff *skb, *tx_skb;
1980 struct l2cap_ctrl *control;
1983 BT_DBG("chan %p", chan);
1985 if (chan->state != BT_CONNECTED)
1988 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1991 if (__chan_is_moving(chan))
1994 while (chan->tx_send_head &&
1995 chan->unacked_frames < chan->remote_tx_win &&
1996 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1998 skb = chan->tx_send_head;
2000 bt_cb(skb)->control.retries = 1;
2001 control = &bt_cb(skb)->control;
/* piggyback the F-bit if one is pending (elided assignment) */
2003 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* every I-frame acknowledges everything up to buffer_seq */
2006 control->reqseq = chan->buffer_seq;
2007 chan->last_acked_seq = chan->buffer_seq;
2008 control->txseq = chan->next_tx_seq;
2010 __pack_control(chan, control, skb);
2012 if (chan->fcs == L2CAP_FCS_CRC16) {
2013 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2014 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2017 /* Clone after data has been modified. Data is assumed to be
2018 read-only (for locking purposes) on cloned sk_buffs.
2020 tx_skb = skb_clone(skb, GFP_KERNEL);
2025 __set_retrans_timer(chan);
2027 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2028 chan->unacked_frames++;
2029 chan->frames_sent++;
/* advance tx_send_head; NULL once the queue tail was sent */
2032 if (skb_queue_is_last(&chan->tx_q, skb))
2033 chan->tx_send_head = NULL;
2035 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2037 l2cap_do_send(chan, tx_skb);
2038 BT_DBG("Sent txseq %u", control->txseq);
2041 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2042 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Each frame
 * is re-sent with an updated reqseq/F-bit in its control field; the
 * retry counter is bumped and max_tx enforced (disconnect on excess).
 * Cloned skbs must be skb_copy()'d before their headers are rewritten.
 */
2047 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2049 struct l2cap_ctrl control;
2050 struct sk_buff *skb;
2051 struct sk_buff *tx_skb;
2054 BT_DBG("chan %p", chan);
2056 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2059 if (__chan_is_moving(chan))
2062 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2063 seq = l2cap_seq_list_pop(&chan->retrans_list);
2065 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2067 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2072 bt_cb(skb)->control.retries++;
2073 control = bt_cb(skb)->control;
2075 if (chan->max_tx != 0 &&
2076 bt_cb(skb)->control.retries > chan->max_tx) {
2077 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2078 l2cap_send_disconn_req(chan, ECONNRESET);
2079 l2cap_seq_list_clear(&chan->retrans_list);
2083 control.reqseq = chan->buffer_seq;
/* set the F-bit in the local copy if one is pending (elided) */
2084 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2089 if (skb_cloned(skb)) {
2090 /* Cloned sk_buffs are read-only, so we need a
2093 tx_skb = skb_copy(skb, GFP_KERNEL);
2095 tx_skb = skb_clone(skb, GFP_KERNEL);
/* allocation failure: abandon this retransmission round */
2099 l2cap_seq_list_clear(&chan->retrans_list);
2103 /* Update skb contents */
2104 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2105 put_unaligned_le32(__pack_extended_control(&control),
2106 tx_skb->data + L2CAP_HDR_SIZE);
2108 put_unaligned_le16(__pack_enhanced_control(&control),
2109 tx_skb->data + L2CAP_HDR_SIZE);
/* control field changed, so the FCS must be recomputed */
2112 if (chan->fcs == L2CAP_FCS_CRC16) {
2113 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2114 put_unaligned_le16(fcs, skb_put(tx_skb,
2118 l2cap_do_send(chan, tx_skb);
2120 BT_DBG("Resent txseq %d", control.txseq);
2122 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2126 static void l2cap_retransmit(struct l2cap_chan *chan,
2127 struct l2cap_ctrl *control)
2129 BT_DBG("chan %p, control %p", chan, control);
2131 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2132 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: walk
 * tx_q to find the starting skb, queue all txseqs up to tx_send_head
 * on retrans_list, then resend. No-op while the remote is busy.
 */
2135 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2136 struct l2cap_ctrl *control)
2138 struct sk_buff *skb;
2140 BT_DBG("chan %p, control %p", chan, control);
/* a poll (elided condition) demands an F-bit in the response */
2143 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2145 l2cap_seq_list_clear(&chan->retrans_list);
2147 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2150 if (chan->unacked_frames) {
/* first loop: locate the frame with txseq == reqseq */
2151 skb_queue_walk(&chan->tx_q, skb) {
2152 if (bt_cb(skb)->control.txseq == control->reqseq ||
2153 skb == chan->tx_send_head)
/* second loop: queue everything from there up to tx_send_head */
2157 skb_queue_walk_from(&chan->tx_q, skb) {
2158 if (skb == chan->tx_send_head)
2161 l2cap_seq_list_append(&chan->retrans_list,
2162 bt_cb(skb)->control.txseq);
2165 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames; failing
 * that, sends an explicit RR once 3/4 of the ack window is pending,
 * or (re)arms the ack timer to batch the acknowledgement.
 */
2169 static void l2cap_send_ack(struct l2cap_chan *chan)
2171 struct l2cap_ctrl control;
2172 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2173 chan->last_acked_seq);
2176 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2177 chan, chan->last_acked_seq, chan->buffer_seq);
2179 memset(&control, 0, sizeof(control));
2182 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2183 chan->rx_state == L2CAP_RX_STATE_RECV) {
2184 __clear_ack_timer(chan);
2185 control.super = L2CAP_SUPER_RNR;
2186 control.reqseq = chan->buffer_seq;
2187 l2cap_send_sframe(chan, &control);
2189 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2190 l2cap_ertm_send(chan);
2191 /* If any i-frames were sent, they included an ack */
2192 if (chan->buffer_seq == chan->last_acked_seq)
2196 /* Ack now if the window is 3/4ths full.
2197 * Calculate without mul or div
2199 threshold = chan->ack_win;
/* threshold = ack_win * 3 (shifted right by 2 in an elided line) */
2200 threshold += threshold << 1;
2203 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2206 if (frames_to_ack >= threshold) {
2207 __clear_ack_timer(chan);
2208 control.super = L2CAP_SUPER_RR;
2209 control.reqseq = chan->buffer_seq;
2210 l2cap_send_sframe(chan, &control);
2215 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * into the linear area, the remainder into a chain of continuation
 * fragments (each at most conn->mtu) linked via frag_list. Returns 0
 * on success or a negative errno on copy/allocation failure.
 */
2219 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2220 struct msghdr *msg, int len,
2221 int count, struct sk_buff *skb)
2223 struct l2cap_conn *conn = chan->conn;
2224 struct sk_buff **frag;
2227 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2233 /* Continuation fragments (no L2CAP header) */
2234 frag = &skb_shinfo(skb)->frag_list;
2236 struct sk_buff *tmp;
2238 count = min_t(unsigned int, conn->mtu, len);
2240 tmp = chan->ops->alloc_skb(chan, count,
2241 msg->msg_flags & MSG_DONTWAIT);
2243 return PTR_ERR(tmp);
2247 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2250 (*frag)->priority = skb->priority;
/* account fragment bytes in the head skb's totals */
2255 skb->len += (*frag)->len;
2256 skb->data_len += (*frag)->len;
2258 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM field +
 * payload copied from the user iovec. Returns the skb or an ERR_PTR.
 */
2264 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2265 struct msghdr *msg, size_t len,
2268 struct l2cap_conn *conn = chan->conn;
2269 struct sk_buff *skb;
2270 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2271 struct l2cap_hdr *lh;
2273 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2274 __le16_to_cpu(chan->psm), len, priority);
/* linear part limited by the HCI MTU; rest goes to frag_list */
2276 count = min_t(unsigned int, (conn->mtu - hlen), len);
2278 skb = chan->ops->alloc_skb(chan, count + hlen,
2279 msg->msg_flags & MSG_DONTWAIT);
2283 skb->priority = priority;
2285 /* Create L2CAP header */
2286 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2287 lh->cid = cpu_to_le16(chan->dcid);
2288 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2289 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2291 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2292 if (unlikely(err < 0)) {
2294 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload from
 * the user iovec. Returns the skb or an ERR_PTR on failure.
 */
2299 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2300 struct msghdr *msg, size_t len,
2303 struct l2cap_conn *conn = chan->conn;
2304 struct sk_buff *skb;
2306 struct l2cap_hdr *lh;
2308 BT_DBG("chan %p len %zu", chan, len);
2310 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2312 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2313 msg->msg_flags & MSG_DONTWAIT);
2317 skb->priority = priority;
2319 /* Create L2CAP header */
2320 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2321 lh->cid = cpu_to_le16(chan->dcid);
2322 lh->len = cpu_to_le16(len);
2324 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2325 if (unlikely(err < 0)) {
2327 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU-length field for the
 * first segment, payload, with room reserved for an FCS if enabled.
 * Returns the skb or an ERR_PTR on failure.
 */
2332 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2333 struct msghdr *msg, size_t len,
2336 struct l2cap_conn *conn = chan->conn;
2337 struct sk_buff *skb;
2338 int err, count, hlen;
2339 struct l2cap_hdr *lh;
2341 BT_DBG("chan %p len %zu", chan, len);
2344 return ERR_PTR(-ENOTCONN);
2346 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 marks a SAR start fragment carrying the SDU length */
2349 hlen += L2CAP_SDULEN_SIZE;
2351 if (chan->fcs == L2CAP_FCS_CRC16)
2352 hlen += L2CAP_FCS_SIZE;
2354 count = min_t(unsigned int, (conn->mtu - hlen), len);
2356 skb = chan->ops->alloc_skb(chan, count + hlen,
2357 msg->msg_flags & MSG_DONTWAIT);
2361 /* Create L2CAP header */
2362 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2363 lh->cid = cpu_to_le16(chan->dcid);
2364 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2366 /* Control header is populated later */
2367 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2368 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2370 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2373 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2375 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2376 if (unlikely(err < 0)) {
2378 return ERR_PTR(err);
2381 bt_cb(skb)->control.fcs = chan->fcs;
2382 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from the user iovec into I-frame PDUs on @seg_queue.
 * PDU size is bounded by the HCI MTU (and the remote MPS) so each PDU
 * fits one HCI fragment. SAR markers: UNSEGMENTED for a single PDU,
 * otherwise START (carrying the SDU length) / CONTINUE / END.
 * Returns 0 or a negative errno (queue purged on failure).
 */
2386 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2387 struct sk_buff_head *seg_queue,
2388 struct msghdr *msg, size_t len)
2390 struct sk_buff *skb;
2395 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2397 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2398 * so fragmented skbs are not used. The HCI layer's handling
2399 * of fragmented skbs is not compatible with ERTM's queueing.
2402 /* PDU size is derived from the HCI MTU */
2403 pdu_len = chan->conn->mtu;
2405 /* Constrain PDU size for BR/EDR connections */
2407 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2409 /* Adjust for largest possible L2CAP overhead. */
2411 pdu_len -= L2CAP_FCS_SIZE;
2413 pdu_len -= __ertm_hdr_size(chan);
2415 /* Remote device may have requested smaller PDUs */
2416 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2418 if (len <= pdu_len) {
2419 sar = L2CAP_SAR_UNSEGMENTED;
2423 sar = L2CAP_SAR_START;
/* the START PDU additionally carries the 2-byte SDU length */
2425 pdu_len -= L2CAP_SDULEN_SIZE;
2429 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2432 __skb_queue_purge(seg_queue);
2433 return PTR_ERR(skb);
2436 bt_cb(skb)->control.sar = sar;
2437 __skb_queue_tail(seg_queue, skb);
/* after the first segment, SDULEN space is available again */
2442 pdu_len += L2CAP_SDULEN_SIZE;
2445 if (len <= pdu_len) {
2446 sar = L2CAP_SAR_END;
2449 sar = L2CAP_SAR_CONTINUE;
/* Top-level per-channel send path. Dispatches on channel type/mode:
 * connectionless channels get a single G-frame, basic mode a single
 * B-frame, and ERTM/streaming modes segment the SDU first and then
 * push it through the TX state machine (ERTM) or send it directly
 * (streaming). Enforces the outgoing MTU for framed modes.
 */
2456 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2459 struct sk_buff *skb;
2461 struct sk_buff_head seg_queue;
2463 /* Connectionless channel */
2464 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2465 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2467 return PTR_ERR(skb);
2469 l2cap_do_send(chan, skb);
2473 switch (chan->mode) {
2474 case L2CAP_MODE_BASIC:
2475 /* Check outgoing MTU */
2476 if (len > chan->omtu)
2479 /* Create a basic PDU */
2480 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2482 return PTR_ERR(skb);
2484 l2cap_do_send(chan, skb);
2488 case L2CAP_MODE_ERTM:
2489 case L2CAP_MODE_STREAMING:
2490 /* Check outgoing MTU */
2491 if (len > chan->omtu) {
2496 __skb_queue_head_init(&seg_queue);
2498 /* Do segmentation before calling in to the state machine,
2499 * since it's possible to block while waiting for memory
2502 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2504 /* The channel could have been closed while segmenting,
2505 * check that it is still connected.
2507 if (chan->state != BT_CONNECTED) {
2508 __skb_queue_purge(&seg_queue);
2515 if (chan->mode == L2CAP_MODE_ERTM)
2516 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2518 l2cap_streaming_send(chan, &seg_queue);
2522 /* If the skbs were not queued for sending, they'll still be in
2523 * seg_queue and need to be purged.
2525 __skb_queue_purge(&seg_queue);
2529 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq, recording each on srej_list. Sequence
 * numbers already buffered in srej_q are skipped.
 */
2536 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2538 struct l2cap_ctrl control;
2541 BT_DBG("chan %p, txseq %u", chan, txseq);
2543 memset(&control, 0, sizeof(control));
2545 control.super = L2CAP_SUPER_SREJ;
2547 for (seq = chan->expected_tx_seq; seq != txseq;
2548 seq = __next_seq(chan, seq)) {
2549 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2550 control.reqseq = seq;
2551 l2cap_send_sframe(chan, &control);
2552 l2cap_seq_list_append(&chan->srej_list, seq);
2556 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op when the list is empty.
 */
2559 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2561 struct l2cap_ctrl control;
2563 BT_DBG("chan %p", chan);
2565 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2568 memset(&control, 0, sizeof(control));
2570 control.super = L2CAP_SUPER_SREJ;
2571 control.reqseq = chan->srej_list.tail;
2572 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every outstanding sequence number on
 * srej_list up to (but not including) @txseq. Each popped entry is
 * re-appended, so the single-pass guard compares against the head
 * captured before the loop.
 */
2575 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2577 struct l2cap_ctrl control;
2581 BT_DBG("chan %p, txseq %u", chan, txseq);
2583 memset(&control, 0, sizeof(control));
2585 control.super = L2CAP_SUPER_SREJ;
2587 /* Capture initial list head to allow only one pass through the list. */
2588 initial_head = chan->srej_list.head;
2591 seq = l2cap_seq_list_pop(&chan->srej_list);
2592 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2595 control.reqseq = seq;
2596 l2cap_send_sframe(chan, &control);
2597 l2cap_seq_list_append(&chan->srej_list, seq);
2598 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queue frame with
 * a sequence number up to (but excluding) @reqseq, update the
 * unacked-frame count and stop the retransmission timer once nothing
 * remains outstanding.
 */
2601 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2603 struct sk_buff *acked_skb;
2606 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* nothing outstanding, or a duplicate ack: nothing to do */
2608 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2611 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2612 chan->expected_ack_seq, chan->unacked_frames);
2614 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2615 ackseq = __next_seq(chan, ackseq)) {
2617 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2619 skb_unlink(acked_skb, &chan->tx_q);
2620 kfree_skb(acked_skb);
2621 chan->unacked_frames--;
2625 chan->expected_ack_seq = reqseq;
2627 if (chan->unacked_frames == 0)
2628 __clear_retrans_timer(chan);
2630 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard buffered out-of-order
 * frames and pending SREJ requests, and fall back to plain RECV.
 */
2633 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2635 BT_DBG("chan %p", chan);
2637 chan->expected_tx_seq = chan->buffer_seq;
2638 l2cap_seq_list_clear(&chan->srej_list);
2639 skb_queue_purge(&chan->srej_q);
2640 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming reqseq/F-bit, explicit polls and retrans
 * timeouts. Polls move the machine to WAIT_F with the monitor timer
 * armed.
 */
2643 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2644 struct l2cap_ctrl *control,
2645 struct sk_buff_head *skbs, u8 event)
2647 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2651 case L2CAP_EV_DATA_REQUEST:
2652 if (chan->tx_send_head == NULL)
2653 chan->tx_send_head = skb_peek(skbs);
2655 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2656 l2cap_ertm_send(chan);
2658 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2659 BT_DBG("Enter LOCAL_BUSY");
2660 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2662 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2663 /* The SREJ_SENT state must be aborted if we are to
2664 * enter the LOCAL_BUSY state.
2666 l2cap_abort_rx_srej_sent(chan);
2669 l2cap_send_ack(chan);
2672 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2673 BT_DBG("Exit LOCAL_BUSY");
2674 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2676 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
/* we sent RNR earlier: poll with RR to resume the remote */
2677 struct l2cap_ctrl local_control;
2679 memset(&local_control, 0, sizeof(local_control));
2680 local_control.sframe = 1;
2681 local_control.super = L2CAP_SUPER_RR;
2682 local_control.poll = 1;
2683 local_control.reqseq = chan->buffer_seq;
2684 l2cap_send_sframe(chan, &local_control);
2686 chan->retry_count = 1;
2687 __set_monitor_timer(chan);
2688 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2691 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2692 l2cap_process_reqseq(chan, control->reqseq);
2694 case L2CAP_EV_EXPLICIT_POLL:
2695 l2cap_send_rr_or_rnr(chan, 1);
2696 chan->retry_count = 1;
2697 __set_monitor_timer(chan);
2698 __clear_ack_timer(chan);
2699 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2701 case L2CAP_EV_RETRANS_TO:
2702 l2cap_send_rr_or_rnr(chan, 1);
2703 chan->retry_count = 1;
2704 __set_monitor_timer(chan);
2705 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2707 case L2CAP_EV_RECV_FBIT:
2708 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state (a poll is outstanding): queue
 * data without sending, handle local-busy transitions, and return to
 * XMIT when the final (F) bit arrives. Monitor timeouts re-poll up to
 * max_tx times before the channel is disconnected.
 */
2715 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2716 struct l2cap_ctrl *control,
2717 struct sk_buff_head *skbs, u8 event)
2719 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2723 case L2CAP_EV_DATA_REQUEST:
2724 if (chan->tx_send_head == NULL)
2725 chan->tx_send_head = skb_peek(skbs)
2726 /* Queue data, but don't send. */
2727 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2729 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2730 BT_DBG("Enter LOCAL_BUSY");
2731 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2733 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2734 /* The SREJ_SENT state must be aborted if we are to
2735 * enter the LOCAL_BUSY state.
2737 l2cap_abort_rx_srej_sent(chan);
2740 l2cap_send_ack(chan);
2743 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2744 BT_DBG("Exit LOCAL_BUSY");
2745 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2747 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2748 struct l2cap_ctrl local_control;
2749 memset(&local_control, 0, sizeof(local_control));
2750 local_control.sframe = 1;
2751 local_control.super = L2CAP_SUPER_RR;
2752 local_control.poll = 1;
2753 local_control.reqseq = chan->buffer_seq;
2754 l2cap_send_sframe(chan, &local_control);
2756 chan->retry_count = 1;
2757 __set_monitor_timer(chan);
2758 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2761 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2762 l2cap_process_reqseq(chan, control->reqseq);
2766 case L2CAP_EV_RECV_FBIT:
2767 if (control && control->final) {
2768 __clear_monitor_timer(chan);
2769 if (chan->unacked_frames > 0)
2770 __set_retrans_timer(chan);
2771 chan->retry_count = 0;
2772 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks malformed —
 * likely intended "0x%2.2x"; verify against upstream.
 */
2773 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2776 case L2CAP_EV_EXPLICIT_POLL:
/* already polling: ignore */
2779 case L2CAP_EV_MONITOR_TO:
2780 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2781 l2cap_send_rr_or_rnr(chan, 1);
2782 __set_monitor_timer(chan);
2783 chan->retry_count++;
2785 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine entry point: dispatch @event (with optional
 * control field and skb list) to the handler for the current state.
 */
2793 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2794 struct sk_buff_head *skbs, u8 event)
2796 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2797 chan, control, skbs, event, chan->tx_state);
2799 switch (chan->tx_state) {
2800 case L2CAP_TX_STATE_XMIT:
2801 l2cap_tx_state_xmit(chan, control, skbs, event);
2803 case L2CAP_TX_STATE_WAIT_F:
2804 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq/F-bit pair into the TX state machine. */
2812 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2813 struct l2cap_ctrl *control)
2815 BT_DBG("chan %p, control %p", chan, control);
2816 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the received F-bit into the TX state machine. */
2819 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2820 struct l2cap_ctrl *control)
2822 BT_DBG("chan %p, control %p", chan, control);
2823 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2826 /* Copy frame to all raw sockets on that connection */
2827 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2829 struct sk_buff *nskb;
2830 struct l2cap_chan *chan;
2832 BT_DBG("conn %p", conn);
2834 mutex_lock(&conn->chan_lock);
2836 list_for_each_entry(chan, &conn->chan_l, list) {
2837 struct sock *sk = chan->sk;
2838 if (chan->chan_type != L2CAP_CHAN_RAW)
2841 /* Don't send frame to the socket it came from */
/* each raw channel gets its own clone of the frame */
2844 nskb = skb_clone(skb, GFP_KERNEL);
/* recv failure: the clone is freed in the elided branch */
2848 if (chan->ops->recv(chan, nskb))
2852 mutex_unlock(&conn->chan_lock);
2855 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (signalling CID chosen
 * by link type), command header, then @dlen bytes of @data — spilling
 * into continuation fragments when the command exceeds conn->mtu.
 * Returns the skb or NULL (per the elided failure paths).
 */
2856 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2857 u8 ident, u16 dlen, void *data)
2859 struct sk_buff *skb, **frag;
2860 struct l2cap_cmd_hdr *cmd;
2861 struct l2cap_hdr *lh;
2864 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2865 conn, code, ident, dlen);
/* the MTU must at least hold both headers */
2867 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2870 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2871 count = min_t(unsigned int, conn->mtu, len);
2873 skb = bt_skb_alloc(count, GFP_KERNEL);
2877 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2878 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2880 if (conn->hcon->type == LE_LINK)
2881 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2883 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2885 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2888 cmd->len = cpu_to_le16(dlen);
2891 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2892 memcpy(skb_put(skb, count), data, count);
2898 /* Continuation fragments (no L2CAP header) */
2899 frag = &skb_shinfo(skb)->frag_list;
2901 count = min_t(unsigned int, conn->mtu, len);
2903 *frag = bt_skb_alloc(count, GFP_KERNEL);
2907 memcpy(skb_put(*frag, count), data, count);
2912 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/length and
 * decode the value by size (1/2/4 bytes, or a pointer for variable-
 * length options). Advances past the option (elided) and returns the
 * total option length.
 */
2922 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2925 struct l2cap_conf_opt *opt = *ptr;
2928 len = L2CAP_CONF_OPT_SIZE + opt->len;
2936 *val = *((u8 *) opt->val);
2940 *val = get_unaligned_le16(opt->val);
2944 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer to the raw bytes */
2948 *val = (unsigned long) opt->val;
2952 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and
 * advance the pointer. Values of 1/2/4 bytes are stored inline
 * (little-endian); anything else is memcpy'd from the pointer in @val.
 */
2956 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2958 struct l2cap_conf_opt *opt = *ptr;
2960 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2967 *((u8 *) opt->val) = val;
2971 put_unaligned_le16(val, opt->val);
2975 put_unaligned_le32(val, opt->val);
2979 memcpy(opt->val, (void *) val, len);
2983 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM channels advertise the
 * locally-configured service type; streaming channels always
 * advertise best-effort with default latency/flush values (the
 * streaming-case field assignments are partly elided here).
 */
2986 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2988 struct l2cap_conf_efs efs;
2990 switch (chan->mode) {
2991 case L2CAP_MODE_ERTM:
2992 efs.id = chan->local_id;
2993 efs.stype = chan->local_stype;
2994 efs.msdu = cpu_to_le16(chan->local_msdu);
2995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses spec-default access latency and flush timeout */
2996 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2997 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3000 case L2CAP_MODE_STREAMING:
3002 efs.stype = L2CAP_SERV_BESTEFFORT;
3003 efs.msdu = cpu_to_le16(chan->local_msdu);
3004 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3013 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3014 (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgement timer.  If any
 * received frames are still unacknowledged (buffer_seq has moved
 * past last_acked_seq), send an RR/RNR to acknowledge them.  The
 * final l2cap_chan_put() drops the reference taken when the timer
 * was armed.
 */
3017 static void l2cap_ack_timeout(struct work_struct *work)
3019 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3023 BT_DBG("chan %p", chan);
3025 l2cap_chan_lock(chan);
/* Number of frames received since the last acknowledgement we sent */
3027 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3028 chan->last_acked_seq);
3031 l2cap_send_rr_or_rnr(chan, 0);
3033 l2cap_chan_unlock(chan);
3034 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a channel entering
 * operation.  For ERTM proper this also initialises the rx/tx state
 * machines, the retransmit/monitor/ack timers, the SREJ queue and
 * the SREJ/retransmit sequence lists.  Returns 0 on success or the
 * error from sequence-list allocation.
 */
3037 int l2cap_ertm_init(struct l2cap_chan *chan)
3041 chan->next_tx_seq = 0;
3042 chan->expected_tx_seq = 0;
3043 chan->expected_ack_seq = 0;
3044 chan->unacked_frames = 0;
3045 chan->buffer_seq = 0;
3046 chan->frames_sent = 0;
3047 chan->last_acked_seq = 0;
3049 chan->sdu_last_frag = NULL;
3052 skb_queue_head_init(&chan->tx_q);
/* Start out on the BR/EDR controller with no AMP move in progress */
3054 chan->local_amp_id = AMP_ID_BREDR;
3055 chan->move_id = AMP_ID_BREDR;
3056 chan->move_state = L2CAP_MOVE_STABLE;
3057 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the timers/lists set up below */
3059 if (chan->mode != L2CAP_MODE_ERTM)
3062 chan->rx_state = L2CAP_RX_STATE_RECV;
3063 chan->tx_state = L2CAP_TX_STATE_XMIT;
3065 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3066 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3067 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3069 skb_queue_head_init(&chan->srej_q);
3071 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3075 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Undo the srej_list allocation if the retrans_list init failed */
3077 l2cap_seq_list_free(&chan->srej_list);
3082 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3085 case L2CAP_MODE_STREAMING:
3086 case L2CAP_MODE_ERTM:
3087 if (l2cap_mode_supported(mode, remote_feat_mask))
3091 return L2CAP_MODE_BASIC;
3095 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3097 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3100 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3102 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmit and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (clamped to the 16-bit ERTM maximum); on BR/EDR the spec
 * default values are used.
 */
3105 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3106 struct l2cap_conf_rfc *rfc)
3108 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3109 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3111 /* Class 1 devices have must have ERTM timeouts
3112 * exceeding the Link Supervision Timeout. The
3113 * default Link Supervision Timeout for AMP
3114 * controllers is 10 seconds.
3116 * Class 1 devices use 0xffffffff for their
3117 * best-effort flush timeout, so the clamping logic
3118 * will result in a timeout that meets the above
3119 * requirement. ERTM timeouts are 16-bit values, so
3120 * the maximum timeout is 65.535 seconds.
3123 /* Convert timeout to milliseconds and round */
3124 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3126 /* This is the recommended formula for class 2 devices
3127 * that start ERTM timers when packets are sent to the
3130 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit range the RFC option can carry */
3132 if (ertm_to > 0xffff)
3135 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3136 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR path: spec default timeouts */
3138 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3139 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3143 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3145 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3146 __l2cap_ews_supported(chan->conn)) {
3147 /* use extended control field */
3148 set_bit(FLAG_EXT_CTRL, &chan->flags);
3149 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3151 chan->tx_win = min_t(u16, chan->tx_win,
3152 L2CAP_DEFAULT_TX_WINDOW);
3153 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3155 chan->ack_win = chan->tx_win;
/* Build a Configure Request for 'chan' into 'data': an MTU option
 * when the channel MTU is non-default, then mode-specific options —
 * an RFC option always, plus EFS/EWS/FCS options for ERTM and
 * streaming modes.  On the first request the mode may be downgraded
 * via l2cap_select_mode() if the remote lacks support.
 */
3158 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3160 struct l2cap_conf_req *req = data;
3161 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3162 void *ptr = req->data;
3165 BT_DBG("chan %p", chan);
/* Mode (re)selection happens only before the first req/rsp exchange */
3167 if (chan->num_conf_req || chan->num_conf_rsp)
3170 switch (chan->mode) {
3171 case L2CAP_MODE_STREAMING:
3172 case L2CAP_MODE_ERTM:
/* State-2 devices keep their configured mode unconditionally */
3173 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3176 if (__l2cap_efs_supported(chan->conn))
3177 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3181 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3186 if (chan->imtu != L2CAP_DEFAULT_MTU)
3187 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3189 switch (chan->mode) {
3190 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC when the remote knows
 * about ERTM or streaming at all.
 */
3191 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3192 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3195 rfc.mode = L2CAP_MODE_BASIC;
3197 rfc.max_transmit = 0;
3198 rfc.retrans_timeout = 0;
3199 rfc.monitor_timeout = 0;
3200 rfc.max_pdu_size = 0;
3202 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3203 (unsigned long) &rfc);
3206 case L2CAP_MODE_ERTM:
3207 rfc.mode = L2CAP_MODE_ERTM;
3208 rfc.max_transmit = chan->max_tx;
3210 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size bounded by what fits in the ACL MTU after headers */
3212 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3213 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3215 rfc.max_pdu_size = cpu_to_le16(size);
3217 l2cap_txwin_setup(chan);
3219 rfc.txwin_size = min_t(u16, chan->tx_win,
3220 L2CAP_DEFAULT_TX_WINDOW);
3222 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3223 (unsigned long) &rfc);
3225 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3226 l2cap_add_opt_efs(&ptr, chan);
/* Extended windows advertise the real tx_win via the EWS option */
3228 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3232 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3233 if (chan->fcs == L2CAP_FCS_NONE ||
3234 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3235 chan->fcs = L2CAP_FCS_NONE;
3236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3241 case L2CAP_MODE_STREAMING:
3242 l2cap_txwin_setup(chan);
3243 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming uses no retransmission, so zero those RFC fields */
3245 rfc.max_transmit = 0;
3246 rfc.retrans_timeout = 0;
3247 rfc.monitor_timeout = 0;
3249 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3250 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3252 rfc.max_pdu_size = cpu_to_le16(size);
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3255 (unsigned long) &rfc);
3257 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3258 l2cap_add_opt_efs(&ptr, chan);
3260 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3261 if (chan->fcs == L2CAP_FCS_NONE ||
3262 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3263 chan->fcs = L2CAP_FCS_NONE;
3264 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3270 req->dcid = cpu_to_le16(chan->dcid);
3271 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req /
 * chan->conf_len) from the remote, record the values it proposed
 * (MTU, flush timeout, RFC, FCS, EFS, EWS), then build our
 * Configure Response options and result code into 'rsp'.  Returns
 * -ECONNREFUSED when the proposal is unacceptable and negotiation
 * is exhausted.
 */
3276 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3278 struct l2cap_conf_rsp *rsp = data;
3279 void *ptr = rsp->data;
3280 void *req = chan->conf_req;
3281 int len = chan->conf_len;
3282 int type, hint, olen;
3284 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3285 struct l2cap_conf_efs efs;
3287 u16 mtu = L2CAP_DEFAULT_MTU;
3288 u16 result = L2CAP_CONF_SUCCESS;
3291 BT_DBG("chan %p", chan);
/* Walk every option in the request buffer */
3293 while (len >= L2CAP_CONF_OPT_SIZE) {
3294 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknown options must be
 * echoed back with CONF_UNKNOWN.
 */
3296 hint = type & L2CAP_CONF_HINT;
3297 type &= L2CAP_CONF_MASK;
3300 case L2CAP_CONF_MTU:
3304 case L2CAP_CONF_FLUSH_TO:
3305 chan->flush_to = val;
3308 case L2CAP_CONF_QOS:
3311 case L2CAP_CONF_RFC:
3312 if (olen == sizeof(rfc))
3313 memcpy(&rfc, (void *) val, olen);
3316 case L2CAP_CONF_FCS:
3317 if (val == L2CAP_FCS_NONE)
3318 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3321 case L2CAP_CONF_EFS:
3323 if (olen == sizeof(efs))
3324 memcpy(&efs, (void *) val, olen);
3327 case L2CAP_CONF_EWS:
/* Extended window size is only legal when HS is enabled */
3328 if (!chan->conn->hs_enabled)
3329 return -ECONNREFUSED;
3331 set_bit(FLAG_EXT_CTRL, &chan->flags);
3332 set_bit(CONF_EWS_RECV, &chan->conf_state);
3333 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3334 chan->remote_tx_win = val;
3341 result = L2CAP_CONF_UNKNOWN;
3342 *((u8 *) ptr++) = type;
/* Mode (re)selection happens only on the first exchange */
3347 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3350 switch (chan->mode) {
3351 case L2CAP_MODE_STREAMING:
3352 case L2CAP_MODE_ERTM:
3353 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3354 chan->mode = l2cap_select_mode(rfc.mode,
3355 chan->conn->feat_mask);
3360 if (__l2cap_efs_supported(chan->conn))
3361 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3363 return -ECONNREFUSED;
3366 if (chan->mode != rfc.mode)
3367 return -ECONNREFUSED;
/* Remote insists on a mode we did not end up with: reject once,
 * then refuse if it still disagrees.
 */
3373 if (chan->mode != rfc.mode) {
3374 result = L2CAP_CONF_UNACCEPT;
3375 rfc.mode = chan->mode;
3377 if (chan->num_conf_rsp == 1)
3378 return -ECONNREFUSED;
3380 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3381 (unsigned long) &rfc);
3384 if (result == L2CAP_CONF_SUCCESS) {
3385 /* Configure output options and let the other side know
3386 * which ones we don't like. */
3388 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3389 result = L2CAP_CONF_UNACCEPT;
3392 set_bit(CONF_MTU_DONE, &chan->conf_state);
3394 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch: allowed only if either side is
 * no-traffic; otherwise reject (or refuse on a retry).
 */
3397 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3398 efs.stype != L2CAP_SERV_NOTRAFIC &&
3399 efs.stype != chan->local_stype) {
3401 result = L2CAP_CONF_UNACCEPT;
3403 if (chan->num_conf_req >= 1)
3404 return -ECONNREFUSED;
3406 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3408 (unsigned long) &efs);
3410 /* Send PENDING Conf Rsp */
3411 result = L2CAP_CONF_PENDING;
3412 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3417 case L2CAP_MODE_BASIC:
3418 chan->fcs = L2CAP_FCS_NONE;
3419 set_bit(CONF_MODE_DONE, &chan->conf_state);
3422 case L2CAP_MODE_ERTM:
/* Honour an EWS window if one was received, else the RFC window */
3423 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3424 chan->remote_tx_win = rfc.txwin_size;
3426 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3428 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote's PDU size to what our ACL MTU can carry */
3430 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3431 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3432 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3433 rfc.max_pdu_size = cpu_to_le16(size);
3434 chan->remote_mps = size;
3436 __l2cap_set_ertm_timeouts(chan, &rfc);
3438 set_bit(CONF_MODE_DONE, &chan->conf_state);
3440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3441 sizeof(rfc), (unsigned long) &rfc);
3443 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3444 chan->remote_id = efs.id;
3445 chan->remote_stype = efs.stype;
3446 chan->remote_msdu = le16_to_cpu(efs.msdu);
3447 chan->remote_flush_to =
3448 le32_to_cpu(efs.flush_to);
3449 chan->remote_acc_lat =
3450 le32_to_cpu(efs.acc_lat);
3451 chan->remote_sdu_itime =
3452 le32_to_cpu(efs.sdu_itime);
3453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3455 (unsigned long) &efs);
3459 case L2CAP_MODE_STREAMING:
3460 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3461 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3462 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3463 rfc.max_pdu_size = cpu_to_le16(size);
3464 chan->remote_mps = size;
3466 set_bit(CONF_MODE_DONE, &chan->conf_state);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3469 (unsigned long) &rfc);
3474 result = L2CAP_CONF_UNACCEPT;
3476 memset(&rfc, 0, sizeof(rfc));
3477 rfc.mode = chan->mode;
3480 if (result == L2CAP_CONF_SUCCESS)
3481 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3483 rsp->scid = cpu_to_le16(chan->dcid);
3484 rsp->result = cpu_to_le16(result);
3485 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the remote's Configure Response ('rsp', 'len' bytes of
 * options) and build an adjusted Configure Request into 'data'.
 * *result may be downgraded (e.g. to UNACCEPT for a too-small MTU);
 * on SUCCESS/PENDING the negotiated mode parameters are adopted
 * into the channel.  Returns -ECONNREFUSED on an unacceptable
 * response.
 */
3490 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3491 void *data, u16 *result)
3493 struct l2cap_conf_req *req = data;
3494 void *ptr = req->data;
3497 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3498 struct l2cap_conf_efs efs;
3500 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3502 while (len >= L2CAP_CONF_OPT_SIZE) {
3503 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3506 case L2CAP_CONF_MTU:
/* Refuse MTUs below the spec minimum; echo our MTU back */
3507 if (val < L2CAP_DEFAULT_MIN_MTU) {
3508 *result = L2CAP_CONF_UNACCEPT;
3509 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3515 case L2CAP_CONF_FLUSH_TO:
3516 chan->flush_to = val;
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3521 case L2CAP_CONF_RFC:
3522 if (olen == sizeof(rfc))
3523 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never change mode mid-negotiation */
3525 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3526 rfc.mode != chan->mode)
3527 return -ECONNREFUSED;
3531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3532 sizeof(rfc), (unsigned long) &rfc);
3535 case L2CAP_CONF_EWS:
/* Never grow the ack window beyond what we already offered */
3536 chan->ack_win = min_t(u16, val, chan->ack_win);
3537 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3541 case L2CAP_CONF_EFS:
3542 if (olen == sizeof(efs))
3543 memcpy(&efs, (void *)val, olen);
3545 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3546 efs.stype != L2CAP_SERV_NOTRAFIC &&
3547 efs.stype != chan->local_stype)
3548 return -ECONNREFUSED;
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3551 (unsigned long) &efs);
3554 case L2CAP_CONF_FCS:
3555 if (*result == L2CAP_CONF_PENDING)
3556 if (val == L2CAP_FCS_NONE)
3557 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here */
3563 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3564 return -ECONNREFUSED;
3566 chan->mode = rfc.mode;
3568 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3570 case L2CAP_MODE_ERTM:
3571 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3572 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3573 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Without extended control the RFC window bounds the ack window */
3574 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3575 chan->ack_win = min_t(u16, chan->ack_win,
3578 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3579 chan->local_msdu = le16_to_cpu(efs.msdu);
3580 chan->local_sdu_itime =
3581 le32_to_cpu(efs.sdu_itime);
3582 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3583 chan->local_flush_to =
3584 le32_to_cpu(efs.flush_to);
3588 case L2CAP_MODE_STREAMING:
3589 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3593 req->dcid = cpu_to_le16(chan->dcid);
3594 req->flags = __constant_cpu_to_le16(0);
3599 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3600 u16 result, u16 flags)
3602 struct l2cap_conf_rsp *rsp = data;
3603 void *ptr = rsp->data;
3605 BT_DBG("chan %p", chan);
3607 rsp->scid = cpu_to_le16(chan->dcid);
3608 rsp->result = cpu_to_le16(result);
3609 rsp->flags = cpu_to_le16(flags);
/* Send the deferred success response for a connection that was held
 * in BT_CONNECT2 (e.g. pending authorization): a Connect Response
 * or Create Channel Response depending on link type, followed by
 * our first Configure Request if one has not been sent yet.
 */
3614 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3616 struct l2cap_conn_rsp rsp;
3617 struct l2cap_conn *conn = chan->conn;
3621 rsp.scid = cpu_to_le16(chan->dcid);
3622 rsp.dcid = cpu_to_le16(chan->scid);
3623 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3624 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3627 rsp_code = L2CAP_CREATE_CHAN_RSP;
3629 rsp_code = L2CAP_CONN_RSP;
3631 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3633 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller gets to send the Configure Request */
3635 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3638 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3639 l2cap_build_conf_req(chan, buf), buf);
3640 chan->num_conf_req++;
/* Extract the RFC (and extended window) parameters from a
 * successful Configure Response and adopt them into the channel.
 * Defaults are pre-loaded so a remote that omits the options still
 * yields sane ERTM/streaming parameters.
 */
3643 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3647 /* Use sane default values in case a misbehaving remote device
3648 * did not send an RFC or extended window size option.
3650 u16 txwin_ext = chan->ack_win;
3651 struct l2cap_conf_rfc rfc = {
3653 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3654 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3655 .max_pdu_size = cpu_to_le16(chan->imtu),
3656 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3659 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters */
3661 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3664 while (len >= L2CAP_CONF_OPT_SIZE) {
3665 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3668 case L2CAP_CONF_RFC:
3669 if (olen == sizeof(rfc))
3670 memcpy(&rfc, (void *)val, olen);
3672 case L2CAP_CONF_EWS:
3679 case L2CAP_MODE_ERTM:
3680 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3681 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3682 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Ack window is bounded by the EWS value with extended control,
 * otherwise by the RFC window.
 */
3683 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3684 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3686 chan->ack_win = min_t(u16, chan->ack_win,
3689 case L2CAP_MODE_STREAMING:
3690 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), abandon the feature-mask
 * exchange, mark it done and start any channels that were waiting
 * on it.
 */
3694 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3695 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3698 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against a truncated reject payload */
3700 if (cmd_len < sizeof(*rej))
3703 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3706 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3707 cmd->ident == conn->info_ident) {
3708 cancel_delayed_work(&conn->info_timer);
3710 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3711 conn->info_ident = 0;
3713 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request, selected via rsp_code/amp_id): look up a listening
 * channel for the PSM, enforce link security, create the new child
 * channel, and reply with success/pending/refusal.  When the
 * feature-mask exchange has not completed, a pending result is
 * forced and an Information Request is sent first.  Returns the new
 * channel (or NULL when refused).
 */
3719 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3720 struct l2cap_cmd_hdr *cmd,
3721 u8 *data, u8 rsp_code, u8 amp_id)
3723 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3724 struct l2cap_conn_rsp rsp;
3725 struct l2cap_chan *chan = NULL, *pchan;
3726 struct sock *parent, *sk = NULL;
3727 int result, status = L2CAP_CS_NO_INFO;
3729 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3730 __le16 psm = req->psm;
3732 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3734 /* Check if we have socket listening on psm */
3735 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3738 result = L2CAP_CR_BAD_PSM;
3744 mutex_lock(&conn->chan_lock);
3747 /* Check if the ACL is secure enough (if not SDP) */
3748 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3749 !hci_conn_check_link_mode(conn->hcon)) {
3750 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3751 result = L2CAP_CR_SEC_BLOCK;
3755 result = L2CAP_CR_NO_MEM;
3757 /* Check if we already have channel with that dcid */
3758 if (__l2cap_get_chan_by_dcid(conn, scid))
3761 chan = pchan->ops->new_connection(pchan);
3767 /* For certain devices (ex: HID mouse), support for authentication,
3768 * pairing and bonding is optional. For such devices, inorder to avoid
3769 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3770 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3772 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
/* Inherit addressing from the underlying HCI connection */
3774 bacpy(&chan->src, &conn->hcon->src);
3775 bacpy(&chan->dst, &conn->hcon->dst);
3776 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3777 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3780 chan->local_amp_id = amp_id;
3782 __l2cap_chan_add(conn, chan);
3786 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3788 chan->ident = cmd->ident;
/* Feature exchange done: decide between immediate config, deferred
 * accept (authorization pending) or security-pending.
 */
3790 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3791 if (l2cap_chan_check_security(chan)) {
3792 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3793 __l2cap_state_change(chan, BT_CONNECT2);
3794 result = L2CAP_CR_PEND;
3795 status = L2CAP_CS_AUTHOR_PEND;
3796 chan->ops->defer(chan);
3798 /* Force pending result for AMP controllers.
3799 * The connection will succeed after the
3800 * physical link is up.
3802 if (amp_id == AMP_ID_BREDR) {
3803 __l2cap_state_change(chan, BT_CONFIG);
3804 result = L2CAP_CR_SUCCESS;
3806 __l2cap_state_change(chan, BT_CONNECT2);
3807 result = L2CAP_CR_PEND;
3809 status = L2CAP_CS_NO_INFO;
3812 __l2cap_state_change(chan, BT_CONNECT2);
3813 result = L2CAP_CR_PEND;
3814 status = L2CAP_CS_AUTHEN_PEND;
3817 __l2cap_state_change(chan, BT_CONNECT2);
3818 result = L2CAP_CR_PEND;
3819 status = L2CAP_CS_NO_INFO;
3823 release_sock(parent);
3824 mutex_unlock(&conn->chan_lock);
3827 rsp.scid = cpu_to_le16(scid);
3828 rsp.dcid = cpu_to_le16(dcid);
3829 rsp.result = cpu_to_le16(result);
3830 rsp.status = cpu_to_le16(status);
3831 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask exchange if it has not started yet */
3833 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3834 struct l2cap_info_req info;
3835 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3837 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3838 conn->info_ident = l2cap_get_ident(conn);
3840 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3842 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3843 sizeof(info), &info);
/* On immediate success, start configuration straight away */
3846 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3847 result == L2CAP_CR_SUCCESS) {
3849 set_bit(CONF_REQ_SENT, &chan->conf_state);
3850 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3851 l2cap_build_conf_req(chan, buf), buf);
3852 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the (possibly first) device connection,
 * then delegate the actual channel setup to l2cap_connect().
 */
3858 static int l2cap_connect_req(struct l2cap_conn *conn,
3859 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3861 struct hci_dev *hdev = conn->hcon->hdev;
3862 struct hci_conn *hcon = conn->hcon;
/* Reject a truncated request before touching its fields */
3864 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the connection to mgmt only once per ACL */
3868 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3869 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3870 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3871 hcon->dst_type, 0, NULL, 0,
3873 hci_dev_unlock(hdev);
3875 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response.  Locate
 * the local channel (by scid for final results, by ident while
 * still pending), then move it to BT_CONFIG and start configuration
 * on success, keep waiting on PEND, or tear it down on refusal.
 */
3879 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3880 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3883 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3884 u16 scid, dcid, result, status;
3885 struct l2cap_chan *chan;
3889 if (cmd_len < sizeof(*rsp))
3892 scid = __le16_to_cpu(rsp->scid);
3893 dcid = __le16_to_cpu(rsp->dcid);
3894 result = __le16_to_cpu(rsp->result);
3895 status = __le16_to_cpu(rsp->status);
3897 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3898 dcid, scid, result, status);
3900 mutex_lock(&conn->chan_lock);
/* A final response carries our scid; a pending one only the ident */
3903 chan = __l2cap_get_chan_by_scid(conn, scid);
3909 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3918 l2cap_chan_lock(chan);
3921 case L2CAP_CR_SUCCESS:
3922 l2cap_state_change(chan, BT_CONFIG);
3925 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate Configure Request */
3927 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3930 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3931 l2cap_build_conf_req(chan, req), req);
3932 chan->num_conf_req++;
3936 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result is a refusal: drop the channel */
3940 l2cap_chan_del(chan, ECONNREFUSED);
3944 l2cap_chan_unlock(chan);
3947 mutex_unlock(&conn->chan_lock);
3952 static inline void set_default_fcs(struct l2cap_chan *chan)
3954 /* FCS is enabled only in ERTM or streaming mode, if one or both
3957 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3958 chan->fcs = L2CAP_FCS_NONE;
3959 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3960 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response after an EFS-pending
 * negotiation resolves: clear the local pending flag, mark our
 * output configuration complete and transmit the response with the
 * given ident/flags.
 */
3963 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3964 u8 ident, u16 flags)
3966 struct l2cap_conn *conn = chan->conn;
3968 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3971 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3972 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3974 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3975 l2cap_build_conf_rsp(chan, data,
3976 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request.  Options are accumulated in
 * chan->conf_req across continuation fragments; once the final
 * fragment arrives the whole request is parsed and answered.  When
 * both sides' configuration is complete the channel becomes ready
 * (ERTM/streaming state is initialised first).
 */
3979 static inline int l2cap_config_req(struct l2cap_conn *conn,
3980 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3983 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3986 struct l2cap_chan *chan;
3989 if (cmd_len < sizeof(*req))
3992 dcid = __le16_to_cpu(req->dcid);
3993 flags = __le16_to_cpu(req->flags);
3995 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3997 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2; reject the
 * CID otherwise.
 */
4001 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4002 struct l2cap_cmd_rej_cid rej;
4004 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4005 rej.scid = cpu_to_le16(chan->scid);
4006 rej.dcid = cpu_to_le16(chan->dcid);
4008 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4013 /* Reject if config buffer is too small. */
4014 len = cmd_len - sizeof(*req);
4015 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4016 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4017 l2cap_build_conf_rsp(chan, rsp,
4018 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel buffer */
4023 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4024 chan->conf_len += len;
4026 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4027 /* Incomplete config. Send empty response. */
4028 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4029 l2cap_build_conf_rsp(chan, rsp,
4030 L2CAP_CONF_SUCCESS, flags), rsp);
4034 /* Complete config. */
4035 len = l2cap_parse_conf_req(chan, rsp);
4037 l2cap_send_disconn_req(chan, ECONNRESET);
4041 chan->ident = cmd->ident;
4042 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4043 chan->num_conf_rsp++;
4045 /* Reset config buffer. */
4048 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4051 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4052 set_default_fcs(chan);
4054 if (chan->mode == L2CAP_MODE_ERTM ||
4055 chan->mode == L2CAP_MODE_STREAMING)
4056 err = l2cap_ertm_init(chan);
4059 l2cap_send_disconn_req(chan, -err);
4061 l2cap_chan_ready(chan);
/* Make sure our own Configure Request has gone out too */
4066 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4068 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4069 l2cap_build_conf_req(chan, buf), buf);
4070 chan->num_conf_req++;
4073 /* Got Conf Rsp PENDING from remote side and asume we sent
4074 Conf Rsp PENDING in the code above */
4075 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4076 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4078 /* check compatibility */
4080 /* Send rsp for BR/EDR channel */
4082 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4084 chan->ident = cmd->ident;
4088 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  SUCCESS adopts the
 * negotiated RFC parameters; PENDING triggers EFS/AMP handling (a
 * logical link may need to be created first); UNACCEPT re-sends an
 * adjusted request up to the retry limit; anything else tears the
 * channel down.  Once both directions are configured the channel
 * becomes ready.
 */
4092 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4093 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4096 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4097 u16 scid, flags, result;
4098 struct l2cap_chan *chan;
4099 int len = cmd_len - sizeof(*rsp);
4102 if (cmd_len < sizeof(*rsp))
4105 scid = __le16_to_cpu(rsp->scid);
4106 flags = __le16_to_cpu(rsp->flags);
4107 result = __le16_to_cpu(rsp->result);
4109 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4112 chan = l2cap_get_chan_by_scid(conn, scid);
4117 case L2CAP_CONF_SUCCESS:
4118 l2cap_conf_rfc_get(chan, rsp->data, len);
4119 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4122 case L2CAP_CONF_PENDING:
4123 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4125 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4128 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4131 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can answer immediately; AMP must wait for the logical
 * link, so stash the ident for later.
 */
4135 if (!chan->hs_hcon) {
4136 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4139 if (l2cap_check_efs(chan)) {
4140 amp_create_logical_link(chan);
4141 chan->ident = cmd->ident;
4147 case L2CAP_CONF_UNACCEPT:
4148 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bound the option list before re-parsing into the request buf */
4151 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4152 l2cap_send_disconn_req(chan, ECONNRESET);
4156 /* throw out any old stored conf requests */
4157 result = L2CAP_CONF_SUCCESS;
4158 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4161 l2cap_send_disconn_req(chan, ECONNRESET);
4165 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4166 L2CAP_CONF_REQ, len, req);
4167 chan->num_conf_req++;
4168 if (result != L2CAP_CONF_SUCCESS)
/* Reject or retry limit exhausted: give up on the channel */
4174 l2cap_chan_set_err(chan, ECONNRESET);
4176 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4177 l2cap_send_disconn_req(chan, ECONNRESET);
4181 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4184 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4186 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4187 set_default_fcs(chan);
4189 if (chan->mode == L2CAP_MODE_ERTM ||
4190 chan->mode == L2CAP_MODE_STREAMING)
4191 err = l2cap_ertm_init(chan);
4194 l2cap_send_disconn_req(chan, -err);
4196 l2cap_chan_ready(chan);
4200 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then shut down and delete the channel.
 * The extra hold/put pair keeps the channel alive across the
 * unlock while ops->close() runs.
 */
4204 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4205 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4208 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4209 struct l2cap_disconn_rsp rsp;
4211 struct l2cap_chan *chan;
4213 if (cmd_len != sizeof(*req))
4216 scid = __le16_to_cpu(req->scid);
4217 dcid = __le16_to_cpu(req->dcid);
4219 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4221 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local scid */
4223 chan = __l2cap_get_chan_by_scid(conn, dcid);
4225 mutex_unlock(&conn->chan_lock);
4229 l2cap_chan_lock(chan);
4231 rsp.dcid = cpu_to_le16(chan->scid);
4232 rsp.scid = cpu_to_le16(chan->dcid);
4233 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4235 chan->ops->set_shutdown(chan);
4237 l2cap_chan_hold(chan);
4238 l2cap_chan_del(chan, ECONNRESET);
4240 l2cap_chan_unlock(chan);
/* close() is called without the channel lock held */
4242 chan->ops->close(chan);
4243 l2cap_chan_put(chan);
4245 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response to our own disconnect
 * request: delete the matching channel (error 0, since this is a
 * clean local-initiated teardown) and close it.
 */
4250 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4251 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4254 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4256 struct l2cap_chan *chan;
4258 if (cmd_len != sizeof(*rsp))
4261 scid = __le16_to_cpu(rsp->scid);
4262 dcid = __le16_to_cpu(rsp->dcid);
4264 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4266 mutex_lock(&conn->chan_lock);
4268 chan = __l2cap_get_chan_by_scid(conn, scid);
4270 mutex_unlock(&conn->chan_lock);
4274 l2cap_chan_lock(chan);
/* Hold keeps the channel alive until close() below completes */
4276 l2cap_chan_hold(chan);
4277 l2cap_chan_del(chan, 0);
4279 l2cap_chan_unlock(chan);
4281 chan->ops->close(chan);
4282 l2cap_chan_put(chan);
4284 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries with our capabilities (adding HS-dependent
 * bits when enabled), and reply NOT SUPPORTED for any other type.
 */
4289 static inline int l2cap_information_req(struct l2cap_conn *conn,
4290 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4293 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4296 if (cmd_len != sizeof(*req))
4299 type = __le16_to_cpu(req->type);
4301 BT_DBG("type 0x%4.4x", type);
4303 if (type == L2CAP_IT_FEAT_MASK) {
4305 u32 feat_mask = l2cap_feat_mask;
4306 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4307 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4308 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4310 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow/window features only when high speed is on */
4312 if (conn->hs_enabled)
4313 feat_mask |= L2CAP_FEAT_EXT_FLOW
4314 | L2CAP_FEAT_EXT_WINDOW;
4316 put_unaligned_le32(feat_mask, rsp->data);
4317 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4319 } else if (type == L2CAP_IT_FIXED_CHAN) {
4321 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when HS is enabled */
4323 if (conn->hs_enabled)
4324 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4326 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4328 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4329 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4330 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4331 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4334 struct l2cap_info_rsp rsp;
4335 rsp.type = cpu_to_le16(type);
4336 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4337 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.  After validating that
 * it answers our outstanding request, record the remote feature
 * mask (chaining a fixed-channel query if advertised) or the fixed
 * channel mask, then mark the exchange done and start the waiting
 * channels.
 */
4344 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4345 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4348 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4351 if (cmd_len < sizeof(*rsp))
4354 type = __le16_to_cpu(rsp->type);
4355 result = __le16_to_cpu(rsp->result);
4357 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4359 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4360 if (cmd->ident != conn->info_ident ||
4361 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4364 cancel_delayed_work(&conn->info_timer);
/* A failed query still completes the exchange */
4366 if (result != L2CAP_IR_SUCCESS) {
4367 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4368 conn->info_ident = 0;
4370 l2cap_conn_start(conn);
4376 case L2CAP_IT_FEAT_MASK:
4377 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the remote supports fixed channels, query those next */
4379 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4380 struct l2cap_info_req req;
4381 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4383 conn->info_ident = l2cap_get_ident(conn);
4385 l2cap_send_cmd(conn, conn->info_ident,
4386 L2CAP_INFO_REQ, sizeof(req), &req);
4388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4389 conn->info_ident = 0;
4391 l2cap_conn_start(conn);
4395 case L2CAP_IT_FIXED_CHAN:
4396 conn->fixed_chan_mask = rsp->data[0];
4397 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4398 conn->info_ident = 0;
4400 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP channel setup).
 * amp_id 0 (BR/EDR) degenerates to a normal connect; otherwise the
 * AMP controller id is validated and the new channel is bound to an
 * existing AMP physical link.  On a bad controller id a
 * L2CAP_CR_BAD_AMP response is sent.
 */
4407 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4408 struct l2cap_cmd_hdr *cmd,
4409 u16 cmd_len, void *data)
4411 struct l2cap_create_chan_req *req = data;
4412 struct l2cap_create_chan_rsp rsp;
4413 struct l2cap_chan *chan;
4414 struct hci_dev *hdev;
4417 if (cmd_len != sizeof(*req))
/* High speed disabled: reject the command (elided return path). */
4420 if (!conn->hs_enabled)
4423 psm = le16_to_cpu(req->psm);
4424 scid = le16_to_cpu(req->scid);
4426 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4428 /* For controller id 0 make BR/EDR connection */
4429 if (req->amp_id == AMP_ID_BREDR) {
4430 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4435 /* Validate AMP controller id */
4436 hdev = hci_dev_get(req->amp_id);
/* Controller must exist, be an AMP device, and be powered up. */
4440 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4445 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4448 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4449 struct hci_conn *hs_hcon;
4451 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4458 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the BR/EDR channel to the high-speed link; AMP channels
 * use no FCS and take the controller's block MTU.
 */
4460 mgr->bredr_chan = chan;
4461 chan->hs_hcon = hs_hcon;
4462 chan->fcs = L2CAP_FCS_NONE;
4463 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the initiator. */
4472 rsp.scid = cpu_to_le16(scid);
4473 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4474 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4476 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request toward dest_amp_id and arm the move
 * timer.  Allocates a fresh signaling ident and remembers it in
 * chan->ident so the response can be matched.
 */
4482 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4484 struct l2cap_move_chan_req req;
4487 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4489 ident = l2cap_get_ident(chan->conn);
4490 chan->ident = ident;
4492 req.icid = cpu_to_le16(chan->scid);
4493 req.dest_amp_id = dest_amp_id;
4495 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4498 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result code, reusing
 * the ident saved from the initiator's request (chan->ident).
 */
4501 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4503 struct l2cap_move_chan_rsp rsp;
4505 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4507 rsp.icid = cpu_to_le16(chan->dcid);
4508 rsp.result = cpu_to_le16(result);
4510 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm (confirmed/unconfirmed) with a fresh
 * ident and (re)arm the move timer while we wait for the cfm rsp.
 */
4514 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4516 struct l2cap_move_chan_cfm cfm;
4518 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4520 chan->ident = l2cap_get_ident(chan->conn);
4522 cfm.icid = cpu_to_le16(chan->scid);
4523 cfm.result = cpu_to_le16(result);
4525 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4528 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used
 * when the channel itself could not be located on this conn.
 */
4531 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4533 struct l2cap_move_chan_cfm cfm;
4535 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4537 cfm.icid = cpu_to_le16(icid);
4538 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4540 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send the Move Channel Confirm Response echoing icid; ident must be
 * the one carried by the confirm being answered.
 */
4544 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4547 struct l2cap_move_chan_cfm_rsp rsp;
4549 BT_DBG("icid 0x%4.4x", icid);
4551 rsp.icid = cpu_to_le16(icid);
4552 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * Actual link teardown is still a placeholder (see comment below).
 */
4555 static void __release_logical_link(struct l2cap_chan *chan)
4557 chan->hs_hchan = NULL;
4558 chan->hs_hcon = NULL;
4560 /* Placeholder - release the logical link */
/* React to a failed logical-link setup.  Before the channel is
 * connected this aborts channel creation; afterwards it unwinds an
 * in-progress AMP move according to our role in that move.
 */
4563 static void l2cap_logical_fail(struct l2cap_chan *chan)
4565 /* Logical link setup failed */
4566 if (chan->state != BT_CONNECTED) {
4567 /* Create channel failure, disconnect */
4568 l2cap_send_disconn_req(chan, ECONNRESET);
4572 switch (chan->move_role) {
4573 case L2CAP_MOVE_ROLE_RESPONDER:
/* As responder, finish the move locally and tell the initiator
 * the move is not supported.
 */
4574 l2cap_move_done(chan);
4575 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4577 case L2CAP_MOVE_ROLE_INITIATOR:
4578 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4579 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4580 /* Remote has only sent pending or
4581 * success responses, clean up
4583 l2cap_move_done(chan);
4586 /* Other amp move states imply that the move
4587 * has already aborted
4589 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, answer the pending EFS config, and — if the config
 * exchange already finished — initialize ERTM and mark the channel
 * ready (or disconnect on ERTM init failure).
 */
4594 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4595 struct hci_chan *hchan)
4597 struct l2cap_conf_rsp rsp;
4599 chan->hs_hchan = hchan;
4600 chan->hs_hcon->l2cap_data = chan->conn;
4602 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4604 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4607 set_default_fcs(chan);
4609 err = l2cap_ertm_init(chan);
4611 l2cap_send_disconn_req(chan, -err);
4613 l2cap_chan_ready(chan);
/* Advance the move state machine when the logical link comes up
 * mid-move: either wait for the peer's success response, park on
 * local-busy, or emit the confirm (initiator) / success response
 * (responder).  Any other state means the move already failed, so
 * the link is released and the channel returns to STABLE.
 */
4617 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4618 struct hci_chan *hchan)
4620 chan->hs_hcon = hchan->conn;
4621 chan->hs_hcon->l2cap_data = chan->conn;
4623 BT_DBG("move_state %d", chan->move_state);
4625 switch (chan->move_state) {
4626 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4627 /* Move confirm will be sent after a success
4628 * response is received
4630 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4632 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4633 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4634 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4635 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4636 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4637 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4638 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4639 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4640 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4644 /* Move was not in expected state, free the channel */
4645 __release_logical_link(chan);
4647 chan->move_state = L2CAP_MOVE_STABLE;
4651 /* Call with chan locked */
/* Logical-link confirmation callback: dispatch to the failure path,
 * the channel-creation completion, or the move completion depending
 * on status and channel state.  A logical link is ignored for a
 * channel still on BR/EDR.
 */
4652 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4655 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4658 l2cap_logical_fail(chan);
4659 __release_logical_link(chan);
4663 if (chan->state != BT_CONNECTED) {
4664 /* Ignore logical link if channel is on BR/EDR */
4665 if (chan->local_amp_id != AMP_ID_BREDR)
4666 l2cap_logical_finish_create(chan, hchan);
4668 l2cap_logical_finish_move(chan, hchan);
/* Begin moving this channel as initiator.  From BR/EDR (only when
 * policy prefers AMP) the physical link must be prepared first;
 * from an AMP the Move Channel Request can be sent immediately.
 */
4672 void l2cap_move_start(struct l2cap_chan *chan)
4674 BT_DBG("chan %p", chan);
4676 if (chan->local_amp_id == AMP_ID_BREDR) {
4677 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4679 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4680 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4681 /* Placeholder - start physical link setup */
4683 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4684 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
/* Moving back to BR/EDR: dest_amp_id 0 in the request. */
4686 l2cap_move_setup(chan);
4687 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link attempt.
 * Outgoing (BT_CONNECT): send Create Channel Request on success or
 * fall back to a plain BR/EDR connect.  Incoming: answer the pending
 * Create Channel Request and, on success, start configuration.
 */
4691 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4692 u8 local_amp_id, u8 remote_amp_id)
4694 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4695 local_amp_id, remote_amp_id);
/* AMP channels never use an FCS. */
4697 chan->fcs = L2CAP_FCS_NONE;
4699 /* Outgoing channel on AMP */
4700 if (chan->state == BT_CONNECT) {
4701 if (result == L2CAP_CR_SUCCESS) {
4702 chan->local_amp_id = local_amp_id;
4703 l2cap_send_create_chan_req(chan, remote_amp_id);
4705 /* Revert to BR/EDR connect */
4706 l2cap_send_conn_req(chan);
4712 /* Incoming channel on AMP */
4713 if (__l2cap_no_conn_pending(chan)) {
4714 struct l2cap_conn_rsp rsp;
4716 rsp.scid = cpu_to_le16(chan->dcid);
4717 rsp.dcid = cpu_to_le16(chan->scid);
4719 if (result == L2CAP_CR_SUCCESS) {
4720 /* Send successful response */
4721 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4722 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4724 /* Send negative response */
4725 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4726 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4729 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, enter BT_CONFIG and fire the first config request. */
4732 if (result == L2CAP_CR_SUCCESS) {
4733 __l2cap_state_change(chan, BT_CONFIG);
4734 set_bit(CONF_REQ_SENT, &chan->conf_state);
4735 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4737 l2cap_build_conf_req(chan, buf), buf);
4738 chan->num_conf_req++;
/* Initiator side: the physical link is ready, so record the target
 * controller, enter WAIT_RSP and send the Move Channel Request.
 */
4743 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4746 l2cap_move_setup(chan);
4747 chan->move_id = local_amp_id;
4748 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4750 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: physical link attempt finished.  If the logical
 * link is already connected, bind it and answer SUCCESS; otherwise
 * wait for logical-link confirmation, or reject with NOT_ALLOWED
 * when no link is available.
 * NOTE(review): hchan is only a placeholder here (stays NULL until
 * the elided lookup is implemented) — the BT_CONNECTED check below
 * relies on that lookup.
 */
4753 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4755 struct hci_chan *hchan = NULL;
4757 /* Placeholder - get hci_chan for logical link */
4760 if (hchan->state == BT_CONNECTED) {
4761 /* Logical link is ready to go */
4762 chan->hs_hcon = hchan->conn;
4763 chan->hs_hcon->l2cap_data = chan->conn;
4764 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4765 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4767 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4769 /* Wait for logical link to be ready */
4770 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4773 /* Logical link not available */
4774 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a move: as responder, answer with BAD_ID (on -EINVAL) or
 * NOT_ALLOWED; then reset role/state to STABLE and resume ERTM
 * transmission.
 */
4778 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4780 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4782 if (result == -EINVAL)
4783 rsp_result = L2CAP_MR_BAD_ID;
4785 rsp_result = L2CAP_MR_NOT_ALLOWED;
4787 l2cap_send_move_chan_rsp(chan, rsp_result);
4790 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4791 chan->move_state = L2CAP_MOVE_STABLE;
4793 /* Restart data transmission */
4794 l2cap_ertm_send(chan);
4797 /* Invoke with locked chan */
/* Physical-link confirmation: route to channel creation (not yet
 * connected), move cancel (failure), or the role-specific move
 * continuation.  Channels already disconnecting are skipped.
 */
4798 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4800 u8 local_amp_id = chan->local_amp_id;
4801 u8 remote_amp_id = chan->remote_amp_id;
4803 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4804 chan, result, local_amp_id, remote_amp_id);
4806 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4807 l2cap_chan_unlock(chan);
4811 if (chan->state != BT_CONNECTED) {
4812 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4813 } else if (result != L2CAP_MR_SUCCESS) {
4814 l2cap_do_move_cancel(chan, result);
4816 switch (chan->move_role) {
4817 case L2CAP_MOVE_ROLE_INITIATOR:
4818 l2cap_do_move_initiate(chan, local_amp_id,
4821 case L2CAP_MOVE_ROLE_RESPONDER:
4822 l2cap_do_move_respond(chan, result);
/* No role set: nothing to continue, cancel the move. */
4825 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.  Validates the channel
 * (dynamic CID, ERTM/streaming mode, policy), the destination AMP
 * controller, and move-collision rules, then either starts acting as
 * move responder or answers with an error result.
 */
4831 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4832 struct l2cap_cmd_hdr *cmd,
4833 u16 cmd_len, void *data)
4835 struct l2cap_move_chan_req *req = data;
4836 struct l2cap_move_chan_rsp rsp;
4837 struct l2cap_chan *chan;
4839 u16 result = L2CAP_MR_NOT_ALLOWED;
4841 if (cmd_len != sizeof(*req))
4844 icid = le16_to_cpu(req->icid);
4846 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4848 if (!conn->hs_enabled)
/* Unknown icid: respond NOT_ALLOWED without touching any channel. */
4851 chan = l2cap_get_chan_by_dcid(conn, icid);
4853 rsp.icid = cpu_to_le16(icid);
4854 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4855 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Save the ident so our Move Channel Response can reference it. */
4860 chan->ident = cmd->ident;
4862 if (chan->scid < L2CAP_CID_DYN_START ||
4863 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4864 (chan->mode != L2CAP_MODE_ERTM &&
4865 chan->mode != L2CAP_MODE_STREAMING)) {
4866 result = L2CAP_MR_NOT_ALLOWED;
4867 goto send_move_response;
/* Moving to the controller we're already on is a no-op error. */
4870 if (chan->local_amp_id == req->dest_amp_id) {
4871 result = L2CAP_MR_SAME_ID;
4872 goto send_move_response;
4875 if (req->dest_amp_id != AMP_ID_BREDR) {
4876 struct hci_dev *hdev;
4877 hdev = hci_dev_get(req->dest_amp_id);
4878 if (!hdev || hdev->dev_type != HCI_AMP ||
4879 !test_bit(HCI_UP, &hdev->flags)) {
4883 result = L2CAP_MR_BAD_ID;
4884 goto send_move_response;
4889 /* Detect a move collision. Only send a collision response
4890 * if this side has "lost", otherwise proceed with the move.
4891 * The winner has the larger bd_addr.
4893 if ((__chan_is_moving(chan) ||
4894 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4895 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4896 result = L2CAP_MR_COLLISION;
4897 goto send_move_response;
4900 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4901 l2cap_move_setup(chan);
4902 chan->move_id = req->dest_amp_id;
4905 if (req->dest_amp_id == AMP_ID_BREDR) {
4906 /* Moving to BR/EDR */
4907 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4908 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4909 result = L2CAP_MR_PEND;
4911 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4912 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: physical link must be accepted first. */
4915 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4916 /* Placeholder - uncomment when amp functions are available */
4917 /*amp_accept_physical(chan, req->dest_amp_id);*/
4918 result = L2CAP_MR_PEND;
4922 l2cap_send_move_chan_rsp(chan, result);
4924 l2cap_chan_unlock(chan);
/* Initiator side: process a SUCCESS/PEND Move Channel Response and
 * advance the move state machine (possibly sending the confirm, or
 * waiting for the logical link / local-busy to clear).  An unknown
 * icid gets an unconfirmed confirm; any unexpected state aborts the
 * move back to the current controller.
 */
4929 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4931 struct l2cap_chan *chan;
4932 struct hci_chan *hchan = NULL;
4934 chan = l2cap_get_chan_by_scid(conn, icid);
4936 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A pending result restarts the (extended) move timer. */
4940 __clear_chan_timer(chan);
4941 if (result == L2CAP_MR_PEND)
4942 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4944 switch (chan->move_state) {
4945 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4946 /* Move confirm will be sent when logical link
4949 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4951 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4952 if (result == L2CAP_MR_PEND) {
4954 } else if (test_bit(CONN_LOCAL_BUSY,
4955 &chan->conn_state)) {
4956 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4958 /* Logical link is up or moving to BR/EDR,
4961 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4962 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4965 case L2CAP_MOVE_WAIT_RSP:
4967 if (result == L2CAP_MR_SUCCESS) {
4968 /* Remote is ready, send confirm immediately
4969 * after logical link is ready
4971 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4973 /* Both logical link and move success
4974 * are required to confirm
4976 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4979 /* Placeholder - get hci_chan for logical link */
/* NOTE(review): hchan comes from the elided lookup above; it
 * stays NULL in this placeholder state — see original source.
 */
4981 /* Logical link not available */
4982 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4986 /* If the logical link is not yet connected, do not
4987 * send confirmation.
4989 if (hchan->state != BT_CONNECTED)
4992 /* Logical link is already ready to go */
4994 chan->hs_hcon = hchan->conn;
4995 chan->hs_hcon->l2cap_data = chan->conn;
4997 if (result == L2CAP_MR_SUCCESS) {
4998 /* Can confirm now */
4999 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5001 /* Now only need move success
5004 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5007 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5010 /* Any other amp move state means the move failed. */
5011 chan->move_id = chan->local_amp_id;
5012 l2cap_move_done(chan);
5013 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5016 l2cap_chan_unlock(chan);
/* Initiator side: handle an error Move Channel Response.  On a
 * collision while initiating we switch to the responder role;
 * otherwise the move is cancelled and an unconfirmed confirm sent.
 */
5019 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5022 struct l2cap_chan *chan;
5024 chan = l2cap_get_chan_by_ident(conn, ident);
5026 /* Could not locate channel, icid is best guess */
5027 l2cap_send_move_chan_cfm_icid(conn, icid);
5031 __clear_chan_timer(chan);
5033 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5034 if (result == L2CAP_MR_COLLISION) {
5035 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5037 /* Cleanup - cancel move */
5038 chan->move_id = chan->local_amp_id;
5039 l2cap_move_done(chan);
5043 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5045 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues
 * the move, any other result fails it.
 */
5048 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5049 struct l2cap_cmd_hdr *cmd,
5050 u16 cmd_len, void *data)
5052 struct l2cap_move_chan_rsp *rsp = data;
5055 if (cmd_len != sizeof(*rsp))
5058 icid = le16_to_cpu(rsp->icid);
5059 result = le16_to_cpu(rsp->result);
5061 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5063 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5064 l2cap_move_continue(conn, icid, result);
5066 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm.  On CONFIRMED, commit the
 * controller switch (releasing the logical link when moving back to
 * BR/EDR); on UNCONFIRMED, revert.  A response is always sent, even
 * for an unknown icid, as the spec requires.
 */
5071 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5072 struct l2cap_cmd_hdr *cmd,
5073 u16 cmd_len, void *data)
5075 struct l2cap_move_chan_cfm *cfm = data;
5076 struct l2cap_chan *chan;
5079 if (cmd_len != sizeof(*cfm))
5082 icid = le16_to_cpu(cfm->icid);
5083 result = le16_to_cpu(cfm->result);
5085 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5087 chan = l2cap_get_chan_by_dcid(conn, icid);
5089 /* Spec requires a response even if the icid was not found */
5090 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5094 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5095 if (result == L2CAP_MC_CONFIRMED) {
5096 chan->local_amp_id = chan->move_id;
5097 if (chan->local_amp_id == AMP_ID_BREDR)
5098 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5100 chan->move_id = chan->local_amp_id;
5103 l2cap_move_done(chan);
5106 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5108 l2cap_chan_unlock(chan);
/* Handle the Move Channel Confirm Response, the final PDU of a move:
 * commit local_amp_id, release the logical link when back on BR/EDR,
 * and mark the move done.
 */
5113 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5114 struct l2cap_cmd_hdr *cmd,
5115 u16 cmd_len, void *data)
5117 struct l2cap_move_chan_cfm_rsp *rsp = data;
5118 struct l2cap_chan *chan;
5121 if (cmd_len != sizeof(*rsp))
5124 icid = le16_to_cpu(rsp->icid);
5126 BT_DBG("icid 0x%4.4x", icid);
5128 chan = l2cap_get_chan_by_scid(conn, icid);
5132 __clear_chan_timer(chan);
5134 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5135 chan->local_amp_id = chan->move_id;
5137 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5138 __release_logical_link(chan);
5140 l2cap_move_done(chan);
5143 l2cap_chan_unlock(chan);
/* Validate LE connection-parameter-update values against the ranges
 * mandated by the spec: interval 6..3200 with min <= max, supervision
 * timeout 10..3200, timeout strictly greater than 8*max interval, and
 * slave latency <= 499 and below the timeout-derived maximum.
 * (Elided lines are the -EINVAL/0 returns.)
 */
5148 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5153 if (min > max || min < 6 || max > 3200)
5156 if (to_multiplier < 10 || to_multiplier > 3200)
5159 if (max >= to_multiplier * 8)
5162 max_latency = (to_multiplier * 8 / max) - 1;
5163 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are master; parameters are range-checked, a REJECTED/ACCEPTED
 * response is sent, and on acceptance the HCI connection update is
 * issued.
 */
5169 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5170 struct l2cap_cmd_hdr *cmd,
5173 struct hci_conn *hcon = conn->hcon;
5174 struct l2cap_conn_param_update_req *req;
5175 struct l2cap_conn_param_update_rsp rsp;
5176 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not receive this request; reject (elided return). */
5179 if (!(hcon->link_mode & HCI_LM_MASTER))
5182 cmd_len = __le16_to_cpu(cmd->len);
5183 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5186 req = (struct l2cap_conn_param_update_req *) data;
5187 min = __le16_to_cpu(req->min);
5188 max = __le16_to_cpu(req->max);
5189 latency = __le16_to_cpu(req->latency);
5190 to_multiplier = __le16_to_cpu(req->to_multiplier);
5192 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5193 min, max, latency, to_multiplier);
5195 memset(&rsp, 0, sizeof(rsp));
5197 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5199 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5201 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5203 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached when the parameters validated (err == 0). */
5207 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; unknown opcodes log an error (and the
 * elided code sets the error return used to build a command reject).
 */
5212 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5213 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5218 switch (cmd->code) {
5219 case L2CAP_COMMAND_REJ:
5220 l2cap_command_rej(conn, cmd, cmd_len, data);
5223 case L2CAP_CONN_REQ:
5224 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5227 case L2CAP_CONN_RSP:
5228 case L2CAP_CREATE_CHAN_RSP:
5229 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5232 case L2CAP_CONF_REQ:
5233 err = l2cap_config_req(conn, cmd, cmd_len, data);
5236 case L2CAP_CONF_RSP:
5237 l2cap_config_rsp(conn, cmd, cmd_len, data);
5240 case L2CAP_DISCONN_REQ:
5241 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5244 case L2CAP_DISCONN_RSP:
5245 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5248 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back to the sender. */
5249 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5252 case L2CAP_ECHO_RSP:
5255 case L2CAP_INFO_REQ:
5256 err = l2cap_information_req(conn, cmd, cmd_len, data);
5259 case L2CAP_INFO_RSP:
5260 l2cap_information_rsp(conn, cmd, cmd_len, data);
5263 case L2CAP_CREATE_CHAN_REQ:
5264 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5267 case L2CAP_MOVE_CHAN_REQ:
5268 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5271 case L2CAP_MOVE_CHAN_RSP:
5272 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5275 case L2CAP_MOVE_CHAN_CFM:
5276 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5279 case L2CAP_MOVE_CHAN_CFM_RSP:
5280 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5284 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command; only connection-parameter
 * updates (and command rejects) are handled on LE links here.
 */
5292 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5293 struct l2cap_cmd_hdr *cmd, u8 *data)
5295 switch (cmd->code) {
5296 case L2CAP_COMMAND_REJ:
5299 case L2CAP_CONN_PARAM_UPDATE_REQ:
5300 return l2cap_conn_param_update_req(conn, cmd, data);
5302 case L2CAP_CONN_PARAM_UPDATE_RSP:
5306 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a handler errno to the wire "reason" used in an L2CAP command
 * reject; anything unrecognized becomes NOT_UNDERSTOOD.
 */
5311 static __le16 l2cap_err_to_reason(int err)
5315 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5317 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5321 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process the LE signaling channel: exactly one command per PDU.
 * Length/ident sanity is checked and a command reject is sent when
 * the handler fails.
 */
5325 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5326 struct sk_buff *skb)
5328 struct hci_conn *hcon = conn->hcon;
5329 struct l2cap_cmd_hdr *cmd;
5333 if (hcon->type != LE_LINK)
5336 if (skb->len < L2CAP_CMD_HDR_SIZE)
5339 cmd = (void *) skb->data;
5340 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5342 len = le16_to_cpu(cmd->len);
5344 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling carries a single command; ident 0 is reserved. */
5346 if (len != skb->len || !cmd->ident) {
5347 BT_DBG("corrupted command");
5351 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5353 struct l2cap_cmd_rej_unk rej;
5355 BT_ERR("Wrong link type (%d)", err);
5357 rej.reason = l2cap_err_to_reason(err);
5358 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel: a PDU may carry several
 * commands back-to-back, each parsed, bounds-checked and dispatched;
 * handler failures are answered with a command reject.
 */
5366 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5367 struct sk_buff *skb)
5369 struct hci_conn *hcon = conn->hcon;
5370 u8 *data = skb->data;
5372 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of all signaling traffic. */
5375 l2cap_raw_recv(conn, skb);
5377 if (hcon->type != ACL_LINK)
5380 while (len >= L2CAP_CMD_HDR_SIZE) {
/* Copy the header out (it may be unaligned in the skb). */
5382 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5383 data += L2CAP_CMD_HDR_SIZE;
5384 len -= L2CAP_CMD_HDR_SIZE;
5386 cmd_len = le16_to_cpu(cmd.len);
5388 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5391 if (cmd_len > len || !cmd.ident) {
5392 BT_DBG("corrupted command");
5396 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5398 struct l2cap_cmd_rej_unk rej;
5400 BT_ERR("Wrong link type (%d)", err);
5402 rej.reason = l2cap_err_to_reason(err);
5403 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS of a received ERTM/streaming frame.  The FCS
 * is trimmed off skb->len first, so skb->data + skb->len still points
 * at the (untouched) FCS bytes; the CRC covers the L2CAP header,
 * which sits hdr_size bytes before skb->data at this point.
 */
5415 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5417 u16 our_fcs, rcv_fcs;
5420 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5421 hdr_size = L2CAP_EXT_HDR_SIZE;
5423 hdr_size = L2CAP_ENH_HDR_SIZE;
5425 if (chan->fcs == L2CAP_FCS_CRC16) {
5426 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5427 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5428 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5430 if (our_fcs != rcv_fcs)
/* After local busy or a poll, make sure an F-bit reaches the peer:
 * send RNR when still busy, otherwise flush pending I-frames (which
 * may carry the F-bit), and fall back to an RR if nothing did.
 */
5436 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5438 struct l2cap_ctrl control;
5440 BT_DBG("chan %p", chan);
5442 memset(&control, 0, sizeof(control));
5445 control.reqseq = chan->buffer_seq;
5446 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5448 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5449 control.super = L2CAP_SUPER_RNR;
5450 l2cap_send_sframe(chan, &control);
/* Peer was busy and frames are outstanding: restart retransmit. */
5453 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5454 chan->unacked_frames > 0)
5455 __set_retrans_timer(chan);
5457 /* Send pending iframes */
5458 l2cap_ertm_send(chan);
5460 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5461 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5462 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5465 control.super = L2CAP_SUPER_RR;
5466 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the current tail via
 * *last_frag to avoid walking the list, and keep skb's aggregate
 * len/data_len/truesize accounting in sync.
 */
5470 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5471 struct sk_buff **last_frag)
5473 /* skb->len reflects data in skb as well as all fragments
5474 * skb->data_len reflects only data in fragments
5476 if (!skb_has_frag_list(skb))
5477 skb_shinfo(skb)->frag_list = new_frag;
5479 new_frag->next = NULL;
5481 (*last_frag)->next = new_frag;
5482 *last_frag = new_frag;
5484 skb->len += new_frag->len;
5485 skb->data_len += new_frag->len;
5486 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from an I-frame according to
 * its SAR bits.  Unsegmented frames go straight to ops->recv; START
 * records the announced SDU length, CONTINUE/END append fragments,
 * and END delivers once the accumulated length matches.  Oversized
 * or inconsistent SDUs take the (elided) error path that frees the
 * partial SDU.
 */
5489 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5490 struct l2cap_ctrl *control)
5494 switch (control->sar) {
5495 case L2CAP_SAR_UNSEGMENTED:
5499 err = chan->ops->recv(chan, skb);
5502 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
5506 chan->sdu_len = get_unaligned_le16(skb->data);
5507 skb_pull(skb, L2CAP_SDULEN_SIZE);
5509 if (chan->sdu_len > chan->imtu) {
5514 if (skb->len >= chan->sdu_len)
5518 chan->sdu_last_frag = skb;
5524 case L2CAP_SAR_CONTINUE:
5528 append_skb_frag(chan->sdu, skb,
5529 &chan->sdu_last_frag);
/* A continue fragment must not complete (or overflow) the SDU. */
5532 if (chan->sdu->len >= chan->sdu_len)
5542 append_skb_frag(chan->sdu, skb,
5543 &chan->sdu_last_frag);
5546 if (chan->sdu->len != chan->sdu_len)
5549 err = chan->ops->recv(chan, chan->sdu);
5552 /* Reassembly complete */
5554 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset tracking state. */
5562 kfree_skb(chan->sdu)
5564 chan->sdu_last_frag = NULL;
/* Stub for resegmenting queued frames after a channel move; body is
 * elided in this listing (upstream it is a placeholder returning 0).
 */
5571 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM tx state machine; a
 * no-op for non-ERTM channels.
 */
5577 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5581 if (chan->mode != L2CAP_MODE_ERTM)
5584 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5585 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver in-sequence frames from srej_q until
 * a gap (or local busy) stops us; once the queue is empty the rx
 * state returns to RECV and an ack is sent.
 */
5588 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5591 /* Pass sequential frames to l2cap_reassemble_sdu()
5592 * until a gap is encountered.
5595 BT_DBG("chan %p", chan);
5597 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5598 struct sk_buff *skb;
5599 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5600 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5602 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5607 skb_unlink(skb, &chan->srej_q);
5608 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5609 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5614 if (skb_queue_empty(&chan->srej_q)) {
5615 chan->rx_state = L2CAP_RX_STATE_RECV;
5616 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the retry
 * limit, then retransmit the requested frame — either acting on a
 * poll (answering with F-bit) or on a final bit, with CONN_SREJ_ACT
 * guarding against retransmitting twice for the same SREJ.
 */
5622 static void l2cap_handle_srej(struct l2cap_chan *chan,
5623 struct l2cap_ctrl *control)
5625 struct sk_buff *skb;
5627 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next-to-send seq is nonsense — protocol error. */
5629 if (control->reqseq == chan->next_tx_seq) {
5630 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5631 l2cap_send_disconn_req(chan, ECONNRESET);
5635 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5638 BT_DBG("Seq %d not available for retransmission",
5643 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5644 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5645 l2cap_send_disconn_req(chan, ECONNRESET);
5649 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5651 if (control->poll) {
5652 l2cap_pass_to_tx(chan, control);
5654 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5655 l2cap_retransmit(chan, control);
5656 l2cap_ertm_send(chan);
5658 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5659 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5660 chan->srej_save_reqseq = control->reqseq;
5663 l2cap_pass_to_tx_fbit(chan, control);
5665 if (control->final) {
/* Only retransmit if this final SREJ wasn't already acted on. */
5666 if (chan->srej_save_reqseq != control->reqseq ||
5667 !test_and_clear_bit(CONN_SREJ_ACT,
5669 l2cap_retransmit(chan, control);
5671 l2cap_retransmit(chan, control);
5672 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5673 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5674 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and retry limit,
 * then retransmit everything from reqseq onward, with CONN_REJ_ACT
 * suppressing a duplicate retransmit when the final bit was already
 * processed.
 */
5680 static void l2cap_handle_rej(struct l2cap_chan *chan,
5681 struct l2cap_ctrl *control)
5683 struct sk_buff *skb;
5685 BT_DBG("chan %p, control %p", chan, control);
5687 if (control->reqseq == chan->next_tx_seq) {
5688 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5689 l2cap_send_disconn_req(chan, ECONNRESET);
5693 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5695 if (chan->max_tx && skb &&
5696 bt_cb(skb)->control.retries >= chan->max_tx) {
5697 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5698 l2cap_send_disconn_req(chan, ECONNRESET);
5702 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5704 l2cap_pass_to_tx(chan, control);
5706 if (control->final) {
5707 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5708 l2cap_retransmit_all(chan, control);
5710 l2cap_retransmit_all(chan, control);
5711 l2cap_ertm_send(chan);
5712 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5713 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx window:
 * EXPECTED, DUPLICATE, UNEXPECTED (gap → SREJ), or INVALID — with an
 * IGNORE variant when a small tx window lets "double poll" artifacts
 * be dropped safely.  Extra cases apply while in SREJ_SENT state.
 */
5717 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5719 BT_DBG("chan %p, txseq %d", chan, txseq);
5721 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5722 chan->expected_tx_seq);
5724 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5725 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5727 /* See notes below regarding "double poll" and
5730 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5731 BT_DBG("Invalid/Ignore - after SREJ");
5732 return L2CAP_TXSEQ_INVALID_IGNORE;
5734 BT_DBG("Invalid - in window after SREJ sent");
5735 return L2CAP_TXSEQ_INVALID;
5739 if (chan->srej_list.head == txseq) {
5740 BT_DBG("Expected SREJ");
5741 return L2CAP_TXSEQ_EXPECTED_SREJ;
5744 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5745 BT_DBG("Duplicate SREJ - txseq already stored");
5746 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5749 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5750 BT_DBG("Unexpected SREJ - not requested");
5751 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5755 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it fell outside the window. */
5756 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5758 BT_DBG("Invalid - txseq outside tx window");
5759 return L2CAP_TXSEQ_INVALID;
5762 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected (mod window) means a retransmission. */
5766 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5767 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5768 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5769 return L2CAP_TXSEQ_DUPLICATE;
5772 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5773 /* A source of invalid packets is a "double poll" condition,
5774 * where delays cause us to send multiple poll packets. If
5775 * the remote stack receives and processes both polls,
5776 * sequence numbers can wrap around in such a way that a
5777 * resent frame has a sequence number that looks like new data
5778 * with a sequence gap. This would trigger an erroneous SREJ
5781 * Fortunately, this is impossible with a tx window that's
5782 * less than half of the maximum sequence number, which allows
5783 * invalid frames to be safely ignored.
5785 * With tx window sizes greater than half of the tx window
5786 * maximum, the frame is invalid and cannot be ignored. This
5787 * causes a disconnect.
5790 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5791 BT_DBG("Invalid/Ignore - txseq outside tx window");
5792 return L2CAP_TXSEQ_INVALID_IGNORE;
5794 BT_DBG("Invalid - txseq outside tx window");
5795 return L2CAP_TXSEQ_INVALID;
5798 BT_DBG("Unexpected - txseq indicates missing frames");
5799 return L2CAP_TXSEQ_UNEXPECTED;
5803 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5804 struct l2cap_ctrl *control,
5805 struct sk_buff *skb, u8 event)
5808 bool skb_in_use = false;
5810 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5814 case L2CAP_EV_RECV_IFRAME:
5815 switch (l2cap_classify_txseq(chan, control->txseq)) {
5816 case L2CAP_TXSEQ_EXPECTED:
5817 l2cap_pass_to_tx(chan, control);
5819 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5820 BT_DBG("Busy, discarding expected seq %d",
5825 chan->expected_tx_seq = __next_seq(chan,
5828 chan->buffer_seq = chan->expected_tx_seq;
5831 err = l2cap_reassemble_sdu(chan, skb, control);
5835 if (control->final) {
5836 if (!test_and_clear_bit(CONN_REJ_ACT,
5837 &chan->conn_state)) {
5839 l2cap_retransmit_all(chan, control);
5840 l2cap_ertm_send(chan);
5844 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5845 l2cap_send_ack(chan);
5847 case L2CAP_TXSEQ_UNEXPECTED:
5848 l2cap_pass_to_tx(chan, control);
5850 /* Can't issue SREJ frames in the local busy state.
5851 * Drop this frame, it will be seen as missing
5852 * when local busy is exited.
5854 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5855 BT_DBG("Busy, discarding unexpected seq %d",
5860 /* There was a gap in the sequence, so an SREJ
5861 * must be sent for each missing frame. The
5862 * current frame is stored for later use.
5864 skb_queue_tail(&chan->srej_q, skb);
5866 BT_DBG("Queued %p (queue len %d)", skb,
5867 skb_queue_len(&chan->srej_q));
5869 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5870 l2cap_seq_list_clear(&chan->srej_list);
5871 l2cap_send_srej(chan, control->txseq);
5873 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5875 case L2CAP_TXSEQ_DUPLICATE:
5876 l2cap_pass_to_tx(chan, control);
5878 case L2CAP_TXSEQ_INVALID_IGNORE:
5880 case L2CAP_TXSEQ_INVALID:
5882 l2cap_send_disconn_req(chan, ECONNRESET);
5886 case L2CAP_EV_RECV_RR:
5887 l2cap_pass_to_tx(chan, control);
5888 if (control->final) {
5889 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5891 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5892 !__chan_is_moving(chan)) {
5894 l2cap_retransmit_all(chan, control);
5897 l2cap_ertm_send(chan);
5898 } else if (control->poll) {
5899 l2cap_send_i_or_rr_or_rnr(chan);
5901 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5902 &chan->conn_state) &&
5903 chan->unacked_frames)
5904 __set_retrans_timer(chan);
5906 l2cap_ertm_send(chan);
5909 case L2CAP_EV_RECV_RNR:
5910 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5911 l2cap_pass_to_tx(chan, control);
5912 if (control && control->poll) {
5913 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5914 l2cap_send_rr_or_rnr(chan, 0);
5916 __clear_retrans_timer(chan);
5917 l2cap_seq_list_clear(&chan->retrans_list);
5919 case L2CAP_EV_RECV_REJ:
5920 l2cap_handle_rej(chan, control);
5922 case L2CAP_EV_RECV_SREJ:
5923 l2cap_handle_srej(chan, control);
5929 if (skb && !skb_in_use) {
5930 BT_DBG("Freeing %p", skb);
/* Receive-side ERTM handler for the SREJ_SENT state: one or more SREJ
 * frames are outstanding and incoming I-frames are buffered on srej_q
 * until the sequence gap is filled, at which point queued frames are
 * reassembled in order. Returns 0 or a negative errno.
 */
5937 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5938 struct l2cap_ctrl *control,
5939 struct sk_buff *skb, u8 event)
5942 u16 txseq = control->txseq;
/* True once @skb has been queued and must not be freed on exit. */
5943 bool skb_in_use = false;
5945 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5949 case L2CAP_EV_RECV_IFRAME:
5950 switch (l2cap_classify_txseq(chan, txseq)) {
5951 case L2CAP_TXSEQ_EXPECTED:
5952 /* Keep frame for reassembly later */
5953 l2cap_pass_to_tx(chan, control);
5954 skb_queue_tail(&chan->srej_q, skb);
5956 BT_DBG("Queued %p (queue len %d)", skb,
5957 skb_queue_len(&chan->srej_q));
5959 chan->expected_tx_seq = __next_seq(chan, txseq);
5961 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* Head-of-list SREJ satisfied: retire that entry, store the frame
 * and try to drain any now-in-order frames from srej_q. */
5962 l2cap_seq_list_pop(&chan->srej_list);
5964 l2cap_pass_to_tx(chan, control);
5965 skb_queue_tail(&chan->srej_q, skb);
5967 BT_DBG("Queued %p (queue len %d)", skb,
5968 skb_queue_len(&chan->srej_q));
5970 err = l2cap_rx_queued_iframes(chan);
5975 case L2CAP_TXSEQ_UNEXPECTED:
5976 /* Got a frame that can't be reassembled yet.
5977 * Save it for later, and send SREJs to cover
5978 * the missing frames.
5980 skb_queue_tail(&chan->srej_q, skb);
5982 BT_DBG("Queued %p (queue len %d)", skb,
5983 skb_queue_len(&chan->srej_q));
5985 l2cap_pass_to_tx(chan, control);
5986 l2cap_send_srej(chan, control->txseq);
5988 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5989 /* This frame was requested with an SREJ, but
5990 * some expected retransmitted frames are
5991 * missing. Request retransmission of missing
5994 skb_queue_tail(&chan->srej_q, skb);
5996 BT_DBG("Queued %p (queue len %d)", skb,
5997 skb_queue_len(&chan->srej_q));
5999 l2cap_pass_to_tx(chan, control);
6000 l2cap_send_srej_list(chan, control->txseq);
6002 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6003 /* We've already queued this frame. Drop this copy. */
6004 l2cap_pass_to_tx(chan, control);
6006 case L2CAP_TXSEQ_DUPLICATE:
6007 /* Expecting a later sequence number, so this frame
6008 * was already received. Ignore it completely.
6011 case L2CAP_TXSEQ_INVALID_IGNORE:
6013 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: disconnect the channel. */
6015 l2cap_send_disconn_req(chan, ECONNRESET);
6019 case L2CAP_EV_RECV_RR:
6020 l2cap_pass_to_tx(chan, control);
6021 if (control->final) {
6022 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6024 if (!test_and_clear_bit(CONN_REJ_ACT,
6025 &chan->conn_state)) {
6027 l2cap_retransmit_all(chan, control);
6030 l2cap_ertm_send(chan);
6031 } else if (control->poll) {
6032 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6033 &chan->conn_state) &&
6034 chan->unacked_frames) {
6035 __set_retrans_timer(chan);
/* Answer the poll with the tail of our SREJ list (F-bit set). */
6038 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6039 l2cap_send_srej_tail(chan);
6041 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6042 &chan->conn_state) &&
6043 chan->unacked_frames)
6044 __set_retrans_timer(chan);
6046 l2cap_send_ack(chan);
6049 case L2CAP_EV_RECV_RNR:
6050 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6051 l2cap_pass_to_tx(chan, control);
6052 if (control->poll) {
6053 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR at buffer_seq. */
6055 struct l2cap_ctrl rr_control;
6056 memset(&rr_control, 0, sizeof(rr_control));
6057 rr_control.sframe = 1;
6058 rr_control.super = L2CAP_SUPER_RR;
6059 rr_control.reqseq = chan->buffer_seq;
6060 l2cap_send_sframe(chan, &rr_control);
6064 case L2CAP_EV_RECV_REJ:
6065 l2cap_handle_rej(chan, control);
6067 case L2CAP_EV_RECV_SREJ:
6068 l2cap_handle_srej(chan, control);
/* Common exit: free the skb unless a branch above queued it. */
6072 if (skb && !skb_in_use) {
6073 BT_DBG("Freeing %p", skb);
/* Finalize a channel move (AMP): return the rx state machine to RECV,
 * refresh the connection MTU for the new link, and re-segment any
 * pending outbound data for the new MTU.
 * NOTE(review): the choice between block_mtu and acl_mtu presumably
 * depends on whether chan->hs_hcon is set — the condition is not
 * visible in this chunk; confirm against the full source.
 */
6080 static int l2cap_finish_move(struct l2cap_chan *chan)
6082 BT_DBG("chan %p", chan);
6084 chan->rx_state = L2CAP_RX_STATE_RECV;
6087 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6089 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6091 return l2cap_resegment(chan);
/* Receive handler for the WAIT_P state (used during channel move):
 * waits for a frame with the P-bit from the peer, then rewinds the
 * transmit queue to the peer's reqseq, finishes the move, answers the
 * poll with the F-bit, and re-processes the triggering event in the
 * normal RECV state. Returns 0 or a negative errno.
 */
6094 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6095 struct l2cap_ctrl *control,
6096 struct sk_buff *skb, u8 event)
6100 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Release everything the peer has acknowledged so far. */
6106 l2cap_process_reqseq(chan, control->reqseq);
6108 if (!skb_queue_empty(&chan->tx_q))
6109 chan->tx_send_head = skb_peek(&chan->tx_q);
6111 chan->tx_send_head = NULL;
6113 /* Rewind next_tx_seq to the point expected
6116 chan->next_tx_seq = control->reqseq;
6117 chan->unacked_frames = 0;
6119 err = l2cap_finish_move(chan);
/* Respond to the poll with an F-bit frame. */
6123 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6124 l2cap_send_i_or_rr_or_rnr(chan);
6126 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frames are re-dispatched through the normal RECV handler. */
6129 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Receive handler for the WAIT_F state (channel move, poll sent):
 * ignores frames until one with the F-bit arrives, then rewinds the
 * tx queue to the peer's reqseq, refreshes the MTU for the new link,
 * re-segments pending data, and re-processes the frame in the normal
 * RECV state. Returns 0 or a negative errno.
 */
6132 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6133 struct l2cap_ctrl *control,
6134 struct sk_buff *skb, u8 event)
/* Only a frame carrying the F-bit ends the wait. */
6138 if (!control->final)
6141 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6143 chan->rx_state = L2CAP_RX_STATE_RECV;
6144 l2cap_process_reqseq(chan, control->reqseq);
6146 if (!skb_queue_empty(&chan->tx_q))
6147 chan->tx_send_head = skb_peek(&chan->tx_q);
6149 chan->tx_send_head = NULL;
6151 /* Rewind next_tx_seq to the point expected
6154 chan->next_tx_seq = control->reqseq;
6155 chan->unacked_frames = 0;
/* NOTE(review): MTU selection presumably keys off chan->hs_hcon;
 * the condition is not visible in this chunk. */
6158 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6160 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6162 err = l2cap_resegment(chan);
6165 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate an incoming reqseq (ack number): it must fall within the
 * range of sequence numbers we have sent but not yet had acked, i.e.
 * between expected_ack_seq and next_tx_seq (modulo the seq space).
 */
6170 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6172 /* Make sure reqseq is for a packet that has been sent but not acked */
6175 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6176 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: after validating the frame's
 * reqseq, routes the event to the handler for the channel's current
 * rx_state. An invalid reqseq is a protocol violation and triggers a
 * disconnect. Returns 0 or a negative errno from the state handler.
 */
6179 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6180 struct sk_buff *skb, u8 event)
6184 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6185 control, skb, event, chan->rx_state);
6187 if (__valid_reqseq(chan, control->reqseq)) {
6188 switch (chan->rx_state) {
6189 case L2CAP_RX_STATE_RECV:
6190 err = l2cap_rx_state_recv(chan, control, skb, event);
6192 case L2CAP_RX_STATE_SREJ_SENT:
6193 err = l2cap_rx_state_srej_sent(chan, control, skb,
6196 case L2CAP_RX_STATE_WAIT_P:
6197 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6199 case L2CAP_RX_STATE_WAIT_F:
6200 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq acknowledges a frame we never sent: fatal. */
6207 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6208 control->reqseq, chan->next_tx_seq,
6209 chan->expected_ack_seq);
6210 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: frames are never retransmitted, so only the
 * exactly-expected txseq is reassembled; anything else means frames
 * were lost and any partial SDU in progress is discarded. Sequence
 * state is advanced unconditionally at the end.
 */
6216 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6217 struct sk_buff *skb)
6221 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6224 if (l2cap_classify_txseq(chan, control->txseq) ==
6225 L2CAP_TXSEQ_EXPECTED) {
6226 l2cap_pass_to_tx(chan, control);
6228 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6229 __next_seq(chan, chan->buffer_seq));
6231 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6233 l2cap_reassemble_sdu(chan, skb, control);
/* Lost frame(s): drop any partially reassembled SDU. */
6236 kfree_skb(chan->sdu);
6239 chan->sdu_last_frag = NULL;
6243 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence even for dropped frames. */
6248 chan->last_acked_seq = control->txseq;
6249 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpacks the
 * control field, verifies the FCS, validates the payload length against
 * the MPS, then dispatches I-frames to the rx state machine (or the
 * streaming path) and S-frames to the matching RR/REJ/RNR/SREJ event.
 * Protocol violations trigger a disconnect request.
 */
6254 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6256 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6260 __unpack_control(chan, skb);
6265 * We can just drop the corrupted I-frame here.
6266 * Receiver will miss it and start proper recovery
6267 * procedures and ask for retransmission.
6269 if (l2cap_check_fcs(chan, skb))
/* Compute the payload length: the SDU-length field of a START
 * fragment and the trailing FCS are not payload. */
6272 if (!control->sframe && control->sar == L2CAP_SAR_START)
6273 len -= L2CAP_SDULEN_SIZE;
6275 if (chan->fcs == L2CAP_FCS_CRC16)
6276 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol error. */
6278 if (len > chan->mps) {
6279 l2cap_send_disconn_req(chan, ECONNRESET);
6283 if (!control->sframe) {
6286 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6287 control->sar, control->reqseq, control->final,
6290 /* Validate F-bit - F=0 always valid, F=1 only
6291 * valid in TX WAIT_F
6293 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6296 if (chan->mode != L2CAP_MODE_STREAMING) {
6297 event = L2CAP_EV_RECV_IFRAME;
6298 err = l2cap_rx(chan, control, skb, event);
6300 err = l2cap_stream_rx(chan, control, skb);
6304 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to a state machine event. */
6306 const u8 rx_func_to_event[4] = {
6307 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6308 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6311 /* Only I-frames are expected in streaming mode */
6312 if (chan->mode == L2CAP_MODE_STREAMING)
6315 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6316 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are an error. */
6320 BT_ERR("Trailing bytes: %d in sframe", len);
6321 l2cap_send_disconn_req(chan, ECONNRESET);
6325 /* Validate F and P bits */
6326 if (control->final && (control->poll ||
6327 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6330 event = rx_func_to_event[control->super];
6331 if (l2cap_rx(chan, control, skb, event))
6332 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an inbound frame to the connection-oriented channel with the
 * given source CID. Unknown CIDs are dropped (with a special case that
 * lets A2MP create its channel on first use). Basic mode hands the skb
 * straight to the socket layer; ERTM/streaming go through
 * l2cap_data_rcv(); other modes are dropped.
 */
6342 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6343 struct sk_buff *skb)
6345 struct l2cap_chan *chan;
6347 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: A2MP data on its fixed CID may create one. */
6349 if (cid == L2CAP_CID_A2MP) {
6350 chan = a2mp_channel_create(conn, skb);
6356 l2cap_chan_lock(chan);
6358 BT_DBG("unknown cid 0x%4.4x", cid);
6359 /* Drop packet and return */
6365 BT_DBG("chan %p, len %d", chan, skb->len);
6367 if (chan->state != BT_CONNECTED)
6370 switch (chan->mode) {
6371 case L2CAP_MODE_BASIC:
6372 /* If socket recv buffers overflows we drop data here
6373 * which is *bad* because L2CAP has to be reliable.
6374 * But we don't have any other choice. L2CAP doesn't
6375 * provide flow control mechanism. */
6377 if (chan->imtu < skb->len)
6380 if (!chan->ops->recv(chan, skb))
6384 case L2CAP_MODE_ERTM:
6385 case L2CAP_MODE_STREAMING:
6386 l2cap_data_rcv(chan, skb);
6390 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6398 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame: only valid on ACL
 * links. Looks up a global channel bound to @psm, checks state and
 * MTU, stashes the remote address and PSM in the skb control block
 * for recvmsg()'s msg_name, and passes the skb to the socket layer.
 */
6401 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6402 struct sk_buff *skb)
6404 struct hci_conn *hcon = conn->hcon;
6405 struct l2cap_chan *chan;
6407 if (hcon->type != ACL_LINK)
6410 chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src,
6415 BT_DBG("chan %p, len %d", chan, skb->len);
6417 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6420 if (chan->imtu < skb->len)
6423 /* Store remote BD_ADDR and PSM for msg_name */
6424 bacpy(&bt_cb(skb)->bdaddr, &conn->hcon->dst);
6425 bt_cb(skb)->psm = psm;
6427 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (fixed CID) frame: only valid on LE links. Finds the
 * connected ATT channel matching this link's addresses, enforces the
 * channel MTU, and hands the skb to the socket layer.
 */
6434 static void l2cap_att_channel(struct l2cap_conn *conn,
6435 struct sk_buff *skb)
6437 struct hci_conn *hcon = conn->hcon;
6438 struct l2cap_chan *chan;
6440 if (hcon->type != LE_LINK)
6443 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6444 &conn->hcon->src, &conn->hcon->dst);
6448 BT_DBG("chan %p, len %d", chan, skb->len);
6450 if (chan->imtu < skb->len)
6453 if (!chan->ops->recv(chan, skb))
/* Demultiplex one complete L2CAP frame: strip the basic header,
 * validate the advertised length against the skb, then route by CID —
 * signalling, connectionless (PSM prefix), ATT, LE signalling, SMP,
 * or a dynamic data channel. Takes ownership of @skb.
 */
6460 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6462 struct l2cap_hdr *lh = (void *) skb->data;
6466 skb_pull(skb, L2CAP_HDR_SIZE);
6467 cid = __le16_to_cpu(lh->cid);
6468 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
6470 if (len != skb->len) {
6475 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6478 case L2CAP_CID_SIGNALING:
6479 l2cap_sig_channel(conn, skb);
6482 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with a 16-bit PSM. */
6483 psm = get_unaligned((__le16 *) skb->data);
6484 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6485 l2cap_conless_channel(conn, psm, skb);
6489 l2cap_att_channel(conn, skb);
6492 case L2CAP_CID_LE_SIGNALING:
6493 l2cap_le_sig_channel(conn, skb);
/* SMP failure on its fixed channel drops the whole link. */
6497 if (smp_sig_channel(conn, skb))
6498 l2cap_conn_del(conn->hcon, EACCES);
6502 l2cap_data_channel(conn, cid, skb);
6507 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and build the link-mode mask (accept, and master if role
 * switch is allowed). A channel bound to this adapter's own address
 * takes precedence over wildcard (BDADDR_ANY) listeners.
 */
6509 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6511 int exact = 0, lm1 = 0, lm2 = 0;
6512 struct l2cap_chan *c;
6514 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6516 /* Find listening sockets and check their link_mode */
6517 read_lock(&chan_list_lock);
6518 list_for_each_entry(c, &chan_list, global_l) {
6519 if (c->state != BT_LISTEN)
/* Exact match on the local adapter address. */
6522 if (!bacmp(&c->src, &hdev->bdaddr)) {
6523 lm1 |= HCI_LM_ACCEPT;
6524 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6525 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: used only if no exact match exists. */
6527 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6528 lm2 |= HCI_LM_ACCEPT;
6529 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6530 lm2 |= HCI_LM_MASTER;
6533 read_unlock(&chan_list_lock);
6535 return exact ? lm1 : lm2;
/* HCI callback when a connection attempt completes: on success create
 * (or look up) the l2cap_conn and mark it ready; on failure tear down
 * any L2CAP state with the HCI status mapped to an errno.
 */
6538 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6540 struct l2cap_conn *conn;
6542 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6545 conn = l2cap_conn_add(hcon);
6547 l2cap_conn_ready(conn);
6549 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the HCI reason code to use when this link is
 * disconnected; falls back to "remote user terminated" when no L2CAP
 * connection state exists to supply a stored disc_reason.
 */
6553 int l2cap_disconn_ind(struct hci_conn *hcon)
6555 struct l2cap_conn *conn = hcon->l2cap_data;
6557 BT_DBG("hcon %p", hcon);
6560 return HCI_ERROR_REMOTE_USER_TERM;
6561 return conn->disc_reason;
/* HCI callback on link disconnection: drop all L2CAP state for the
 * connection, translating the HCI reason into an errno. */
6564 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6566 BT_DBG("hcon %p reason %d", hcon, reason);
6568 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change in link encryption for a connection-oriented
 * channel: losing encryption starts a grace timer for MEDIUM security
 * and closes HIGH-security channels outright; regaining it clears the
 * MEDIUM-security timer.
 */
6571 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6573 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6576 if (encrypt == 0x00) {
6577 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6578 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6579 } else if (chan->sec_level == BT_SECURITY_HIGH)
6580 l2cap_chan_close(chan, ECONNREFUSED);
6582 if (chan->sec_level == BT_SECURITY_MEDIUM)
6583 __clear_chan_timer(chan);
/* HCI callback when authentication/encryption state changes on a link.
 * For LE links, a successful encryption kicks off SMP key distribution
 * and cancels the security timer. For each channel on the connection:
 * A2MP fixed channels are skipped; the ATT channel is marked ready on
 * success; channels waiting in BT_CONNECT proceed with the connection
 * (or arm a disconnect timer on failure); channels in BT_CONNECT2 get
 * a connect response (pending, success, or security-block) and, on
 * success, a first configuration request. Returns 0.
 */
6587 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6589 struct l2cap_conn *conn = hcon->l2cap_data;
6590 struct l2cap_chan *chan;
6595 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6597 if (hcon->type == LE_LINK) {
6598 if (!status && encrypt)
6599 smp_distribute_keys(conn, 0);
6600 cancel_delayed_work(&conn->security_timer);
6603 mutex_lock(&conn->chan_lock);
6605 list_for_each_entry(chan, &conn->chan_l, list) {
6606 l2cap_chan_lock(chan);
6608 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6609 state_to_string(chan->state));
/* A2MP fixed channels are unaffected by link security changes. */
6611 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6612 l2cap_chan_unlock(chan);
6616 if (chan->scid == L2CAP_CID_ATT) {
6617 if (!status && encrypt) {
6618 chan->sec_level = hcon->sec_level;
6619 l2cap_chan_ready(chan);
6622 l2cap_chan_unlock(chan);
/* Channels without a pending connection just re-check encryption. */
6626 if (!__l2cap_no_conn_pending(chan)) {
6627 l2cap_chan_unlock(chan);
6631 if (!status && (chan->state == BT_CONNECTED ||
6632 chan->state == BT_CONFIG)) {
6633 chan->ops->resume(chan);
6634 l2cap_check_encryption(chan, encrypt);
6635 l2cap_chan_unlock(chan);
/* Security completed before the connect request was sent. */
6639 if (chan->state == BT_CONNECT) {
6641 l2cap_start_connection(chan);
6643 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6645 } else if (chan->state == BT_CONNECT2) {
6646 struct sock *sk = chan->sk;
6647 struct l2cap_conn_rsp rsp;
/* Deferred setup keeps the response pending for userspace
 * authorization; otherwise accept immediately. */
6653 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6654 res = L2CAP_CR_PEND;
6655 stat = L2CAP_CS_AUTHOR_PEND;
6656 chan->ops->defer(chan);
6658 __l2cap_state_change(chan, BT_CONFIG);
6659 res = L2CAP_CR_SUCCESS;
6660 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection as blocked. */
6663 __l2cap_state_change(chan, BT_DISCONN);
6664 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6665 res = L2CAP_CR_SEC_BLOCK;
6666 stat = L2CAP_CS_NO_INFO;
6671 rsp.scid = cpu_to_le16(chan->dcid);
6672 rsp.dcid = cpu_to_le16(chan->scid);
6673 rsp.result = cpu_to_le16(res);
6674 rsp.status = cpu_to_le16(stat);
6675 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted channels also need their first config request. */
6678 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6679 res == L2CAP_CR_SUCCESS) {
6681 set_bit(CONF_REQ_SENT, &chan->conf_state);
6682 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6684 l2cap_build_conf_req(chan, buf),
6686 chan->num_conf_req++;
6690 l2cap_chan_unlock(chan);
6693 mutex_unlock(&conn->chan_lock);
/* HCI callback delivering ACL data for a link. Reassembles fragmented
 * ACL packets into a complete L2CAP frame: a start fragment must carry
 * at least the basic L2CAP header, whose length field sizes the
 * reassembly buffer (conn->rx_skb); continuation fragments are
 * appended until rx_len reaches zero, then the frame is dispatched.
 * Length inconsistencies mark the connection unreliable (ECOMM).
 */
6698 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6700 struct l2cap_conn *conn = hcon->l2cap_data;
6701 struct l2cap_hdr *hdr;
6704 /* For AMP controller do not create l2cap conn */
6705 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6709 conn = l2cap_conn_add(hcon);
6714 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6718 case ACL_START_NO_FLUSH:
/* A new start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it and flag the conn. */
6721 BT_ERR("Unexpected start frame (len %d)", skb->len);
6722 kfree_skb(conn->rx_skb);
6723 conn->rx_skb = NULL;
6725 l2cap_conn_unreliable(conn, ECOMM);
6728 /* Start fragment always begin with Basic L2CAP header */
6729 if (skb->len < L2CAP_HDR_SIZE) {
6730 BT_ERR("Frame is too short (len %d)", skb->len);
6731 l2cap_conn_unreliable(conn, ECOMM);
6735 hdr = (struct l2cap_hdr *) skb->data;
6736 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6738 if (len == skb->len) {
6739 /* Complete frame received */
6740 l2cap_recv_frame(conn, skb);
6744 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6746 if (skb->len > len) {
6747 BT_ERR("Frame is too long (len %d, expected len %d)",
6749 l2cap_conn_unreliable(conn, ECOMM);
6753 /* Allocate skb for the complete frame (with header) */
6754 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6758 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still expected from continuations. */
6760 conn->rx_len = len - skb->len;
6764 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6766 if (!conn->rx_len) {
6767 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6768 l2cap_conn_unreliable(conn, ECOMM);
6772 if (skb->len > conn->rx_len) {
6773 BT_ERR("Fragment is too long (len %d, expected %d)",
6774 skb->len, conn->rx_len);
6775 kfree_skb(conn->rx_skb);
6776 conn->rx_skb = NULL;
6778 l2cap_conn_unreliable(conn, ECOMM);
6782 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6784 conn->rx_len -= skb->len;
6786 if (!conn->rx_len) {
6787 /* Complete frame received. l2cap_recv_frame
6788 * takes ownership of the skb so set the global
6789 * rx_skb pointer to NULL first.
6791 struct sk_buff *rx_skb = conn->rx_skb;
6792 conn->rx_skb = NULL;
6793 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per global channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the channel-list read lock.
 */
6803 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6805 struct l2cap_chan *c;
6807 read_lock(&chan_list_lock);
6809 list_for_each_entry(c, &chan_list, global_l) {
6810 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6812 c->state, __le16_to_cpu(c->psm),
6813 c->scid, c->dcid, c->imtu, c->omtu,
6814 c->sec_level, c->mode);
6817 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show helper. */
6822 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6824 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file single-open
 * pattern). */
6827 static const struct file_operations l2cap_debugfs_fops = {
6828 .open = l2cap_debugfs_open,
6830 .llseek = seq_lseek,
6831 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
6834 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the
 * read-only debugfs entry (failure to create it is only logged, not
 * fatal). Returns 0 or the socket-init error.
 */
6836 int __init l2cap_init(void)
6840 err = l2cap_init_sockets();
6845 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6846 NULL, &l2cap_debugfs_fops);
6848 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister the socket
 * layer, mirroring l2cap_init(). */
6854 void l2cap_exit(void)
6856 debugfs_remove(l2cap_debugfs)
6857 l2cap_cleanup_sockets();
/* Module parameter: allow disabling ERTM support at load time
 * (writable via sysfs, mode 0644). */
6860 module_param(disable_ertm, bool, 0644);
6861 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");