2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Module-wide state: advertised L2CAP feature mask, fixed-channel map,
 * the global channel list with its rwlock, and LE flow-control defaults,
 * followed by forward declarations for signalling helpers defined later.
 */
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to an L2CAP bdaddr type. For LE links, public
 * maps to BDADDR_LE_PUBLIC and everything else to BDADDR_LE_RANDOM; the
 * BR/EDR fallthrough return is not visible in this extract. */
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 /* ---- L2CAP channels ---- */
/* Unlocked lookup of a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock; match/return lines not visible here. */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
86 list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked lookup of a channel on this connection by source CID.
 * Caller must hold conn->chan_lock; match/return lines not visible here. */
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
98 list_for_each_entry(c, &conn->chan_l, list) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
/* Locked wrapper: takes conn->chan_lock around the unlocked SCID lookup.
 * The per-channel lock acquisition on a hit is not visible in this
 * extract. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
121 /* Find channel with given DCID.
122 * Returns locked channel.
/* Locked wrapper: takes conn->chan_lock around the unlocked DCID lookup.
 * Mirrors l2cap_get_chan_by_scid() above. */
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
127 struct l2cap_chan *c;
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
133 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by signalling command identifier (chan->ident).
 * Caller must hold conn->chan_lock. */
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
/* Locked wrapper: takes conn->chan_lock around the unlocked ident
 * lookup. */
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 struct l2cap_chan *c;
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
159 mutex_unlock(&conn->chan_lock);
/* Scan the global channel list for one bound to the given source PSM
 * (sport) and source address. Caller must hold chan_list_lock. */
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a source PSM to a channel. An explicit psm is rejected if already
 * bound on this address (error path not visible in extract); psm == 0
 * auto-allocates from the dynamic odd-valued range 0x1001-0x10ff.
 * Serialized by chan_list_lock. */
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock(&chan_list_lock);
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock(&chan_list_lock);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register a fixed-channel SCID on the channel, overriding the
 * connection-oriented defaults (MTU, channel type). Serialized by
 * chan_list_lock; the scid assignment itself is not visible in this
 * extract. */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
213 /* Override the defaults (which are for conn-oriented) */
214 chan->omtu = L2CAP_DEFAULT_MTU;
215 chan->chan_type = L2CAP_CHAN_FIXED;
219 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection.
 * The dynamic range end differs for LE vs BR/EDR links. Exhaustion
 * return value is not visible in this extract. */
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
231 dyn_end = L2CAP_CID_DYN_END;
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Log the transition and notify the channel's ops callback of the new
 * state (with no error). */
241 static void l2cap_state_change(struct l2cap_chan *chan, int state)
243 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
244 state_to_string(state));
247 chan->ops->state_change(chan, state, 0);
/* Notify ops->state_change with an error; the state argument passed
 * here is the current state (new-state handling not visible in
 * extract). */
250 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
254 chan->ops->state_change(chan, chan->state, err);
/* Report an error on the channel without changing its state. */
257 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
259 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending and a retransmission timeout is configured. */
262 static void __set_retrans_timer(struct l2cap_chan *chan)
264 if (!delayed_work_pending(&chan->monitor_timer) &&
265 chan->retrans_timeout) {
266 l2cap_set_timer(chan, &chan->retrans_timer,
267 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from retransmission to monitor timing: cancel the retrans
 * timer and arm the monitor timer if a timeout is configured. */
271 static void __set_monitor_timer(struct l2cap_chan *chan)
273 __clear_retrans_timer(chan);
274 if (chan->monitor_timeout) {
275 l2cap_set_timer(chan, &chan->monitor_timer,
276 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying the given ERTM
 * tx sequence number; return lines not visible in this extract. */
280 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
293 /* ---- L2CAP sequence number lists ---- */
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
/* Initialize a sequence list sized for the negotiated tx window. The
 * backing array is rounded up to a power of two so (seq & mask) indexes
 * it; every slot starts CLEAR. The kmalloc NULL check is not visible in
 * this extract — presumably returns -ENOMEM on failure; confirm against
 * upstream. */
304 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
306 size_t alloc_size, i;
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
312 alloc_size = roundup_pow_of_two(size);
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the sequence list's backing array (kfree(NULL) is a no-op). */
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
329 kfree(seq_list->list);
/* O(1) membership test: a slot is occupied iff it is not CLEAR. */
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
335 /* Constant-time check for list membership */
336 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Pop the head sequence number in O(1): advance head to its successor
 * and clear the vacated slot. When the head slot held the TAIL
 * sentinel, the list is now empty, so reset head and tail to CLEAR. */
339 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
341 u16 seq = seq_list->head;
342 u16 mask = seq_list->mask;
344 seq_list->head = seq_list->list[seq & mask];
345 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
347 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
348 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
349 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list. Early-out when already empty; otherwise O(n) reset of
 * every slot plus the head/tail markers. */
355 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
359 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
362 for (i = 0; i <= seq_list->mask; i++)
363 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq to the tail in O(1). Duplicates are ignored (slot already
 * occupied). An empty list gets seq as its new head; otherwise the old
 * tail's slot is linked to seq (the else arm is not visible in this
 * extract). The new tail slot holds the TAIL sentinel. */
369 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
371 u16 mask = seq_list->mask;
373 /* All appends happen in constant time */
375 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
378 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
379 seq_list->head = seq;
381 seq_list->list[seq_list->tail & mask] = seq;
383 seq_list->tail = seq;
384 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a
 * state-dependent reason (ECONNREFUSED for connected/config or an
 * unauthenticated connect; other states' reason not visible in this
 * extract), then notify ops->close and drop the timer's channel ref.
 * Takes conn->chan_lock and the channel lock in that order. */
387 static void l2cap_chan_timeout(struct work_struct *work)
389 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
391 struct l2cap_conn *conn = chan->conn;
394 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
396 mutex_lock(&conn->chan_lock);
397 l2cap_chan_lock(chan);
399 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
400 reason = ECONNREFUSED;
401 else if (chan->state == BT_CONNECT &&
402 chan->sec_level != BT_SECURITY_SDP)
403 reason = ECONNREFUSED;
407 l2cap_chan_close(chan, reason);
409 l2cap_chan_unlock(chan);
411 chan->ops->close(chan);
412 mutex_unlock(&conn->chan_lock);
414 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed (GFP_ATOMIC), lock and
 * default lock-nesting level set, linked onto the global channel list,
 * timeout work initialized, state BT_OPEN, refcount 1, and
 * CONF_NOT_COMPLETE set until l2cap_chan_ready(). Returns the channel
 * (NULL-on-alloc-failure path not visible in this extract). */
417 struct l2cap_chan *l2cap_chan_create(void)
419 struct l2cap_chan *chan;
421 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
425 mutex_init(&chan->lock);
427 /* Set default lock nesting level */
428 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
430 write_lock(&chan_list_lock);
431 list_add(&chan->global_l, &chan_list);
432 write_unlock(&chan_list_lock);
434 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
436 chan->state = BT_OPEN;
438 kref_init(&chan->kref);
440 /* This flag is cleared in l2cap_chan_ready() */
441 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
443 BT_DBG("chan %p", chan);
447 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink the channel from the global list. The
 * final kfree is not visible in this extract. */
449 static void l2cap_chan_destroy(struct kref *kref)
451 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
453 BT_DBG("chan %p", chan);
455 write_lock(&chan_list_lock);
456 list_del(&chan->global_l);
457 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get call not visible in this
 * extract; only the debug print of the prior refcount is). */
462 void l2cap_chan_hold(struct l2cap_chan *c)
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; the last put runs l2cap_chan_destroy(). */
469 void l2cap_chan_put(struct l2cap_chan *c)
471 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
473 kref_put(&c->kref, l2cap_chan_destroy);
475 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to its protocol defaults: CRC16 FCS, default ERTM
 * retry/window parameters (remote values mirror local until
 * negotiated), low security, default flush/retrans/monitor timeouts,
 * cleared config state, and force-active power mode. */
477 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
479 chan->fcs = L2CAP_FCS_CRC16;
480 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
481 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
482 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
483 chan->remote_max_tx = chan->max_tx;
484 chan->remote_tx_win = chan->tx_win;
485 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->sec_level = BT_SECURITY_LOW;
487 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
488 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
489 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
490 chan->conf_state = 0;
492 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
494 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control: no SDU reassembly in
 * progress, zero tx credits until the peer grants some, module-default
 * rx credits, MPS capped by our input MTU, and an empty tx queue. */
496 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
499 chan->sdu_last_frag = NULL;
501 chan->tx_credits = 0;
502 chan->rx_credits = le_max_credits;
503 chan->mps = min_t(u16, chan->imtu, le_default_mps);
505 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection (caller holds conn->chan_lock):
 * assign CIDs/MTU per channel type, seed default EFS parameters, take a
 * channel ref, take an hci_conn ref for non-fixed channels (or fixed
 * ones that asked via FLAG_HOLD_HCI_CONN), and link into the
 * connection's channel list. */
508 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
510 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
511 __le16_to_cpu(chan->psm), chan->dcid);
513 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
517 switch (chan->chan_type) {
518 case L2CAP_CHAN_CONN_ORIENTED:
519 /* Alloc CID for connection-oriented socket */
520 chan->scid = l2cap_alloc_cid(conn);
521 if (conn->hcon->type == ACL_LINK)
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_LESS:
526 /* Connectionless socket */
527 chan->scid = L2CAP_CID_CONN_LESS;
528 chan->dcid = L2CAP_CID_CONN_LESS;
529 chan->omtu = L2CAP_DEFAULT_MTU;
532 case L2CAP_CHAN_FIXED:
533 /* Caller will set CID and CID specific MTU values */
537 /* Raw socket can send/recv signalling messages only */
538 chan->scid = L2CAP_CID_SIGNALING;
539 chan->dcid = L2CAP_CID_SIGNALING;
540 chan->omtu = L2CAP_DEFAULT_MTU;
543 chan->local_id = L2CAP_BESTEFFORT_ID;
544 chan->local_stype = L2CAP_SERV_BESTEFFORT;
545 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
546 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
547 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
548 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
550 l2cap_chan_hold(chan);
552 /* Only keep a reference for fixed channels if they requested it */
553 if (chan->chan_type != L2CAP_CHAN_FIXED ||
554 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
555 hci_conn_hold(conn->hcon);
557 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
560 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
562 mutex_lock(&conn->chan_lock);
563 __l2cap_chan_add(conn, chan);
564 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: cancel the channel timer,
 * notify ops->teardown with err, then (while conn is valid) unlink from
 * the connection list, drop the channel ref, release the hci_conn ref
 * where one was held, clear a matching AMP manager bredr_chan pointer,
 * and disconnect any AMP logical link. Finally, per-mode cleanup:
 * purge tx/srej queues and stop ERTM timers / free seq lists. Several
 * guard lines (e.g. the `if (conn)` test and the final `return`) are
 * not visible in this extract. */
567 void l2cap_chan_del(struct l2cap_chan *chan, int err)
569 struct l2cap_conn *conn = chan->conn;
571 __clear_chan_timer(chan);
573 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
574 state_to_string(chan->state));
576 chan->ops->teardown(chan, err);
579 struct amp_mgr *mgr = conn->hcon->amp_mgr;
580 /* Delete from channel list */
581 list_del(&chan->list);
583 l2cap_chan_put(chan);
587 /* Reference was only held for non-fixed channels or
588 * fixed channels that explicitly requested it using the
589 * FLAG_HOLD_HCI_CONN flag.
591 if (chan->chan_type != L2CAP_CHAN_FIXED ||
592 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
593 hci_conn_drop(conn->hcon);
595 if (mgr && mgr->bredr_chan == chan)
596 mgr->bredr_chan = NULL;
599 if (chan->hs_hchan) {
600 struct hci_chan *hs_hchan = chan->hs_hchan;
602 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
603 amp_disconnect_logical_link(hs_hchan);
606 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
610 case L2CAP_MODE_BASIC:
613 case L2CAP_MODE_LE_FLOWCTL:
614 skb_queue_purge(&chan->tx_q);
617 case L2CAP_MODE_ERTM:
618 __clear_retrans_timer(chan);
619 __clear_monitor_timer(chan);
620 __clear_ack_timer(chan);
622 skb_queue_purge(&chan->srej_q);
624 l2cap_seq_list_free(&chan->srej_list);
625 l2cap_seq_list_free(&chan->retrans_list);
629 case L2CAP_MODE_STREAMING:
630 skb_queue_purge(&chan->tx_q);
636 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work handler: after an identity-address resolution, copy the hci_conn
 * destination address and type into every channel on the connection,
 * taking each channel lock under conn->chan_lock. */
638 static void l2cap_conn_update_id_addr(struct work_struct *work)
640 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
641 id_addr_update_work);
642 struct hci_conn *hcon = conn->hcon;
643 struct l2cap_chan *chan;
645 mutex_lock(&conn->chan_lock);
647 list_for_each_entry(chan, &conn->chan_l, list) {
648 l2cap_chan_lock(chan);
649 bacpy(&chan->dst, &hcon->dst);
650 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
651 l2cap_chan_unlock(chan);
654 mutex_unlock(&conn->chan_lock);
/* Reject an incoming LE credit-based connection: result is
 * AUTHORIZATION when setup was deferred, otherwise BAD_PSM. Moves the
 * channel to BT_DISCONN and answers the pending request (chan->ident)
 * with an LE connect response carrying our parameters. */
657 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
659 struct l2cap_conn *conn = chan->conn;
660 struct l2cap_le_conn_rsp rsp;
663 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
664 result = L2CAP_CR_AUTHORIZATION;
666 result = L2CAP_CR_BAD_PSM;
668 l2cap_state_change(chan, BT_DISCONN);
670 rsp.dcid = cpu_to_le16(chan->scid);
671 rsp.mtu = cpu_to_le16(chan->imtu);
672 rsp.mps = cpu_to_le16(chan->mps);
673 rsp.credits = cpu_to_le16(chan->rx_credits);
674 rsp.result = cpu_to_le16(result);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Reject an incoming BR/EDR connection: SEC_BLOCK when setup was
 * deferred, otherwise BAD_PSM. Moves the channel to BT_DISCONN and
 * answers the pending request with a connect response (note scid/dcid
 * are swapped from our perspective for the peer). */
680 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
682 struct l2cap_conn *conn = chan->conn;
683 struct l2cap_conn_rsp rsp;
686 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
687 result = L2CAP_CR_SEC_BLOCK;
689 result = L2CAP_CR_BAD_PSM;
691 l2cap_state_change(chan, BT_DISCONN);
693 rsp.scid = cpu_to_le16(chan->dcid);
694 rsp.dcid = cpu_to_le16(chan->scid);
695 rsp.result = cpu_to_le16(result);
696 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
698 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* State-machine driven channel shutdown (the switch's case labels are
 * not visible in this extract): listening channels just tear down;
 * connected/config channels send a disconnect request with a timer;
 * half-open incoming channels send the appropriate reject for their
 * link type; otherwise the channel is simply deleted. */
701 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
703 struct l2cap_conn *conn = chan->conn;
705 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
707 switch (chan->state) {
709 chan->ops->teardown(chan, 0);
714 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
715 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
716 l2cap_send_disconn_req(chan, reason);
718 l2cap_chan_del(chan, reason);
722 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
723 if (conn->hcon->type == ACL_LINK)
724 l2cap_chan_connect_reject(chan);
725 else if (conn->hcon->type == LE_LINK)
726 l2cap_chan_le_connect_reject(chan);
729 l2cap_chan_del(chan, reason);
734 l2cap_chan_del(chan, reason);
738 chan->ops->teardown(chan, 0);
742 EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement from the channel type and
 * security level. Raw/conn-less/conn-oriented SDP-class PSMs get their
 * security level demoted to SDP and use no-bonding variants; other
 * channels map HIGH/FIPS to MITM bonding, MEDIUM to plain bonding, and
 * everything else to no bonding. Some case labels are not visible in
 * this extract. */
744 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
746 switch (chan->chan_type) {
748 switch (chan->sec_level) {
749 case BT_SECURITY_HIGH:
750 case BT_SECURITY_FIPS:
751 return HCI_AT_DEDICATED_BONDING_MITM;
752 case BT_SECURITY_MEDIUM:
753 return HCI_AT_DEDICATED_BONDING;
755 return HCI_AT_NO_BONDING;
758 case L2CAP_CHAN_CONN_LESS:
759 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
760 if (chan->sec_level == BT_SECURITY_LOW)
761 chan->sec_level = BT_SECURITY_SDP;
763 if (chan->sec_level == BT_SECURITY_HIGH ||
764 chan->sec_level == BT_SECURITY_FIPS)
765 return HCI_AT_NO_BONDING_MITM;
767 return HCI_AT_NO_BONDING;
769 case L2CAP_CHAN_CONN_ORIENTED:
770 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
771 if (chan->sec_level == BT_SECURITY_LOW)
772 chan->sec_level = BT_SECURITY_SDP;
774 if (chan->sec_level == BT_SECURITY_HIGH ||
775 chan->sec_level == BT_SECURITY_FIPS)
776 return HCI_AT_NO_BONDING_MITM;
778 return HCI_AT_NO_BONDING;
782 switch (chan->sec_level) {
783 case BT_SECURITY_HIGH:
784 case BT_SECURITY_FIPS:
785 return HCI_AT_GENERAL_BONDING_MITM;
786 case BT_SECURITY_MEDIUM:
787 return HCI_AT_GENERAL_BONDING;
789 return HCI_AT_NO_BONDING;
795 /* Service level security */
/* LE links delegate to SMP; BR/EDR links translate the channel's
 * security level into an auth type and ask the HCI layer. */
796 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
798 struct l2cap_conn *conn = chan->conn;
801 if (conn->hcon->type == LE_LINK)
802 return smp_conn_security(conn->hcon, chan->sec_level);
804 auth_type = l2cap_get_auth_type(chan);
806 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier under ident_lock.
 * Kernel identifiers cycle within 1-128; the wrap-around reset after
 * the > 128 test is not visible in this extract. */
810 static u8 l2cap_get_ident(struct l2cap_conn *conn)
814 /* Get next available identificator.
815 * 1 - 128 are used by kernel.
816 * 129 - 199 are reserved.
817 * 200 - 254 are used by utilities like l2ping, etc.
820 mutex_lock(&conn->ident_lock);
822 if (++conn->tx_ident > 128)
827 mutex_unlock(&conn->ident_lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel at maximum priority with the radio forced active. NO_FLUSH is
 * used when the controller supports it or the link is LE (which cannot
 * auto-flush); the skb-NULL early return is not visible in this
 * extract. */
832 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
835 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
838 BT_DBG("code 0x%2.2x", code);
843 /* Use NO_FLUSH if supported or we have an LE link (which does
844 * not support auto-flushing packets) */
845 if (lmp_no_flush_capable(conn->hcon->hdev) ||
846 conn->hcon->type == LE_LINK)
847 flags = ACL_START_NO_FLUSH;
851 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
852 skb->priority = HCI_PRIO_MAX;
854 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE). */
857 static bool __chan_is_moving(struct l2cap_chan *chan)
859 return chan->move_state != L2CAP_MOVE_STABLE &&
860 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Send a data skb for the channel. A channel settled on a high-speed
 * (AMP) link sends there; otherwise it goes out the BR/EDR or LE ACL
 * channel, using NO_FLUSH for LE links or flush-capable controllers
 * unless the channel explicitly asked to be flushable. */
863 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
865 struct hci_conn *hcon = chan->conn->hcon;
868 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
871 if (chan->hs_hcon && !__chan_is_moving(chan)) {
873 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
880 /* Use NO_FLUSH for LE links (where this is the only option) or
881 * if the BR/EDR link supports it and flushing has not been
882 * explicitly requested (through FLAG_FLUSHABLE).
884 if (hcon->type == LE_LINK ||
885 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
886 lmp_no_flush_capable(hcon->hdev)))
887 flags = ACL_START_NO_FLUSH;
891 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
892 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into the l2cap_ctrl struct:
 * reqseq/final always; poll+super for S-frames, sar+txseq for I-frames
 * (the sframe flag assignments are not visible in this extract). */
895 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
897 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
898 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
900 if (enh & L2CAP_CTRL_FRAME_TYPE) {
903 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
904 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
911 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
912 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field; same structure as the
 * enhanced variant but with the wider extended-window bit layout. */
919 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
921 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
922 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
924 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
927 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
928 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
935 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
936 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of an skb, choosing
 * extended (32-bit) vs enhanced (16-bit) by FLAG_EXT_CTRL. */
943 static inline void __unpack_control(struct l2cap_chan *chan,
946 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
947 __unpack_extended_control(get_unaligned_le32(skb->data),
948 &bt_cb(skb)->control);
949 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
951 __unpack_enhanced_control(get_unaligned_le16(skb->data),
952 &bt_cb(skb)->control);
953 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode an l2cap_ctrl into the
 * 32-bit extended control field (S-frame vs I-frame bits per sframe). */
957 static u32 __pack_extended_control(struct l2cap_ctrl *control)
961 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
962 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
964 if (control->sframe) {
965 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
966 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
967 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
969 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
970 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode an l2cap_ctrl into the
 * 16-bit enhanced control field. */
976 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
980 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
981 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
983 if (control->sframe) {
984 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
985 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
986 packed |= L2CAP_CTRL_FRAME_TYPE;
988 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
989 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an skb just past the basic L2CAP
 * header, choosing extended vs enhanced form by FLAG_EXT_CTRL. */
995 static inline void __pack_control(struct l2cap_chan *chan,
996 struct l2cap_ctrl *control,
999 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1000 put_unaligned_le32(__pack_extended_control(control),
1001 skb->data + L2CAP_HDR_SIZE);
1003 put_unaligned_le16(__pack_enhanced_control(control),
1004 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended header when FLAG_EXT_CTRL is set,
 * enhanced header otherwise. */
1008 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1010 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1011 return L2CAP_EXT_HDR_SIZE;
1013 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU: basic header + packed control field,
 * plus a CRC16 FCS over the whole frame when the channel uses it.
 * Returns ERR_PTR(-ENOMEM) on allocation failure; sent at max
 * priority. */
1016 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1019 struct sk_buff *skb;
1020 struct l2cap_hdr *lh;
1021 int hlen = __ertm_hdr_size(chan);
1023 if (chan->fcs == L2CAP_FCS_CRC16)
1024 hlen += L2CAP_FCS_SIZE;
1026 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1029 return ERR_PTR(-ENOMEM);
1031 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1032 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1033 lh->cid = cpu_to_le16(chan->dcid);
1035 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1036 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1038 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1040 if (chan->fcs == L2CAP_FCS_CRC16) {
1041 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1042 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1045 skb->priority = HCI_PRIO_MAX;
/* Transmit a supervisory frame. Bails out for non-S-frames or while a
 * channel move is in progress. Maintains RNR-sent bookkeeping and the
 * pending F-bit, and for non-SREJ frames records the acked reqseq and
 * cancels the ack timer; finally packs the control field (extended or
 * enhanced), builds the PDU and sends it. The F-bit assignment and the
 * IS_ERR(skb) check are not visible in this extract. */
1049 static void l2cap_send_sframe(struct l2cap_chan *chan,
1050 struct l2cap_ctrl *control)
1052 struct sk_buff *skb;
1055 BT_DBG("chan %p, control %p", chan, control);
1057 if (!control->sframe)
1060 if (__chan_is_moving(chan))
1063 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1067 if (control->super == L2CAP_SUPER_RR)
1068 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1069 else if (control->super == L2CAP_SUPER_RNR)
1070 set_bit(CONN_RNR_SENT, &chan->conn_state);
1072 if (control->super != L2CAP_SUPER_SREJ) {
1073 chan->last_acked_seq = control->reqseq;
1074 __clear_ack_timer(chan);
1077 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1078 control->final, control->poll, control->super);
1080 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1081 control_field = __pack_extended_control(control);
1083 control_field = __pack_enhanced_control(control);
1085 skb = l2cap_create_sframe_pdu(chan, control_field);
1087 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready, when locally busy) S-frame
 * acknowledging buffer_seq, with the requested poll bit. */
1090 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1092 struct l2cap_ctrl control;
1094 BT_DBG("chan %p, poll %d", chan, poll);
1096 memset(&control, 0, sizeof(control));
1098 control.poll = poll;
1100 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1101 control.super = L2CAP_SUPER_RNR;
1103 control.super = L2CAP_SUPER_RR;
1105 control.reqseq = chan->buffer_seq;
1106 l2cap_send_sframe(chan, &control);
/* True unless a connection-oriented channel has a connect request
 * outstanding (CONF_CONNECT_PEND). */
1109 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1111 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1114 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: HS must be
 * enabled, the peer must support the A2MP fixed channel, and at least
 * one non-BR/EDR AMP controller must be up. Only channels whose policy
 * prefers AMP report the availability; other policies' results are not
 * visible in this extract. */
1117 static bool __amp_capable(struct l2cap_chan *chan)
1119 struct l2cap_conn *conn = chan->conn;
1120 struct hci_dev *hdev;
1121 bool amp_available = false;
1123 if (!conn->hs_enabled)
1126 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1129 read_lock(&hci_dev_list_lock);
1130 list_for_each_entry(hdev, &hci_dev_list, list) {
1131 if (hdev->amp_type != AMP_TYPE_BREDR &&
1132 test_bit(HCI_UP, &hdev->flags)) {
1133 amp_available = true;
1137 read_unlock(&hci_dev_list_lock);
1139 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1140 return amp_available;
/* Validate extended flow specification parameters; the body is not
 * visible in this extract beyond the placeholder comment. */
1145 static bool l2cap_check_efs(struct l2cap_chan *chan)
1147 /* Check EFS parameters */
/* Send an L2CAP connection request for the channel: allocate a
 * signalling ident, mark the connect as pending, and send our SCID and
 * PSM to the peer. */
1151 void l2cap_send_conn_req(struct l2cap_chan *chan)
1153 struct l2cap_conn *conn = chan->conn;
1154 struct l2cap_conn_req req;
1156 req.scid = cpu_to_le16(chan->scid);
1157 req.psm = chan->psm;
1159 chan->ident = l2cap_get_ident(conn);
1161 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1163 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP create-channel request: like a connect request but with
 * the target controller's amp_id included. */
1166 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1168 struct l2cap_create_chan_req req;
1169 req.scid = cpu_to_le16(chan->scid);
1170 req.psm = chan->psm;
1171 req.amp_id = amp_id;
1173 chan->ident = l2cap_get_ident(chan->conn);
1175 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset
 * the retry count, cap per-frame retries at 1 so moved frames resend
 * once, rewind expected_tx_seq to buffer_seq, clear REJ/SREJ state and
 * queues, enter XMIT/MOVE tx/rx states, and treat the remote as busy
 * until the move completes. Non-ERTM channels return early. */
1179 static void l2cap_move_setup(struct l2cap_chan *chan)
1181 struct sk_buff *skb;
1183 BT_DBG("chan %p", chan);
1185 if (chan->mode != L2CAP_MODE_ERTM)
1188 __clear_retrans_timer(chan);
1189 __clear_monitor_timer(chan);
1190 __clear_ack_timer(chan);
1192 chan->retry_count = 0;
1193 skb_queue_walk(&chan->tx_q, skb) {
1194 if (bt_cb(skb)->control.retries)
1195 bt_cb(skb)->control.retries = 1;
1200 chan->expected_tx_seq = chan->buffer_seq;
1202 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1203 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1204 l2cap_seq_list_clear(&chan->retrans_list);
1205 l2cap_seq_list_clear(&chan->srej_list);
1206 skb_queue_purge(&chan->srej_q);
1208 chan->tx_state = L2CAP_TX_STATE_XMIT;
1209 chan->rx_state = L2CAP_RX_STATE_MOVE;
1211 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return move state/role to stable/none, then for
 * ERTM channels resynchronize — the initiator polls the peer and waits
 * for the F-bit, the responder waits for the P-bit. */
1214 static void l2cap_move_done(struct l2cap_chan *chan)
1216 u8 move_role = chan->move_role;
1217 BT_DBG("chan %p", chan);
1219 chan->move_state = L2CAP_MOVE_STABLE;
1220 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1222 if (chan->mode != L2CAP_MODE_ERTM)
1225 switch (move_role) {
1226 case L2CAP_MOVE_ROLE_INITIATOR:
1227 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1228 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1230 case L2CAP_MOVE_ROLE_RESPONDER:
1231 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully established: clear all config flags (including
 * CONF_NOT_COMPLETE), stop the channel timer, suspend LE credit-based
 * channels that have no tx credits yet, then enter BT_CONNECTED and
 * notify ops->ready. */
1236 static void l2cap_chan_ready(struct l2cap_chan *chan)
1238 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1239 chan->conf_state = 0;
1240 __clear_chan_timer(chan);
1242 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1243 chan->ops->suspend(chan);
1245 chan->state = BT_CONNECTED;
1247 chan->ops->ready(chan);
/* Send an LE credit-based connection request (once — guarded by
 * FLAG_LE_CONN_REQ_SENT) carrying our PSM, SCID, MTU, MPS and initial
 * rx credits. */
1250 static void l2cap_le_connect(struct l2cap_chan *chan)
1252 struct l2cap_conn *conn = chan->conn;
1253 struct l2cap_le_conn_req req;
1255 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1258 req.psm = chan->psm;
1259 req.scid = cpu_to_le16(chan->scid);
1260 req.mtu = cpu_to_le16(chan->imtu);
1261 req.mps = cpu_to_le16(chan->mps);
1262 req.credits = cpu_to_le16(chan->rx_credits);
1264 chan->ident = l2cap_get_ident(conn);
1266 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Drive LE channel setup once SMP security is satisfied: fixed/ready
 * channels become ready, connecting channels issue the LE connect
 * request. The guard conditions between these steps are only partially
 * visible in this extract. */
1270 static void l2cap_le_start(struct l2cap_chan *chan)
1272 struct l2cap_conn *conn = chan->conn;
1274 if (!smp_conn_security(conn->hcon, chan->sec_level))
1278 l2cap_chan_ready(chan);
1282 if (chan->state == BT_CONNECT)
1283 l2cap_le_connect(chan);
/* Kick off channel establishment by transport: AMP-capable channels
 * first discover AMP controllers, LE links use the LE start path, and
 * plain BR/EDR sends a connection request. */
1286 static void l2cap_start_connection(struct l2cap_chan *chan)
1288 if (__amp_capable(chan)) {
1289 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1290 a2mp_discover_amp(chan);
1291 } else if (chan->conn->hcon->type == LE_LINK) {
1292 l2cap_le_start(chan);
1294 l2cap_send_conn_req(chan);
/* Send a one-time feature-mask information request for the connection
 * and arm the info timeout; no-op if already sent. */
1298 static void l2cap_request_info(struct l2cap_conn *conn)
1300 struct l2cap_info_req req;
1302 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1305 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1307 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1308 conn->info_ident = l2cap_get_ident(conn);
1310 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1312 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Begin establishing a channel: LE goes straight to the LE path; BR/EDR
 * first ensures the peer's feature mask has been requested and
 * received, then starts the connection once security passes and no
 * connect is already pending. */
1316 static void l2cap_do_start(struct l2cap_chan *chan)
1318 struct l2cap_conn *conn = chan->conn;
1320 if (conn->hcon->type == LE_LINK) {
1321 l2cap_le_start(chan);
1325 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1326 l2cap_request_info(conn);
1330 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1333 if (l2cap_chan_check_security(chan, true) &&
1334 __l2cap_no_conn_pending(chan))
1335 l2cap_start_connection(chan);
/* Check an L2CAP mode against both the local and remote feature masks
 * (ERTM/streaming are added to the local mask; the guard for that
 * addition is not visible in this extract). Non-zero means supported. */
1338 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1340 u32 local_feat_mask = l2cap_feat_mask;
1342 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1345 case L2CAP_MODE_ERTM:
1346 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1347 case L2CAP_MODE_STREAMING:
1348 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection: quiesce ERTM timers on a connected channel;
 * A2MP channels just change state (no disconnect PDU), while normal
 * channels send a disconnect request with both CIDs. In all cases the
 * channel ends up in BT_DISCONN with err reported. */
1354 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1356 struct l2cap_conn *conn = chan->conn;
1357 struct l2cap_disconn_req req;
1362 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1363 __clear_retrans_timer(chan);
1364 __clear_monitor_timer(chan);
1365 __clear_ack_timer(chan);
1368 if (chan->scid == L2CAP_CID_A2MP) {
1369 l2cap_state_change(chan, BT_DISCONN);
1373 req.dcid = cpu_to_le16(chan->dcid);
1374 req.scid = cpu_to_le16(chan->scid);
1375 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1378 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1381 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection (under conn->chan_lock, each
 * channel individually locked) and advance its setup state machine.
 * Non-conn-oriented channels are simply marked ready. BT_CONNECT
 * channels start connecting when security passes and no connect is
 * pending, or are closed when their required mode is unsupported and
 * CONF_STATE2_DEVICE is set. BT_CONNECT2 (incoming) channels get a
 * connect response — success, authorization-pending (deferred setup),
 * or authentication-pending — and on success a first configure request
 * is sent once (CONF_REQ_SENT). */
1382 static void l2cap_conn_start(struct l2cap_conn *conn)
1384 struct l2cap_chan *chan, *tmp;
1386 BT_DBG("conn %p", conn);
1388 mutex_lock(&conn->chan_lock);
1390 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1391 l2cap_chan_lock(chan);
1393 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1394 l2cap_chan_ready(chan);
1395 l2cap_chan_unlock(chan);
1399 if (chan->state == BT_CONNECT) {
1400 if (!l2cap_chan_check_security(chan, true) ||
1401 !__l2cap_no_conn_pending(chan)) {
1402 l2cap_chan_unlock(chan);
1406 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1407 && test_bit(CONF_STATE2_DEVICE,
1408 &chan->conf_state)) {
1409 l2cap_chan_close(chan, ECONNRESET);
1410 l2cap_chan_unlock(chan);
1414 l2cap_start_connection(chan);
1416 } else if (chan->state == BT_CONNECT2) {
1417 struct l2cap_conn_rsp rsp;
1419 rsp.scid = cpu_to_le16(chan->dcid);
1420 rsp.dcid = cpu_to_le16(chan->scid);
1422 if (l2cap_chan_check_security(chan, false)) {
1423 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1424 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1425 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1426 chan->ops->defer(chan);
1429 l2cap_state_change(chan, BT_CONFIG);
1430 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1431 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1434 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1435 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1438 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1441 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1442 rsp.result != L2CAP_CR_SUCCESS) {
1443 l2cap_chan_unlock(chan);
1447 set_bit(CONF_REQ_SENT, &chan->conf_state);
1448 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1449 l2cap_build_conf_req(chan, buf), buf);
1450 chan->num_conf_req++;
1453 l2cap_chan_unlock(chan);
1456 mutex_unlock(&conn->chan_lock);
/* LE link just came up: trigger any pending SMP security request and,
 * as slave, request a connection parameter update if the current
 * interval is outside the configured min/max window.
 */
1459 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1461 struct hci_conn *hcon = conn->hcon;
1462 struct hci_dev *hdev = hcon->hdev;
1464 BT_DBG("%s conn %p", hdev->name, conn);
1466 /* For outgoing pairing which doesn't necessarily have an
1467 * associated socket (e.g. mgmt_pair_device).
1470 smp_conn_security(hcon, hcon->pending_sec_level);
1472 /* For LE slave connections, make sure the connection interval
1473 * is in the range of the minimum and maximum interval that has
1474 * been configured for this connection. If not, then trigger
1475 * the connection update procedure.
1477 if (hcon->role == HCI_ROLE_SLAVE &&
1478 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1479 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1480 struct l2cap_conn_param_update_req req;
1482 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1483 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1484 req.latency = cpu_to_le16(hcon->le_conn_latency);
1485 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1487 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1488 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* HCI link is up: request BR/EDR feature info, then advance every
 * channel on the connection (LE start, mark ready, or do_start for
 * outgoing BT_CONNECT channels). Finally kick LE-specific setup and
 * schedule processing of any queued RX frames.
 */
1492 static void l2cap_conn_ready(struct l2cap_conn *conn)
1494 struct l2cap_chan *chan;
1495 struct hci_conn *hcon = conn->hcon;
1497 BT_DBG("conn %p", conn);
1499 if (hcon->type == ACL_LINK)
1500 l2cap_request_info(conn);
1502 mutex_lock(&conn->chan_lock);
1504 list_for_each_entry(chan, &conn->chan_l, list) {
1506 l2cap_chan_lock(chan);
/* A2MP fixed channel needs no per-channel setup here */
1508 if (chan->scid == L2CAP_CID_A2MP) {
1509 l2cap_chan_unlock(chan);
1513 if (hcon->type == LE_LINK) {
1514 l2cap_le_start(chan);
1515 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1516 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1517 l2cap_chan_ready(chan);
1518 } else if (chan->state == BT_CONNECT) {
1519 l2cap_do_start(chan);
1522 l2cap_chan_unlock(chan);
1525 mutex_unlock(&conn->chan_lock);
1527 if (hcon->type == LE_LINK)
1528 l2cap_le_conn_ready(conn);
/* Frames may have queued up before the link was ready */
1530 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1533 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate error @err to every channel on @conn that was opened with
 * the force-reliable flag, since reliability can no longer be met.
 */
1534 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1536 struct l2cap_chan *chan;
1538 BT_DBG("conn %p", conn);
1540 mutex_lock(&conn->chan_lock);
1542 list_for_each_entry(chan, &conn->chan_l, list) {
1543 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1544 l2cap_chan_set_err(chan, err);
1547 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: give up waiting for the remote feature
 * mask, mark the exchange done anyway and let pending channels proceed.
 */
1550 static void l2cap_info_timeout(struct work_struct *work)
1552 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1555 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1556 conn->info_ident = 0;
1558 l2cap_conn_start(conn);
1563 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1564 * callback is called during registration. The ->remove callback is called
1565 * during unregistration.
1566 * An l2cap_user object can either be explicitly unregistered or when the
1567 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1568 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1569 * External modules must own a reference to the l2cap_conn object if they intend
1570 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1571 * any time if they don't.
/* Register an external @user on @conn. Takes the hci_dev lock so the
 * check against a concurrent l2cap_conn_del() is race-free; rejects a
 * user that is already linked, and calls the user's ->probe callback.
 */
1574 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1576 struct hci_dev *hdev = conn->hcon->hdev;
1579 /* We need to check whether l2cap_conn is registered. If it is not, we
1580 * must not register the l2cap_user. l2cap_conn_del() unregisters
1581 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1582 * relies on the parent hci_conn object to be locked. This itself relies
1583 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already registered */
1588 if (user->list.next || user->list.prev) {
1593 /* conn->hchan is NULL after l2cap_conn_del() was called */
1599 ret = user->probe(conn, user);
1603 list_add(&user->list, &conn->users);
1607 hci_dev_unlock(hdev);
1610 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister @user from @conn: unlink it, clear its list
 * pointers (the "not registered" marker) and invoke ->remove. Runs
 * under the hci_dev lock, mirroring l2cap_register_user().
 */
1612 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1614 struct hci_dev *hdev = conn->hcon->hdev;
1618 if (!user->list.next || !user->list.prev)
1621 list_del(&user->list);
1622 user->list.next = NULL;
1623 user->list.prev = NULL;
1624 user->remove(conn, user);
1627 hci_dev_unlock(hdev);
1629 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from @conn (used on connection
 * teardown), clearing list pointers and calling each ->remove callback.
 */
1631 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1633 struct l2cap_user *user;
1635 while (!list_empty(&conn->users)) {
1636 user = list_first_entry(&conn->users, struct l2cap_user, list);
1637 list_del(&user->list);
1638 user->list.next = NULL;
1639 user->list.prev = NULL;
1640 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon with error @err:
 * drop queued RX data, cancel RX/work items, unregister users, close
 * and release every channel, then drop the HCI channel and the conn
 * reference. Caller context must match l2cap_conn_del()'s locking
 * contract (see comment in l2cap_register_user()).
 */
1644 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1646 struct l2cap_conn *conn = hcon->l2cap_data;
1647 struct l2cap_chan *chan, *l;
1652 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1654 kfree_skb(conn->rx_skb);
1656 skb_queue_purge(&conn->pending_rx);
1658 /* We can not call flush_work(&conn->pending_rx_work) here since we
1659 * might block if we are running on a worker from the same workqueue
1660 * pending_rx_work is waiting on.
1662 if (work_pending(&conn->pending_rx_work))
1663 cancel_work_sync(&conn->pending_rx_work);
1665 if (work_pending(&conn->id_addr_update_work))
1666 cancel_work_sync(&conn->id_addr_update_work);
1668 l2cap_unregister_all_users(conn);
1670 /* Force the connection to be immediately dropped */
1671 hcon->disc_timeout = 0;
1673 mutex_lock(&conn->chan_lock);
/* Hold each channel across close so ->close can't free it under us */
1676 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1677 l2cap_chan_hold(chan);
1678 l2cap_chan_lock(chan);
1680 l2cap_chan_del(chan, err);
1682 l2cap_chan_unlock(chan);
1684 chan->ops->close(chan);
1685 l2cap_chan_put(chan);
1688 mutex_unlock(&conn->chan_lock);
1690 hci_chan_del(conn->hchan);
1692 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1693 cancel_delayed_work_sync(&conn->info_timer);
1695 hcon->l2cap_data = NULL;
1697 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (the conn itself is freed as part of this path).
 */
1700 static void l2cap_conn_free(struct kref *ref)
1702 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1704 hci_conn_put(conn->hcon);
/* Take a reference on @conn and return it (for external users). */
1708 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1710 kref_get(&conn->ref);
1713 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1715 void l2cap_conn_put(struct l2cap_conn *conn)
1717 kref_put(&conn->ref, l2cap_conn_free);
1719 EXPORT_SYMBOL(l2cap_conn_put);
1721 /* ---- Socket interface ---- */
1723 /* Find socket with psm and source / destination bdaddr.
1724 * Returns closest match.
/* Look up a global channel matching @state, @psm and link type.
 * An exact src/dst address match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match is remembered and returned
 * with a reference held. Returns NULL if nothing matches.
 */
1726 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1731 struct l2cap_chan *c, *c1 = NULL;
1733 read_lock(&chan_list_lock);
1735 list_for_each_entry(c, &chan_list, global_l) {
1736 if (state && c->state != state)
/* Channel's source address type must suit the link type */
1739 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1742 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1745 if (c->psm == psm) {
1746 int src_match, dst_match;
1747 int src_any, dst_any;
1750 src_match = !bacmp(&c->src, src);
1751 dst_match = !bacmp(&c->dst, dst);
1752 if (src_match && dst_match) {
1754 read_unlock(&chan_list_lock);
/* No exact match: fall back to wildcard addresses */
1759 src_any = !bacmp(&c->src, BDADDR_ANY);
1760 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1761 if ((src_match && dst_any) || (src_any && dst_match) ||
1762 (src_any && dst_any))
1768 l2cap_chan_hold(c1);
1770 read_unlock(&chan_list_lock);
/* ERTM monitor timer handler: feed a MONITOR_TO event into the TX
 * state machine under the channel lock, then drop the timer's
 * channel reference.
 */
1775 static void l2cap_monitor_timeout(struct work_struct *work)
1777 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1778 monitor_timer.work);
1780 BT_DBG("chan %p", chan);
1782 l2cap_chan_lock(chan);
/* Early-out path (condition line not visible in this extraction) */
1785 l2cap_chan_unlock(chan);
1786 l2cap_chan_put(chan);
1790 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1792 l2cap_chan_unlock(chan);
1793 l2cap_chan_put(chan);
/* ERTM retransmission timer handler: feed a RETRANS_TO event into the
 * TX state machine under the channel lock, then drop the timer's
 * channel reference.
 */
1796 static void l2cap_retrans_timeout(struct work_struct *work)
1798 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1799 retrans_timer.work);
1801 BT_DBG("chan %p", chan);
1803 l2cap_chan_lock(chan);
/* Early-out path (condition line not visible in this extraction) */
1806 l2cap_chan_unlock(chan);
1807 l2cap_chan_put(chan);
1811 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1812 l2cap_chan_unlock(chan);
1813 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the channel TX queue and
 * send everything immediately — each frame gets a sequence number,
 * packed control field and optional CRC16 FCS, with no retransmission
 * bookkeeping.
 */
1816 static void l2cap_streaming_send(struct l2cap_chan *chan,
1817 struct sk_buff_head *skbs)
1819 struct sk_buff *skb;
1820 struct l2cap_ctrl *control;
1822 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress */
1824 if (__chan_is_moving(chan))
1827 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1829 while (!skb_queue_empty(&chan->tx_q)) {
1831 skb = skb_dequeue(&chan->tx_q);
1833 bt_cb(skb)->control.retries = 1;
1834 control = &bt_cb(skb)->control;
1836 control->reqseq = 0;
1837 control->txseq = chan->next_tx_seq;
1839 __pack_control(chan, control, skb);
1841 if (chan->fcs == L2CAP_FCS_CRC16) {
1842 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1843 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1846 l2cap_do_send(chan, skb);
1848 BT_DBG("Sent txseq %u", control->txseq);
1850 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1851 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the state machine is in XMIT. Frames stay on tx_q for
 * possible retransmission — a clone is sent, the original is kept.
 * Returns the number of frames sent (via the invisible `sent` local).
 */
1855 static int l2cap_ertm_send(struct l2cap_chan *chan)
1857 struct sk_buff *skb, *tx_skb;
1858 struct l2cap_ctrl *control;
1861 BT_DBG("chan %p", chan);
1863 if (chan->state != BT_CONNECTED)
1866 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1869 if (__chan_is_moving(chan))
1872 while (chan->tx_send_head &&
1873 chan->unacked_frames < chan->remote_tx_win &&
1874 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1876 skb = chan->tx_send_head;
1878 bt_cb(skb)->control.retries = 1;
1879 control = &bt_cb(skb)->control;
/* Piggy-back the F-bit if one is owed to the remote */
1881 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1884 control->reqseq = chan->buffer_seq;
1885 chan->last_acked_seq = chan->buffer_seq;
1886 control->txseq = chan->next_tx_seq;
1888 __pack_control(chan, control, skb);
1890 if (chan->fcs == L2CAP_FCS_CRC16) {
1891 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1892 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1895 /* Clone after data has been modified. Data is assumed to be
1896 read-only (for locking purposes) on cloned sk_buffs.
1898 tx_skb = skb_clone(skb, GFP_KERNEL);
1903 __set_retrans_timer(chan);
1905 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1906 chan->unacked_frames++;
1907 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent */
1910 if (skb_queue_is_last(&chan->tx_q, skb))
1911 chan->tx_send_head = NULL;
1913 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1915 l2cap_do_send(chan, tx_skb);
1916 BT_DBG("Sent txseq %u", control->txseq);
1919 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1920 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's retry counter is bumped (disconnecting when max_tx is
 * exceeded), the control field is refreshed with the current reqseq
 * and F-bit, the FCS is recomputed, and a fresh clone/copy is sent.
 */
1925 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1927 struct l2cap_ctrl control;
1928 struct sk_buff *skb;
1929 struct sk_buff *tx_skb;
1932 BT_DBG("chan %p", chan);
1934 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1937 if (__chan_is_moving(chan))
1940 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1941 seq = l2cap_seq_list_pop(&chan->retrans_list);
1943 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1945 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1950 bt_cb(skb)->control.retries++;
1951 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries */
1953 if (chan->max_tx != 0 &&
1954 bt_cb(skb)->control.retries > chan->max_tx) {
1955 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1956 l2cap_send_disconn_req(chan, ECONNRESET);
1957 l2cap_seq_list_clear(&chan->retrans_list);
1961 control.reqseq = chan->buffer_seq;
1962 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1967 if (skb_cloned(skb)) {
1968 /* Cloned sk_buffs are read-only, so we need a
1971 tx_skb = skb_copy(skb, GFP_KERNEL);
1973 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failed: abandon this retransmission round */
1977 l2cap_seq_list_clear(&chan->retrans_list);
1981 /* Update skb contents */
1982 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1983 put_unaligned_le32(__pack_extended_control(&control),
1984 tx_skb->data + L2CAP_HDR_SIZE);
1986 put_unaligned_le16(__pack_enhanced_control(&control),
1987 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the trailing FCS must be recomputed */
1991 if (chan->fcs == L2CAP_FCS_CRC16) {
1992 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1993 tx_skb->len - L2CAP_FCS_SIZE);
1994 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1998 l2cap_do_send(chan, tx_skb);
2000 BT_DBG("Resent txseq %d", control.txseq);
2002 chan->last_acked_seq = chan->buffer_seq;
/* Queue the single frame named by control->reqseq for retransmission
 * and flush the retransmit list immediately.
 */
2006 static void l2cap_retransmit(struct l2cap_chan *chan,
2007 struct l2cap_ctrl *control)
2009 BT_DBG("chan %p, control %p", chan, control);
2011 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2012 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting from control->reqseq:
 * rebuild retrans_list by walking tx_q from the requested sequence
 * up to (but excluding) tx_send_head, then resend. Skipped entirely
 * while the remote is busy.
 */
2015 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2016 struct l2cap_ctrl *control)
2018 struct sk_buff *skb;
2020 BT_DBG("chan %p, control %p", chan, control);
2023 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2025 l2cap_seq_list_clear(&chan->retrans_list);
2027 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2030 if (chan->unacked_frames) {
/* First walk: position on the frame matching reqseq (or head) */
2031 skb_queue_walk(&chan->tx_q, skb) {
2032 if (bt_cb(skb)->control.txseq == control->reqseq ||
2033 skb == chan->tx_send_head)
2037 skb_queue_walk_from(&chan->tx_q, skb) {
2038 if (skb == chan->tx_send_head)
2041 l2cap_seq_list_append(&chan->retrans_list,
2042 bt_cb(skb)->control.txseq);
2045 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on outgoing I-frames; if none were sent,
 * send an explicit RR once the unacked window is 3/4 full, else just
 * (re)arm the ack timer.
 */
2049 static void l2cap_send_ack(struct l2cap_chan *chan)
2051 struct l2cap_ctrl control;
2052 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2053 chan->last_acked_seq);
2056 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2057 chan, chan->last_acked_seq, chan->buffer_seq);
2059 memset(&control, 0, sizeof(control));
2062 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2063 chan->rx_state == L2CAP_RX_STATE_RECV) {
2064 __clear_ack_timer(chan);
2065 control.super = L2CAP_SUPER_RNR;
2066 control.reqseq = chan->buffer_seq;
2067 l2cap_send_sframe(chan, &control);
2069 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2070 l2cap_ertm_send(chan);
2071 /* If any i-frames were sent, they included an ack */
2072 if (chan->buffer_seq == chan->last_acked_seq)
2076 /* Ack now if the window is 3/4ths full.
2077 * Calculate without mul or div
2079 threshold = chan->ack_win;
2080 threshold += threshold << 1;
/* threshold is now 3 * ack_win; shift-right line not visible here */
2083 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2086 if (frames_to_ack >= threshold) {
2087 __clear_ack_timer(chan);
2088 control.super = L2CAP_SUPER_RR;
2089 control.reqseq = chan->buffer_seq;
2090 l2cap_send_sframe(chan, &control);
2095 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb linear area, the remainder is split into
 * continuation fragments (each at most conn->mtu) chained on the
 * skb's frag_list. Returns 0 on success or a negative errno.
 */
2099 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2100 struct msghdr *msg, int len,
2101 int count, struct sk_buff *skb)
2103 struct l2cap_conn *conn = chan->conn;
2104 struct sk_buff **frag;
2107 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2108 msg->msg_iov, count))
2114 /* Continuation fragments (no L2CAP header) */
2115 frag = &skb_shinfo(skb)->frag_list;
2117 struct sk_buff *tmp;
2119 count = min_t(unsigned int, conn->mtu, len);
2121 tmp = chan->ops->alloc_skb(chan, 0, count,
2122 msg->msg_flags & MSG_DONTWAIT);
2124 return PTR_ERR(tmp);
2128 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2129 msg->msg_iov, count))
/* Account fragment bytes in the parent skb's totals */
2135 skb->len += (*frag)->len;
2136 skb->data_len += (*frag)->len;
2138 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by @len bytes of user data. Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
2144 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2145 struct msghdr *msg, size_t len)
2147 struct l2cap_conn *conn = chan->conn;
2148 struct sk_buff *skb;
2149 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2150 struct l2cap_hdr *lh;
2152 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2153 __le16_to_cpu(chan->psm), len);
2155 count = min_t(unsigned int, (conn->mtu - hlen), len);
2157 skb = chan->ops->alloc_skb(chan, hlen, count,
2158 msg->msg_flags & MSG_DONTWAIT);
2162 /* Create L2CAP header */
2163 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2164 lh->cid = cpu_to_le16(chan->dcid);
2165 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2166 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2168 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2169 if (unlikely(err < 0)) {
2171 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * @len bytes of user data. Returns the skb or an ERR_PTR on failure.
 */
2176 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2177 struct msghdr *msg, size_t len)
2179 struct l2cap_conn *conn = chan->conn;
2180 struct sk_buff *skb;
2182 struct l2cap_hdr *lh;
2184 BT_DBG("chan %p len %zu", chan, len);
2186 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2188 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2189 msg->msg_flags & MSG_DONTWAIT);
2193 /* Create L2CAP header */
2194 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2195 lh->cid = cpu_to_le16(chan->dcid);
2196 lh->len = cpu_to_le16(len);
2198 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2199 if (unlikely(err < 0)) {
2201 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * SAR start frames, the payload, and room reserved for CRC16 FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2206 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2207 struct msghdr *msg, size_t len,
2210 struct l2cap_conn *conn = chan->conn;
2211 struct sk_buff *skb;
2212 int err, count, hlen;
2213 struct l2cap_hdr *lh;
2215 BT_DBG("chan %p len %zu", chan, len);
2218 return ERR_PTR(-ENOTCONN);
2220 hlen = __ertm_hdr_size(chan);
/* SAR start frames carry the total SDU length */
2223 hlen += L2CAP_SDULEN_SIZE;
2225 if (chan->fcs == L2CAP_FCS_CRC16)
2226 hlen += L2CAP_FCS_SIZE;
2228 count = min_t(unsigned int, (conn->mtu - hlen), len);
2230 skb = chan->ops->alloc_skb(chan, hlen, count,
2231 msg->msg_flags & MSG_DONTWAIT);
2235 /* Create L2CAP header */
2236 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2237 lh->cid = cpu_to_le16(chan->dcid);
2238 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2240 /* Control header is populated later */
2241 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2242 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2244 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2247 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2249 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2250 if (unlikely(err < 0)) {
2252 return ERR_PTR(err);
2255 bt_cb(skb)->control.fcs = chan->fcs;
2256 bt_cb(skb)->control.retries = 0;
/* Segment an SDU of @len bytes from @msg into ERTM/streaming I-frame
 * PDUs appended to @seg_queue. PDU size is bounded by the HCI MTU
 * (single-fragment requirement), L2CAP overhead and the remote MPS;
 * SAR tags mark unsegmented/start/continue/end frames.
 */
2260 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2261 struct sk_buff_head *seg_queue,
2262 struct msghdr *msg, size_t len)
2264 struct sk_buff *skb;
2269 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2271 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2272 * so fragmented skbs are not used. The HCI layer's handling
2273 * of fragmented skbs is not compatible with ERTM's queueing.
2276 /* PDU size is derived from the HCI MTU */
2277 pdu_len = chan->conn->mtu;
2279 /* Constrain PDU size for BR/EDR connections */
2281 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2283 /* Adjust for largest possible L2CAP overhead. */
2285 pdu_len -= L2CAP_FCS_SIZE;
2287 pdu_len -= __ertm_hdr_size(chan);
2289 /* Remote device may have requested smaller PDUs */
2290 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2292 if (len <= pdu_len) {
2293 sar = L2CAP_SAR_UNSEGMENTED;
2297 sar = L2CAP_SAR_START;
2302 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2305 __skb_queue_purge(seg_queue);
2306 return PTR_ERR(skb);
2309 bt_cb(skb)->control.sar = sar;
2310 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END frame */
2316 if (len <= pdu_len) {
2317 sar = L2CAP_SAR_END;
2320 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * SDU-length field on the first PDU of an SDU, then @len payload
 * bytes. Returns the skb or an ERR_PTR on failure.
 */
2327 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2329 size_t len, u16 sdulen)
2331 struct l2cap_conn *conn = chan->conn;
2332 struct sk_buff *skb;
2333 int err, count, hlen;
2334 struct l2cap_hdr *lh;
2336 BT_DBG("chan %p len %zu", chan, len);
2339 return ERR_PTR(-ENOTCONN);
2341 hlen = L2CAP_HDR_SIZE;
/* Only the first PDU of an SDU carries the SDU length */
2344 hlen += L2CAP_SDULEN_SIZE;
2346 count = min_t(unsigned int, (conn->mtu - hlen), len);
2348 skb = chan->ops->alloc_skb(chan, hlen, count,
2349 msg->msg_flags & MSG_DONTWAIT);
2353 /* Create L2CAP header */
2354 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2355 lh->cid = cpu_to_le16(chan->dcid);
2356 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2359 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2361 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2362 if (unlikely(err < 0)) {
2364 return ERR_PTR(err);
/* Segment an SDU into LE credit-based flow control PDUs on @seg_queue.
 * The first PDU reserves room for the SDU-length field; subsequent
 * PDUs regain those two bytes of payload space.
 */
2370 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2371 struct sk_buff_head *seg_queue,
2372 struct msghdr *msg, size_t len)
2374 struct sk_buff *skb;
2378 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2381 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2387 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2389 __skb_queue_purge(seg_queue);
2390 return PTR_ERR(skb);
2393 __skb_queue_tail(seg_queue, skb);
/* After the first PDU no SDU-length field is needed */
2399 pdu_len += L2CAP_SDULEN_SIZE;
/* Main data-path entry: send @len bytes from @msg on @chan, dispatching
 * on channel type and mode — connectionless, LE credit-based flow
 * control, basic mode, or ERTM/streaming with prior segmentation.
 * Returns bytes sent or a negative errno.
 */
2406 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2408 struct sk_buff *skb;
2410 struct sk_buff_head seg_queue;
2415 /* Connectionless channel */
2416 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2417 skb = l2cap_create_connless_pdu(chan, msg, len);
2419 return PTR_ERR(skb);
2421 /* Channel lock is released before requesting new skb and then
2422 * reacquired thus we need to recheck channel state.
2424 if (chan->state != BT_CONNECTED) {
2429 l2cap_do_send(chan, skb);
2433 switch (chan->mode) {
2434 case L2CAP_MODE_LE_FLOWCTL:
2435 /* Check outgoing MTU */
2436 if (len > chan->omtu)
2439 if (!chan->tx_credits)
2442 __skb_queue_head_init(&seg_queue);
2444 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2446 if (chan->state != BT_CONNECTED) {
2447 __skb_queue_purge(&seg_queue);
2454 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send while credits remain; leftovers wait for new credits */
2456 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2457 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2461 if (!chan->tx_credits)
2462 chan->ops->suspend(chan);
2468 case L2CAP_MODE_BASIC:
2469 /* Check outgoing MTU */
2470 if (len > chan->omtu)
2473 /* Create a basic PDU */
2474 skb = l2cap_create_basic_pdu(chan, msg, len);
2476 return PTR_ERR(skb);
2478 /* Channel lock is released before requesting new skb and then
2479 * reacquired thus we need to recheck channel state.
2481 if (chan->state != BT_CONNECTED) {
2486 l2cap_do_send(chan, skb);
2490 case L2CAP_MODE_ERTM:
2491 case L2CAP_MODE_STREAMING:
2492 /* Check outgoing MTU */
2493 if (len > chan->omtu) {
2498 __skb_queue_head_init(&seg_queue);
2500 /* Do segmentation before calling in to the state machine,
2501 * since it's possible to block while waiting for memory
2504 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2506 /* The channel could have been closed while segmenting,
2507 * check that it is still connected.
2509 if (chan->state != BT_CONNECTED) {
2510 __skb_queue_purge(&seg_queue);
2517 if (chan->mode == L2CAP_MODE_ERTM)
2518 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2520 l2cap_streaming_send(chan, &seg_queue);
2524 /* If the skbs were not queued for sending, they'll still be in
2525 * seg_queue and need to be purged.
2527 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode —
 * likely meant "bad mode"; left unchanged (debug-only string).
 */
2531 BT_DBG("bad state %1.1x", chan->mode);
2537 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between the
 * expected tx seq and @txseq (skipping frames already buffered in
 * srej_q), recording each on srej_list, then advance expected_tx_seq.
 */
2539 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2541 struct l2cap_ctrl control;
2544 BT_DBG("chan %p, txseq %u", chan, txseq);
2546 memset(&control, 0, sizeof(control));
2548 control.super = L2CAP_SUPER_SREJ;
2550 for (seq = chan->expected_tx_seq; seq != txseq;
2551 seq = __next_seq(chan, seq)) {
2552 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2553 control.reqseq = seq;
2554 l2cap_send_sframe(chan, &control);
2555 l2cap_seq_list_append(&chan->srej_list, seq);
2559 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ S-frame for the most recent (tail) entry on the
 * SREJ list, if the list is non-empty.
 */
2562 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2564 struct l2cap_ctrl control;
2566 BT_DBG("chan %p", chan);
2568 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2571 memset(&control, 0, sizeof(control));
2573 control.super = L2CAP_SUPER_SREJ;
2574 control.reqseq = chan->srej_list.tail;
2575 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for all outstanding entries on srej_list up to
 * (but excluding) @txseq. Entries are popped and re-appended, so the
 * initial head is captured to guarantee exactly one pass.
 */
2578 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2580 struct l2cap_ctrl control;
2584 BT_DBG("chan %p, txseq %u", chan, txseq);
2586 memset(&control, 0, sizeof(control));
2588 control.super = L2CAP_SUPER_SREJ;
2590 /* Capture initial list head to allow only one pass through the list. */
2591 initial_head = chan->srej_list.head;
2594 seq = l2cap_seq_list_pop(&chan->srej_list);
2595 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2598 control.reqseq = seq;
2599 l2cap_send_sframe(chan, &control);
2600 l2cap_seq_list_append(&chan->srej_list, seq);
2601 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (@reqseq): free every transmitted frame from
 * expected_ack_seq up to reqseq, decrement the unacked counter, and
 * stop the retransmission timer once nothing is outstanding.
 */
2604 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2606 struct sk_buff *acked_skb;
2609 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2611 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2614 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2615 chan->expected_ack_seq, chan->unacked_frames);
2617 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2618 ackseq = __next_seq(chan, ackseq)) {
2620 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2622 skb_unlink(acked_skb, &chan->tx_q);
2623 kfree_skb(acked_skb);
2624 chan->unacked_frames--;
2628 chan->expected_ack_seq = reqseq;
2630 if (chan->unacked_frames == 0)
2631 __clear_retrans_timer(chan);
2633 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort SREJ_SENT recovery: rewind expected_tx_seq to buffer_seq, drop
 * the SREJ bookkeeping and buffered out-of-order frames, and return
 * the RX state machine to RECV.
 */
2636 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2638 BT_DBG("chan %p", chan);
2640 chan->expected_tx_seq = chan->buffer_seq;
2641 l2cap_seq_list_clear(&chan->srej_list);
2642 skb_queue_purge(&chan->srej_q);
2643 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: queue and send new data, handle
 * local-busy transitions (RNR / RR-with-poll), process acks, and enter
 * WAIT_F after an explicit poll or retransmission timeout.
 */
2646 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2647 struct l2cap_ctrl *control,
2648 struct sk_buff_head *skbs, u8 event)
2650 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2654 case L2CAP_EV_DATA_REQUEST:
2655 if (chan->tx_send_head == NULL)
2656 chan->tx_send_head = skb_peek(skbs);
2658 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2659 l2cap_ertm_send(chan);
2661 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2662 BT_DBG("Enter LOCAL_BUSY");
2663 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2665 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2666 /* The SREJ_SENT state must be aborted if we are to
2667 * enter the LOCAL_BUSY state.
2669 l2cap_abort_rx_srej_sent(chan);
2672 l2cap_send_ack(chan);
2675 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2676 BT_DBG("Exit LOCAL_BUSY");
2677 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR was sent, poll the remote with RR(P=1) and wait for F */
2679 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2680 struct l2cap_ctrl local_control;
2682 memset(&local_control, 0, sizeof(local_control));
2683 local_control.sframe = 1;
2684 local_control.super = L2CAP_SUPER_RR;
2685 local_control.poll = 1;
2686 local_control.reqseq = chan->buffer_seq;
2687 l2cap_send_sframe(chan, &local_control);
2689 chan->retry_count = 1;
2690 __set_monitor_timer(chan);
2691 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2694 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2695 l2cap_process_reqseq(chan, control->reqseq);
2697 case L2CAP_EV_EXPLICIT_POLL:
2698 l2cap_send_rr_or_rnr(chan, 1);
2699 chan->retry_count = 1;
2700 __set_monitor_timer(chan);
2701 __clear_ack_timer(chan);
2702 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2704 case L2CAP_EV_RETRANS_TO:
2705 l2cap_send_rr_or_rnr(chan, 1);
2706 chan->retry_count = 1;
2707 __set_monitor_timer(chan);
2708 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2710 case L2CAP_EV_RECV_FBIT:
2711 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: data is queued but not sent
 * until the F-bit arrives; handles local-busy transitions, acks, and
 * monitor-timeout retries (disconnecting once max_tx is exceeded).
 */
2718 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2719 struct l2cap_ctrl *control,
2720 struct sk_buff_head *skbs, u8 event)
2722 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2726 case L2CAP_EV_DATA_REQUEST:
2727 if (chan->tx_send_head == NULL)
2728 chan->tx_send_head = skb_peek(skbs);
2729 /* Queue data, but don't send. */
2730 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2732 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2733 BT_DBG("Enter LOCAL_BUSY");
2734 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2736 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2737 /* The SREJ_SENT state must be aborted if we are to
2738 * enter the LOCAL_BUSY state.
2740 l2cap_abort_rx_srej_sent(chan);
2743 l2cap_send_ack(chan);
2746 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2747 BT_DBG("Exit LOCAL_BUSY");
2748 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2750 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2751 struct l2cap_ctrl local_control;
2752 memset(&local_control, 0, sizeof(local_control));
2753 local_control.sframe = 1;
2754 local_control.super = L2CAP_SUPER_RR;
2755 local_control.poll = 1;
2756 local_control.reqseq = chan->buffer_seq;
2757 l2cap_send_sframe(chan, &local_control);
2759 chan->retry_count = 1;
2760 __set_monitor_timer(chan);
2761 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2764 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2765 l2cap_process_reqseq(chan, control->reqseq);
2769 case L2CAP_EV_RECV_FBIT:
/* F-bit received: leave WAIT_F, restart retrans timer if needed */
2770 if (control && control->final) {
2771 __clear_monitor_timer(chan);
2772 if (chan->unacked_frames > 0)
2773 __set_retrans_timer(chan);
2774 chan->retry_count = 0;
2775 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format "0x2.2%x" looks malformed — likely meant
 * "0x%2.2x"; debug-only string, left unchanged here.
 */
2776 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2779 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore */
2782 case L2CAP_EV_MONITOR_TO:
2783 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2784 l2cap_send_rr_or_rnr(chan, 1);
2785 __set_monitor_timer(chan);
2786 chan->retry_count++;
2788 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch @event (with optional @control and @skbs) to the handler
 * for the channel's current ERTM TX state.
 */
2796 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2797 struct sk_buff_head *skbs, u8 event)
2799 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2800 chan, control, skbs, event, chan->tx_state);
2802 switch (chan->tx_state) {
2803 case L2CAP_TX_STATE_XMIT:
2804 l2cap_tx_state_xmit(chan, control, skbs, event);
2806 case L2CAP_TX_STATE_WAIT_F:
2807 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq/F-bit pair into the TX state machine. */
2815 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2816 struct l2cap_ctrl *control)
2818 BT_DBG("chan %p, control %p", chan, control);
2819 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received F-bit into the TX state machine. */
2822 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2823 struct l2cap_ctrl *control)
2825 BT_DBG("chan %p, control %p", chan, control);
2826 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2829 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every raw channel on @conn except the one
 * the frame arrived on; each channel's ->recv consumes its clone.
 */
2830 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2832 struct sk_buff *nskb;
2833 struct l2cap_chan *chan;
2835 BT_DBG("conn %p", conn);
2837 mutex_lock(&conn->chan_lock);
2839 list_for_each_entry(chan, &conn->chan_l, list) {
2840 if (chan->chan_type != L2CAP_CHAN_RAW)
2843 /* Don't send frame to the channel it came from */
2844 if (bt_cb(skb)->chan == chan)
2847 nskb = skb_clone(skb, GFP_KERNEL)
2850 if (chan->ops->recv(chan, nskb))
2854 mutex_unlock(&conn->chan_lock);
2857 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header + @dlen
 * bytes of @data. If header+payload exceed conn->mtu, the remainder is
 * spread over continuation fragments chained on skb frag_list (those
 * carry no L2CAP header). Returns the skb; error paths (alloc failure,
 * kfree_skb on fragment failure, final return) are in elided lines.
 */
2858 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2859 u8 ident, u16 dlen, void *data)
2861 struct sk_buff *skb, **frag;
2862 struct l2cap_cmd_hdr *cmd;
2863 struct l2cap_hdr *lh;
2866 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2867 conn, code, ident, dlen);
/* MTU must at least hold the two headers */
2869 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2872 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2873 count = min_t(unsigned int, conn->mtu, len);
2875 skb = bt_skb_alloc(count, GFP_KERNEL)
2879 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2880 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one */
2882 if (conn->hcon->type == LE_LINK)
2883 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2885 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2887 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2890 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb */
2893 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2894 memcpy(skb_put(skb, count), data, count);
2900 /* Continuation fragments (no L2CAP header) */
2901 frag = &skb_shinfo(skb)->frag_list;
2903 count = min_t(unsigned int, conn->mtu, len);
2905 *frag = bt_skb_alloc(count, GFP_KERNEL);
2909 memcpy(skb_put(*frag, count), data, count);
2914 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into *type/*olen/*val.
 * 1/2/4-byte options are read (unaligned-safe); larger options return a
 * pointer to the in-place value via *val. Returns the total consumed
 * length (header + opt->len); the switch on opt->len and the *ptr
 * advance are in elided lines.
 */
2924 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2927 struct l2cap_conf_opt *opt = *ptr;
2930 len = L2CAP_CONF_OPT_SIZE + opt->len;
2938 *val = *((u8 *) opt->val);
2942 *val = get_unaligned_le16(opt->val);
2946 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy */
2950 *val = (unsigned long) opt->val;
2954 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance
 * *ptr past it. For len > 4, @val is interpreted as a pointer to the
 * option payload and copied.
 * NOTE(review): no output-buffer bound is visible here — callers must
 * size their buffers; the upstream kernel later added a size parameter.
 */
2958 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2960 struct l2cap_conf_opt *opt = *ptr;
2962 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2969 *((u8 *) opt->val) = val;
2973 put_unaligned_le16(val, opt->val);
2977 put_unaligned_le32(val, opt->val);
2981 memcpy(opt->val, (void *) val, len);
2985 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field values depend on chan->mode
 * (ERTM uses the local service type, streaming forces best-effort).
 * The streaming-case id/acc_lat/flush_to assignments and any default
 * case are in elided lines.
 */
2988 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2990 struct l2cap_conf_efs efs;
2992 switch (chan->mode) {
2993 case L2CAP_MODE_ERTM:
2994 efs.id = chan->local_id;
2995 efs.stype = chan->local_stype;
2996 efs.msdu = cpu_to_le16(chan->local_msdu);
2997 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2998 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2999 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3002 case L2CAP_MODE_STREAMING:
3004 efs.stype = L2CAP_SERV_BESTEFFORT;
3005 efs.msdu = cpu_to_le16(chan->local_msdu);
3006 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3015 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3016 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (buffer_seq ahead of last_acked_seq), send
 * an RR/RNR to ack them. Drops the channel reference taken when the
 * timer was armed.
 */
3019 static void l2cap_ack_timeout(struct work_struct *work)
3021 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3025 BT_DBG("chan %p", chan);
3027 l2cap_chan_lock(chan);
3029 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3030 chan->last_acked_seq);
/* The frames_to_ack check guarding this call is in an elided line */
3033 l2cap_send_rr_or_rnr(chan, 0);
3035 l2cap_chan_unlock(chan);
3036 l2cap_chan_put(chan);
/* Initialize ERTM/streaming state for @chan: zero all sequence
 * counters, reset AMP move state, and (for ERTM only) set the initial
 * rx/tx states, arm the three delayed-work timers, and allocate the
 * SREJ and retransmission sequence lists. Returns 0 or a negative
 * errno from l2cap_seq_list_init(); on failure of the second list the
 * first is freed.
 */
3039 int l2cap_ertm_init(struct l2cap_chan *chan)
3043 chan->next_tx_seq = 0;
3044 chan->expected_tx_seq = 0;
3045 chan->expected_ack_seq = 0;
3046 chan->unacked_frames = 0;
3047 chan->buffer_seq = 0;
3048 chan->frames_sent = 0;
3049 chan->last_acked_seq = 0;
3051 chan->sdu_last_frag = NULL;
3054 skb_queue_head_init(&chan->tx_q);
3056 chan->local_amp_id = AMP_ID_BREDR;
3057 chan->move_id = AMP_ID_BREDR;
3058 chan->move_state = L2CAP_MOVE_STABLE;
3059 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM timers/lists below */
3061 if (chan->mode != L2CAP_MODE_ERTM)
3064 chan->rx_state = L2CAP_RX_STATE_RECV;
3065 chan->tx_state = L2CAP_TX_STATE_XMIT;
3067 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3068 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3069 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3071 skb_queue_head_init(&chan->srej_q);
3073 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3077 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3079 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to request: keep ERTM/streaming if the remote
 * feature mask supports it, otherwise fall back to basic mode.
 * The "return mode" for the supported case is in an elided line.
 */
3084 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3087 case L2CAP_MODE_STREAMING:
3088 case L2CAP_MODE_ERTM:
3089 if (l2cap_mode_supported(mode, remote_feat_mask))
3093 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) enabled
 * and the remote advertising L2CAP_FEAT_EXT_WINDOW. */
3097 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3099 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP)
 * enabled and the remote advertising L2CAP_FEAT_EXT_FLOW. */
3102 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3104 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill rfc->retrans_timeout / rfc->monitor_timeout. On an AMP link the
 * timeouts are derived from the controller's best-effort flush timeout
 * (converted to ms, scaled by the 3x+500 formula, clamped to 16 bits);
 * on BR/EDR the spec defaults are used.
 */
3107 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3108 struct l2cap_conf_rfc *rfc)
3110 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3111 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3113 /* Class 1 devices have must have ERTM timeouts
3114 * exceeding the Link Supervision Timeout. The
3115 * default Link Supervision Timeout for AMP
3116 * controllers is 10 seconds.
3118 * Class 1 devices use 0xffffffff for their
3119 * best-effort flush timeout, so the clamping logic
3120 * will result in a timeout that meets the above
3121 * requirement. ERTM timeouts are 16-bit values, so
3122 * the maximum timeout is 65.535 seconds.
3125 /* Convert timeout to milliseconds and round */
3126 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3128 /* This is the recommended formula for class 2 devices
3129 * that start ERTM timers when packets are sent to the
3132 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; the assignment is in an elided line */
3134 if (ertm_to > 0xffff)
3137 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3138 rfc->monitor_timeout = rfc->retrans_timeout;
3140 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3141 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Configure the transmit window: if the requested window exceeds the
 * classic default and EWS is supported, switch on the extended control
 * field; otherwise cap tx_win at the classic default. ack_win mirrors
 * the final tx_win.
 */
3145 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3147 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3148 __l2cap_ews_supported(chan->conn)) {
3149 /* use extended control field */
3150 set_bit(FLAG_EXT_CTRL, &chan->flags);
3151 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3153 chan->tx_win = min_t(u16, chan->tx_win,
3154 L2CAP_DEFAULT_TX_WINDOW);
3155 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3157 chan->ack_win = chan->tx_win;
/* Build our outgoing Configuration Request into @data: MTU option (if
 * non-default), then a mode-specific RFC option plus optional EFS, EWS
 * and FCS options. On the first request for an ERTM/streaming channel
 * the mode may be downgraded via l2cap_select_mode(). Returns the
 * request length (ptr - data; the return is in an elided line).
 * NOTE(review): several break statements, the done label(s) and the
 * EWS/FCS option value arguments are elided from this listing.
 */
3160 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3162 struct l2cap_conf_req *req = data;
3163 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3164 void *ptr = req->data;
3167 BT_DBG("chan %p", chan);
/* Mode selection happens only once, before any conf exchange */
3169 if (chan->num_conf_req || chan->num_conf_rsp)
3172 switch (chan->mode) {
3173 case L2CAP_MODE_STREAMING:
3174 case L2CAP_MODE_ERTM:
3175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3178 if (__l2cap_efs_supported(chan->conn))
3179 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3183 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3188 if (chan->imtu != L2CAP_DEFAULT_MTU)
3189 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3191 switch (chan->mode) {
3192 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC if the remote knows ERTM
 * or streaming; otherwise the option would be meaningless */
3196 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3197 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3200 rfc.mode = L2CAP_MODE_BASIC;
3202 rfc.max_transmit = 0;
3203 rfc.retrans_timeout = 0;
3204 rfc.monitor_timeout = 0;
3205 rfc.max_pdu_size = 0;
3207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3208 (unsigned long) &rfc);
3211 case L2CAP_MODE_ERTM:
3212 rfc.mode = L2CAP_MODE_ERTM;
3213 rfc.max_transmit = chan->max_tx;
3215 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS bounded by conn MTU minus the extended header overhead */
3217 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3218 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3220 rfc.max_pdu_size = cpu_to_le16(size);
3222 l2cap_txwin_setup(chan);
3224 rfc.txwin_size = min_t(u16, chan->tx_win,
3225 L2CAP_DEFAULT_TX_WINDOW);
3227 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3228 (unsigned long) &rfc);
3230 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3231 l2cap_add_opt_efs(&ptr, chan);
3233 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3234 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to drop the FCS if we or the remote prefer none */
3237 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3238 if (chan->fcs == L2CAP_FCS_NONE ||
3239 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3240 chan->fcs = L2CAP_FCS_NONE;
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3246 case L2CAP_MODE_STREAMING:
3247 l2cap_txwin_setup(chan);
3248 rfc.mode = L2CAP_MODE_STREAMING;
3250 rfc.max_transmit = 0;
3251 rfc.retrans_timeout = 0;
3252 rfc.monitor_timeout = 0;
3254 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3255 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3257 rfc.max_pdu_size = cpu_to_le16(size);
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3260 (unsigned long) &rfc);
3262 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3263 l2cap_add_opt_efs(&ptr, chan);
3265 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3266 if (chan->fcs == L2CAP_FCS_NONE ||
3267 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3268 chan->fcs = L2CAP_FCS_NONE;
3269 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3275 req->dcid = cpu_to_le16(chan->dcid);
3276 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configuration Request (chan->conf_req,
 * chan->conf_len) and build our Configuration Response into @data.
 * First pass: decode each option (MTU, flush timeout, QoS, RFC, FCS,
 * EFS, EWS), collecting unknown non-hint options as L2CAP_CONF_UNKNOWN.
 * Second pass: validate mode compatibility (possibly -ECONNREFUSED),
 * then fill in accepted/adjusted output options per mode. Returns the
 * response length (elided return line).
 * NOTE(review): break statements, the unknown-option hint check, the
 * endpoint label and some condition guards are elided from this
 * listing; the olen bounds checks here were later hardened upstream.
 */
3283 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3284 struct l2cap_conf_rsp *rsp = data;
3285 void *ptr = rsp->data;
3286 void *req = chan->conf_req;
3287 int len = chan->conf_len;
3288 int type, hint, olen;
3289 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3290 struct l2cap_conf_efs efs;
3292 u16 mtu = L2CAP_DEFAULT_MTU;
3293 u16 result = L2CAP_CONF_SUCCESS;
3296 BT_DBG("chan %p", chan);
/* ---- pass 1: decode the remote's options ---- */
3298 while (len >= L2CAP_CONF_OPT_SIZE) {
3299 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3301 hint = type & L2CAP_CONF_HINT;
3302 type &= L2CAP_CONF_MASK;
3305 case L2CAP_CONF_MTU:
3309 case L2CAP_CONF_FLUSH_TO:
3310 chan->flush_to = val;
3313 case L2CAP_CONF_QOS:
3316 case L2CAP_CONF_RFC:
3317 if (olen == sizeof(rfc))
3318 memcpy(&rfc, (void *) val, olen);
3321 case L2CAP_CONF_FCS:
3322 if (val == L2CAP_FCS_NONE)
3323 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3326 case L2CAP_CONF_EFS:
3328 if (olen == sizeof(efs))
3329 memcpy(&efs, (void *) val, olen);
3332 case L2CAP_CONF_EWS:
/* EWS is only legal when high speed is enabled on this conn */
3333 if (!chan->conn->hs_enabled)
3334 return -ECONNREFUSED;
3336 set_bit(FLAG_EXT_CTRL, &chan->flags);
3337 set_bit(CONF_EWS_RECV, &chan->conf_state);
3338 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3339 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back in the response */
3346 result = L2CAP_CONF_UNKNOWN;
3347 *((u8 *) ptr++) = type;
/* ---- mode negotiation (only on the first exchange) ---- */
3352 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3355 switch (chan->mode) {
3356 case L2CAP_MODE_STREAMING:
3357 case L2CAP_MODE_ERTM:
3358 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3359 chan->mode = l2cap_select_mode(rfc.mode,
3360 chan->conn->feat_mask);
3365 if (__l2cap_efs_supported(chan->conn))
3366 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3368 return -ECONNREFUSED;
3371 if (chan->mode != rfc.mode)
3372 return -ECONNREFUSED;
/* ---- pass 2: build the response options ---- */
3378 if (chan->mode != rfc.mode) {
3379 result = L2CAP_CONF_UNACCEPT;
3380 rfc.mode = chan->mode;
3382 if (chan->num_conf_rsp == 1)
3383 return -ECONNREFUSED;
3385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3386 (unsigned long) &rfc);
3389 if (result == L2CAP_CONF_SUCCESS) {
3390 /* Configure output options and let the other side know
3391 * which ones we don't like. */
3393 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3394 result = L2CAP_CONF_UNACCEPT;
3397 set_bit(CONF_MTU_DONE, &chan->conf_state);
3399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* Remote EFS service type must match ours unless one side is
 * NO_TRAFFIC */
3402 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3403 efs.stype != L2CAP_SERV_NOTRAFIC &&
3404 efs.stype != chan->local_stype) {
3406 result = L2CAP_CONF_UNACCEPT;
3408 if (chan->num_conf_req >= 1)
3409 return -ECONNREFUSED;
3411 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3413 (unsigned long) &efs);
3415 /* Send PENDING Conf Rsp */
3416 result = L2CAP_CONF_PENDING;
3417 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3422 case L2CAP_MODE_BASIC:
3423 chan->fcs = L2CAP_FCS_NONE;
3424 set_bit(CONF_MODE_DONE, &chan->conf_state);
3427 case L2CAP_MODE_ERTM:
3428 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3429 chan->remote_tx_win = rfc.txwin_size;
3431 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3433 chan->remote_max_tx = rfc.max_transmit;
3435 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3436 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3437 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3438 rfc.max_pdu_size = cpu_to_le16(size);
3439 chan->remote_mps = size;
3441 __l2cap_set_ertm_timeouts(chan, &rfc);
3443 set_bit(CONF_MODE_DONE, &chan->conf_state);
3445 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3446 sizeof(rfc), (unsigned long) &rfc);
3448 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3449 chan->remote_id = efs.id;
3450 chan->remote_stype = efs.stype;
3451 chan->remote_msdu = le16_to_cpu(efs.msdu);
3452 chan->remote_flush_to =
3453 le32_to_cpu(efs.flush_to);
3454 chan->remote_acc_lat =
3455 le32_to_cpu(efs.acc_lat);
3456 chan->remote_sdu_itime =
3457 le32_to_cpu(efs.sdu_itime);
3458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3460 (unsigned long) &efs);
3464 case L2CAP_MODE_STREAMING:
3465 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3466 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3467 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3468 rfc.max_pdu_size = cpu_to_le16(size);
3469 chan->remote_mps = size;
3471 set_bit(CONF_MODE_DONE, &chan->conf_state);
3473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3474 (unsigned long) &rfc);
/* Any other mode is rejected outright */
3479 result = L2CAP_CONF_UNACCEPT;
3481 memset(&rfc, 0, sizeof(rfc));
3482 rfc.mode = chan->mode;
3485 if (result == L2CAP_CONF_SUCCESS)
3486 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3488 rsp->scid = cpu_to_le16(chan->dcid);
3489 rsp->result = cpu_to_le16(result);
3490 rsp->flags = cpu_to_le16(0);
/* Parse the remote's Configuration Response (@rsp, @len) and build a
 * fresh Configuration Request into @data, echoing back the options we
 * will use. May set *result to UNACCEPT (MTU too small) and returns
 * -ECONNREFUSED on mode conflicts. On SUCCESS/PENDING results, commits
 * the negotiated ERTM/streaming parameters into the channel. Returns
 * the request length (elided return line).
 * NOTE(review): break statements and some option value arguments are
 * elided; this function writes into a caller-provided buffer with no
 * visible bound (hardened upstream later by a size parameter).
 */
3495 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3496 void *data, u16 *result)
3498 struct l2cap_conf_req *req = data;
3499 void *ptr = req->data;
3502 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3503 struct l2cap_conf_efs efs;
3505 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3507 while (len >= L2CAP_CONF_OPT_SIZE) {
3508 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3511 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: refuse but
 * re-offer the minimum */
3512 if (val < L2CAP_DEFAULT_MIN_MTU) {
3513 *result = L2CAP_CONF_UNACCEPT;
3514 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3520 case L2CAP_CONF_FLUSH_TO:
3521 chan->flush_to = val;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3526 case L2CAP_CONF_RFC:
3527 if (olen == sizeof(rfc))
3528 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot renegotiate the mode */
3530 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3531 rfc.mode != chan->mode)
3532 return -ECONNREFUSED;
3536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3537 sizeof(rfc), (unsigned long) &rfc);
3540 case L2CAP_CONF_EWS:
3541 chan->ack_win = min_t(u16, val, chan->ack_win);
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3546 case L2CAP_CONF_EFS:
3547 if (olen == sizeof(efs))
3548 memcpy(&efs, (void *)val, olen);
3550 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3551 efs.stype != L2CAP_SERV_NOTRAFIC &&
3552 efs.stype != chan->local_stype)
3553 return -ECONNREFUSED;
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3556 (unsigned long) &efs);
3559 case L2CAP_CONF_FCS:
3560 if (*result == L2CAP_CONF_PENDING)
3561 if (val == L2CAP_FCS_NONE)
3562 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be switched by the remote's response */
3568 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3569 return -ECONNREFUSED;
3571 chan->mode = rfc.mode;
3573 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3575 case L2CAP_MODE_ERTM:
3576 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3577 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3578 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3579 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3580 chan->ack_win = min_t(u16, chan->ack_win,
3583 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3584 chan->local_msdu = le16_to_cpu(efs.msdu);
3585 chan->local_sdu_itime =
3586 le32_to_cpu(efs.sdu_itime);
3587 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3588 chan->local_flush_to =
3589 le32_to_cpu(efs.flush_to);
3593 case L2CAP_MODE_STREAMING:
3594 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3598 req->dcid = cpu_to_le16(chan->dcid);
3599 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (scid/result/flags, no
 * options) into @data; returns its length (elided return line). */
3604 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3605 u16 result, u16 flags)
3607 struct l2cap_conf_rsp *rsp = data;
3608 void *ptr = rsp->data;
3610 BT_DBG("chan %p", chan);
3612 rsp->scid = cpu_to_le16(chan->dcid);
3613 rsp->result = cpu_to_le16(result);
3614 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. by a DEFER_SETUP socket),
 * advertising our MTU, MPS and initial rx credits.
 */
3619 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3621 struct l2cap_le_conn_rsp rsp;
3622 struct l2cap_conn *conn = chan->conn;
3624 BT_DBG("chan %p", chan);
3626 rsp.dcid = cpu_to_le16(chan->scid);
3627 rsp.mtu = cpu_to_le16(chan->imtu);
3628 rsp.mps = cpu_to_le16(chan->mps);
3629 rsp.credits = cpu_to_le16(chan->rx_credits);
3630 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3632 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR (or AMP Create Channel) success response,
 * then kick off configuration with our first Configuration Request if
 * one has not already been sent.
 * NOTE(review): the condition selecting CREATE_CHAN_RSP vs CONN_RSP
 * (presumably based on chan->local_amp_id) is in an elided line.
 */
3636 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3638 struct l2cap_conn_rsp rsp;
3639 struct l2cap_conn *conn = chan->conn;
3643 rsp.scid = cpu_to_le16(chan->dcid);
3644 rsp.dcid = cpu_to_le16(chan->scid);
3645 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3646 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3649 rsp_code = L2CAP_CREATE_CHAN_RSP;
3651 rsp_code = L2CAP_CONN_RSP;
3653 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3655 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first conf request per channel is sent from here */
3657 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3660 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3661 l2cap_build_conf_req(chan, buf), buf);
3662 chan->num_conf_req++;
/* Extract the RFC (and extended window) parameters from a successful
 * Configuration Response and commit them to the channel. Sane defaults
 * cover remotes that omitted the options. No-op for basic mode.
 * NOTE(review): the EWS txwin_ext assignment and the default/break
 * lines are elided from this listing.
 */
3665 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3669 /* Use sane default values in case a misbehaving remote device
3670 * did not send an RFC or extended window size option.
3672 u16 txwin_ext = chan->ack_win;
3673 struct l2cap_conf_rfc rfc = {
3675 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3676 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3677 .max_pdu_size = cpu_to_le16(chan->imtu),
3678 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3681 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3683 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3686 while (len >= L2CAP_CONF_OPT_SIZE) {
3687 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3690 case L2CAP_CONF_RFC:
3691 if (olen == sizeof(rfc))
3692 memcpy(&rfc, (void *)val, olen);
3694 case L2CAP_CONF_EWS:
3701 case L2CAP_MODE_ERTM:
3702 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3703 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3704 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: window comes from EWS; otherwise from RFC */
3705 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3706 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3708 chan->ack_win = min_t(u16, chan->ack_win,
3711 case L2CAP_MODE_STREAMING:
3712 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and resume starting pending channels.
 */
3716 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3717 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3720 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3722 if (cmd_len < sizeof(*rej))
3725 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3728 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3729 cmd->ident == conn->info_ident) {
3730 cancel_delayed_work(&conn->info_timer);
3732 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3733 conn->info_ident = 0;
3735 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel Req):
 * find a listening channel for the PSM, enforce link security (except
 * SDP), allocate and register the new child channel, send the response
 * (@rsp_code), and if the feature exchange is incomplete, start an
 * Information Request. Returns the new channel or NULL.
 * NOTE(review): several goto targets (response/sendresp/error paths),
 * dcid assignment from the new channel, and security-failure branches
 * are in lines elided from this listing.
 */
3741 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3742 struct l2cap_cmd_hdr *cmd,
3743 u8 *data, u8 rsp_code, u8 amp_id)
3745 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3746 struct l2cap_conn_rsp rsp;
3747 struct l2cap_chan *chan = NULL, *pchan;
3748 int result, status = L2CAP_CS_NO_INFO;
3750 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3751 __le16 psm = req->psm;
3753 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3755 /* Check if we have socket listening on psm */
3756 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3757 &conn->hcon->dst, ACL_LINK);
3759 result = L2CAP_CR_BAD_PSM;
3763 mutex_lock(&conn->chan_lock);
3764 l2cap_chan_lock(pchan);
3766 /* Check if the ACL is secure enough (if not SDP) */
3767 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3768 !hci_conn_check_link_mode(conn->hcon)) {
3769 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3770 result = L2CAP_CR_SEC_BLOCK;
3774 result = L2CAP_CR_NO_MEM;
3776 /* Check if we already have channel with that dcid */
3777 if (__l2cap_get_chan_by_dcid(conn, scid))
3780 chan = pchan->ops->new_connection(pchan);
3784 /* For certain devices (ex: HID mouse), support for authentication,
3785 * pairing and bonding is optional. For such devices, inorder to avoid
3786 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3787 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3789 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3791 bacpy(&chan->src, &conn->hcon->src);
3792 bacpy(&chan->dst, &conn->hcon->dst);
3793 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3794 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3797 chan->local_amp_id = amp_id;
3799 __l2cap_chan_add(conn, chan);
3803 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3805 chan->ident = cmd->ident;
3807 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3808 if (l2cap_chan_check_security(chan, false)) {
3809 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3810 l2cap_state_change(chan, BT_CONNECT2);
3811 result = L2CAP_CR_PEND;
3812 status = L2CAP_CS_AUTHOR_PEND;
3813 chan->ops->defer(chan);
3815 /* Force pending result for AMP controllers.
3816 * The connection will succeed after the
3817 * physical link is up.
3819 if (amp_id == AMP_ID_BREDR) {
3820 l2cap_state_change(chan, BT_CONFIG);
3821 result = L2CAP_CR_SUCCESS;
3823 l2cap_state_change(chan, BT_CONNECT2);
3824 result = L2CAP_CR_PEND;
3826 status = L2CAP_CS_NO_INFO;
3829 l2cap_state_change(chan, BT_CONNECT2);
3830 result = L2CAP_CR_PEND;
3831 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange still in flight: keep the remote pending */
3834 l2cap_state_change(chan, BT_CONNECT2);
3835 result = L2CAP_CR_PEND;
3836 status = L2CAP_CS_NO_INFO;
3840 l2cap_chan_unlock(pchan);
3841 mutex_unlock(&conn->chan_lock);
3842 l2cap_chan_put(pchan);
3845 rsp.scid = cpu_to_le16(scid);
3846 rsp.dcid = cpu_to_le16(dcid);
3847 rsp.result = cpu_to_le16(result);
3848 rsp.status = cpu_to_le16(status);
3849 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask Information Request if not yet done */
3851 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3852 struct l2cap_info_req info;
3853 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3855 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3856 conn->info_ident = l2cap_get_ident(conn);
3858 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3860 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3861 sizeof(info), &info);
3864 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3865 result == L2CAP_CR_SUCCESS) {
3867 set_bit(CONF_REQ_SENT, &chan->conf_state);
3868 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3869 l2cap_build_conf_req(chan, buf), buf);
3870 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate length, notify mgmt
 * of the connection once (HCI_CONN_MGMT_CONNECTED guard), then delegate
 * to l2cap_connect() with the classic response code. hci_dev_lock()
 * pairing the unlock below is in an elided line.
 */
3876 static int l2cap_connect_req(struct l2cap_conn *conn,
3877 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3879 struct hci_dev *hdev = conn->hcon->hdev;
3880 struct hci_conn *hcon = conn->hcon;
3882 if (cmd_len < sizeof(struct l2cap_conn_req))
3886 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3887 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3888 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3889 hci_dev_unlock(hdev);
3891 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response: locate the
 * channel by scid (or by the outstanding ident when dcid is pending),
 * then on SUCCESS store the dcid (elided), enter BT_CONFIG and send our
 * first Configuration Request; on PEND mark CONNECT_PEND; otherwise
 * tear the channel down with ECONNREFUSED.
 * NOTE(review): the result PEND case label, dcid assignment and the
 * not-found unlock/return path are in elided lines.
 */
3895 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3896 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3899 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3900 u16 scid, dcid, result, status;
3901 struct l2cap_chan *chan;
3905 if (cmd_len < sizeof(*rsp))
3908 scid = __le16_to_cpu(rsp->scid);
3909 dcid = __le16_to_cpu(rsp->dcid);
3910 result = __le16_to_cpu(rsp->result);
3911 status = __le16_to_cpu(rsp->status);
3913 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3914 dcid, scid, result, status);
3916 mutex_lock(&conn->chan_lock);
3919 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending rsp): match by the request's ident instead */
3925 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3934 l2cap_chan_lock(chan);
3937 case L2CAP_CR_SUCCESS:
3938 l2cap_state_change(chan, BT_CONFIG);
3941 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3943 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3946 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3947 l2cap_build_conf_req(chan, req), req);
3948 chan->num_conf_req++;
3952 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3956 l2cap_chan_del(chan, ECONNREFUSED);
3960 l2cap_chan_unlock(chan);
3963 mutex_unlock(&conn->chan_lock);
/* Finalize the FCS setting once configuration is done: basic mode never
 * uses an FCS; ERTM/streaming default to CRC16 unless both sides agreed
 * to drop it (CONF_RECV_NO_FCS).
 */
3968 static inline void set_default_fcs(struct l2cap_chan *chan)
3970 /* FCS is enabled only in ERTM or streaming mode, if one or both
3973 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3974 chan->fcs = L2CAP_FCS_NONE;
3975 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3976 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configuration Response after an EFS-pending exchange:
 * clears the local-pending flag, marks output configuration done and
 * transmits the response built into @data.
 */
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3980 u8 ident, u16 flags)
3982 struct l2cap_conn *conn = chan->conn;
3984 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3987 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3988 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3990 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3991 l2cap_build_conf_rsp(chan, data,
3992 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the remote. */
3995 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3998 struct l2cap_cmd_rej_cid rej;
4000 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4001 rej.scid = __cpu_to_le16(scid);
4002 rej.dcid = __cpu_to_le16(dcid);
4004 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: validate the channel and
 * its state, accumulate (possibly fragmented) option data into
 * chan->conf_req, and once the continuation flag clears, parse it and
 * respond. When both directions are configured, initialize ERTM and
 * mark the channel ready. May also trigger our own first Configuration
 * Request if not yet sent.
 * NOTE(review): unlock/return paths, the conf_len reset, and the
 * EFS-compatibility branch details are in elided lines.
 */
4007 static inline int l2cap_config_req(struct l2cap_conn *conn,
4008 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4011 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4014 struct l2cap_chan *chan;
4017 if (cmd_len < sizeof(*req))
4020 dcid = __le16_to_cpu(req->dcid);
4021 flags = __le16_to_cpu(req->flags);
4023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4025 chan = l2cap_get_chan_by_scid(conn, dcid);
4027 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring */
4031 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4032 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4037 /* Reject if config buffer is too small. */
4038 len = cmd_len - sizeof(*req);
4039 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4040 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4041 l2cap_build_conf_rsp(chan, rsp,
4042 L2CAP_CONF_REJECT, flags), rsp);
4047 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4048 chan->conf_len += len;
4050 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4051 /* Incomplete config. Send empty response. */
4052 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4053 l2cap_build_conf_rsp(chan, rsp,
4054 L2CAP_CONF_SUCCESS, flags), rsp);
4058 /* Complete config. */
4059 len = l2cap_parse_conf_req(chan, rsp);
4061 l2cap_send_disconn_req(chan, ECONNRESET);
4065 chan->ident = cmd->ident;
4066 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4067 chan->num_conf_rsp++;
4069 /* Reset config buffer. */
4072 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4075 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4076 set_default_fcs(chan);
4078 if (chan->mode == L2CAP_MODE_ERTM ||
4079 chan->mode == L2CAP_MODE_STREAMING)
4080 err = l2cap_ertm_init(chan);
4083 l2cap_send_disconn_req(chan, -err);
4085 l2cap_chan_ready(chan);
4090 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4092 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4093 l2cap_build_conf_req(chan, buf), buf);
4094 chan->num_conf_req++;
4097 /* Got Conf Rsp PENDING from remote side and assume we sent
4098 Conf Rsp PENDING in the code above */
4099 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4100 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4102 /* check compatibility */
4104 /* Send rsp for BR/EDR channel */
4106 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4108 chan->ident = cmd->ident;
4112 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response by result code:
 * SUCCESS commits the negotiated RFC parameters; PENDING may trigger
 * an EFS response or AMP logical-link creation; UNACCEPT re-parses the
 * remote's counter-options and re-sends a request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); anything else disconnects. Once both
 * input and output configuration are done, ERTM is initialized and the
 * channel becomes ready.
 * NOTE(review): not-found return, continuation handling and several
 * goto/done labels are in elided lines.
 */
4116 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4117 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4120 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4121 u16 scid, flags, result;
4122 struct l2cap_chan *chan;
4123 int len = cmd_len - sizeof(*rsp);
4126 if (cmd_len < sizeof(*rsp))
4129 scid = __le16_to_cpu(rsp->scid);
4130 flags = __le16_to_cpu(rsp->flags);
4131 result = __le16_to_cpu(rsp->result);
4133 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4136 chan = l2cap_get_chan_by_scid(conn, scid);
4141 case L2CAP_CONF_SUCCESS:
4142 l2cap_conf_rfc_get(chan, rsp->data, len);
4143 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4146 case L2CAP_CONF_PENDING:
4147 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4149 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4152 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4155 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR: answer immediately; AMP: wait for logical link */
4159 if (!chan->hs_hcon) {
4160 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4163 if (l2cap_check_efs(chan)) {
4164 amp_create_logical_link(chan);
4165 chan->ident = cmd->ident;
4171 case L2CAP_CONF_UNACCEPT:
4172 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against responses larger than our request buffer */
4175 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4176 l2cap_send_disconn_req(chan, ECONNRESET);
4180 /* throw out any old stored conf requests */
4181 result = L2CAP_CONF_SUCCESS;
4182 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4185 l2cap_send_disconn_req(chan, ECONNRESET);
4189 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4190 L2CAP_CONF_REQ, len, req);
4191 chan->num_conf_req++;
4192 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on this channel */
4198 l2cap_chan_set_err(chan, ECONNRESET);
4200 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4201 l2cap_send_disconn_req(chan, ECONNRESET);
4205 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4208 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4210 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4211 set_default_fcs(chan);
4213 if (chan->mode == L2CAP_MODE_ERTM ||
4214 chan->mode == L2CAP_MODE_STREAMING)
4215 err = l2cap_ertm_init(chan);
4218 l2cap_send_disconn_req(chan, -err);
4220 l2cap_chan_ready(chan);
4224 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: look up the channel by our dcid
 * (rejecting unknown CIDs), acknowledge with a Disconnection Response,
 * then shut down and delete the channel. A reference is held across
 * chan_del so ops->close() runs on a live object after unlock.
 */
4228 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4229 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4232 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4233 struct l2cap_disconn_rsp rsp;
4235 struct l2cap_chan *chan;
4237 if (cmd_len != sizeof(*req))
4240 scid = __le16_to_cpu(req->scid);
4241 dcid = __le16_to_cpu(req->dcid);
4243 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4245 mutex_lock(&conn->chan_lock);
4247 chan = __l2cap_get_chan_by_scid(conn, dcid);
4249 mutex_unlock(&conn->chan_lock);
4250 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4254 l2cap_chan_lock(chan);
4256 rsp.dcid = cpu_to_le16(chan->scid);
4257 rsp.scid = cpu_to_le16(chan->dcid);
4258 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4260 chan->ops->set_shutdown(chan);
4262 l2cap_chan_hold(chan);
4263 l2cap_chan_del(chan, ECONNRESET);
4265 l2cap_chan_unlock(chan);
4267 chan->ops->close(chan);
4268 l2cap_chan_put(chan);
4270 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: find the
 * channel by scid, delete it with no error, and close it (reference
 * held across the delete, mirroring l2cap_disconnect_req).
 */
4275 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4276 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4279 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4281 struct l2cap_chan *chan;
4283 if (cmd_len != sizeof(*rsp))
4286 scid = __le16_to_cpu(rsp->scid);
4287 dcid = __le16_to_cpu(rsp->dcid);
4289 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4291 mutex_lock(&conn->chan_lock);
4293 chan = __l2cap_get_chan_by_scid(conn, scid);
4295 mutex_unlock(&conn->chan_lock);
4299 l2cap_chan_lock(chan);
4301 l2cap_chan_hold(chan);
4302 l2cap_chan_del(chan, 0);
4304 l2cap_chan_unlock(chan);
4306 chan->ops->close(chan);
4307 l2cap_chan_put(chan);
4309 mutex_unlock(&conn->chan_lock);
4314 static inline int l2cap_information_req(struct l2cap_conn *conn,
4315 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4318 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4321 if (cmd_len != sizeof(*req))
4324 type = __le16_to_cpu(req->type);
4326 BT_DBG("type 0x%4.4x", type);
4328 if (type == L2CAP_IT_FEAT_MASK) {
4330 u32 feat_mask = l2cap_feat_mask;
4331 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4332 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4333 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4335 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4337 if (conn->hs_enabled)
4338 feat_mask |= L2CAP_FEAT_EXT_FLOW
4339 | L2CAP_FEAT_EXT_WINDOW;
4341 put_unaligned_le32(feat_mask, rsp->data);
4342 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4344 } else if (type == L2CAP_IT_FIXED_CHAN) {
4346 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4348 if (conn->hs_enabled)
4349 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4351 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4353 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4354 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4355 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4356 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4359 struct l2cap_info_rsp rsp;
4360 rsp.type = cpu_to_le16(type);
4361 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4362 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4369 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4370 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4373 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4376 if (cmd_len < sizeof(*rsp))
4379 type = __le16_to_cpu(rsp->type);
4380 result = __le16_to_cpu(rsp->result);
4382 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4384 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4385 if (cmd->ident != conn->info_ident ||
4386 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4389 cancel_delayed_work(&conn->info_timer);
4391 if (result != L2CAP_IR_SUCCESS) {
4392 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4393 conn->info_ident = 0;
4395 l2cap_conn_start(conn);
4401 case L2CAP_IT_FEAT_MASK:
4402 conn->feat_mask = get_unaligned_le32(rsp->data);
4404 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4405 struct l2cap_info_req req;
4406 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4408 conn->info_ident = l2cap_get_ident(conn);
4410 l2cap_send_cmd(conn, conn->info_ident,
4411 L2CAP_INFO_REQ, sizeof(req), &req);
4413 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4414 conn->info_ident = 0;
4416 l2cap_conn_start(conn);
4420 case L2CAP_IT_FIXED_CHAN:
4421 conn->fixed_chan_mask = rsp->data[0];
4422 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4423 conn->info_ident = 0;
4425 l2cap_conn_start(conn);
4432 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4433 struct l2cap_cmd_hdr *cmd,
4434 u16 cmd_len, void *data)
4436 struct l2cap_create_chan_req *req = data;
4437 struct l2cap_create_chan_rsp rsp;
4438 struct l2cap_chan *chan;
4439 struct hci_dev *hdev;
4442 if (cmd_len != sizeof(*req))
4445 if (!conn->hs_enabled)
4448 psm = le16_to_cpu(req->psm);
4449 scid = le16_to_cpu(req->scid);
4451 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4453 /* For controller id 0 make BR/EDR connection */
4454 if (req->amp_id == AMP_ID_BREDR) {
4455 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4460 /* Validate AMP controller id */
4461 hdev = hci_dev_get(req->amp_id);
4465 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4470 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4473 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4474 struct hci_conn *hs_hcon;
4476 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4480 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4485 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4487 mgr->bredr_chan = chan;
4488 chan->hs_hcon = hs_hcon;
4489 chan->fcs = L2CAP_FCS_NONE;
4490 conn->mtu = hdev->block_mtu;
4499 rsp.scid = cpu_to_le16(scid);
4500 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4501 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4503 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4509 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4511 struct l2cap_move_chan_req req;
4514 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4516 ident = l2cap_get_ident(chan->conn);
4517 chan->ident = ident;
4519 req.icid = cpu_to_le16(chan->scid);
4520 req.dest_amp_id = dest_amp_id;
4522 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4525 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4528 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4530 struct l2cap_move_chan_rsp rsp;
4532 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4534 rsp.icid = cpu_to_le16(chan->dcid);
4535 rsp.result = cpu_to_le16(result);
4537 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4541 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4543 struct l2cap_move_chan_cfm cfm;
4545 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4547 chan->ident = l2cap_get_ident(chan->conn);
4549 cfm.icid = cpu_to_le16(chan->scid);
4550 cfm.result = cpu_to_le16(result);
4552 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4555 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4558 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4560 struct l2cap_move_chan_cfm cfm;
4562 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4564 cfm.icid = cpu_to_le16(icid);
4565 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4567 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4571 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4574 struct l2cap_move_chan_cfm_rsp rsp;
4576 BT_DBG("icid 0x%4.4x", icid);
4578 rsp.icid = cpu_to_le16(icid);
4579 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4582 static void __release_logical_link(struct l2cap_chan *chan)
4584 chan->hs_hchan = NULL;
4585 chan->hs_hcon = NULL;
4587 /* Placeholder - release the logical link */
4590 static void l2cap_logical_fail(struct l2cap_chan *chan)
4592 /* Logical link setup failed */
4593 if (chan->state != BT_CONNECTED) {
4594 /* Create channel failure, disconnect */
4595 l2cap_send_disconn_req(chan, ECONNRESET);
4599 switch (chan->move_role) {
4600 case L2CAP_MOVE_ROLE_RESPONDER:
4601 l2cap_move_done(chan);
4602 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4604 case L2CAP_MOVE_ROLE_INITIATOR:
4605 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4606 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4607 /* Remote has only sent pending or
4608 * success responses, clean up
4610 l2cap_move_done(chan);
4613 /* Other amp move states imply that the move
4614 * has already aborted
4616 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4621 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4622 struct hci_chan *hchan)
4624 struct l2cap_conf_rsp rsp;
4626 chan->hs_hchan = hchan;
4627 chan->hs_hcon->l2cap_data = chan->conn;
4629 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4631 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4634 set_default_fcs(chan);
4636 err = l2cap_ertm_init(chan);
4638 l2cap_send_disconn_req(chan, -err);
4640 l2cap_chan_ready(chan);
4644 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4645 struct hci_chan *hchan)
4647 chan->hs_hcon = hchan->conn;
4648 chan->hs_hcon->l2cap_data = chan->conn;
4650 BT_DBG("move_state %d", chan->move_state);
4652 switch (chan->move_state) {
4653 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4654 /* Move confirm will be sent after a success
4655 * response is received
4657 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4659 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4660 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4661 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4662 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4663 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4664 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4665 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4666 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4667 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4671 /* Move was not in expected state, free the channel */
4672 __release_logical_link(chan);
4674 chan->move_state = L2CAP_MOVE_STABLE;
4678 /* Call with chan locked */
4679 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4682 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4685 l2cap_logical_fail(chan);
4686 __release_logical_link(chan);
4690 if (chan->state != BT_CONNECTED) {
4691 /* Ignore logical link if channel is on BR/EDR */
4692 if (chan->local_amp_id != AMP_ID_BREDR)
4693 l2cap_logical_finish_create(chan, hchan);
4695 l2cap_logical_finish_move(chan, hchan);
4699 void l2cap_move_start(struct l2cap_chan *chan)
4701 BT_DBG("chan %p", chan);
4703 if (chan->local_amp_id == AMP_ID_BREDR) {
4704 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4706 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4707 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4708 /* Placeholder - start physical link setup */
4710 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4711 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4713 l2cap_move_setup(chan);
4714 l2cap_send_move_chan_req(chan, 0);
4718 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4719 u8 local_amp_id, u8 remote_amp_id)
4721 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4722 local_amp_id, remote_amp_id);
4724 chan->fcs = L2CAP_FCS_NONE;
4726 /* Outgoing channel on AMP */
4727 if (chan->state == BT_CONNECT) {
4728 if (result == L2CAP_CR_SUCCESS) {
4729 chan->local_amp_id = local_amp_id;
4730 l2cap_send_create_chan_req(chan, remote_amp_id);
4732 /* Revert to BR/EDR connect */
4733 l2cap_send_conn_req(chan);
4739 /* Incoming channel on AMP */
4740 if (__l2cap_no_conn_pending(chan)) {
4741 struct l2cap_conn_rsp rsp;
4743 rsp.scid = cpu_to_le16(chan->dcid);
4744 rsp.dcid = cpu_to_le16(chan->scid);
4746 if (result == L2CAP_CR_SUCCESS) {
4747 /* Send successful response */
4748 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4749 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4751 /* Send negative response */
4752 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4753 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4756 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4759 if (result == L2CAP_CR_SUCCESS) {
4760 l2cap_state_change(chan, BT_CONFIG);
4761 set_bit(CONF_REQ_SENT, &chan->conf_state);
4762 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4764 l2cap_build_conf_req(chan, buf), buf);
4765 chan->num_conf_req++;
4770 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4773 l2cap_move_setup(chan);
4774 chan->move_id = local_amp_id;
4775 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4777 l2cap_send_move_chan_req(chan, remote_amp_id);
4780 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4782 struct hci_chan *hchan = NULL;
4784 /* Placeholder - get hci_chan for logical link */
4787 if (hchan->state == BT_CONNECTED) {
4788 /* Logical link is ready to go */
4789 chan->hs_hcon = hchan->conn;
4790 chan->hs_hcon->l2cap_data = chan->conn;
4791 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4792 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4794 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4796 /* Wait for logical link to be ready */
4797 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4800 /* Logical link not available */
4801 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4805 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4807 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4809 if (result == -EINVAL)
4810 rsp_result = L2CAP_MR_BAD_ID;
4812 rsp_result = L2CAP_MR_NOT_ALLOWED;
4814 l2cap_send_move_chan_rsp(chan, rsp_result);
4817 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4818 chan->move_state = L2CAP_MOVE_STABLE;
4820 /* Restart data transmission */
4821 l2cap_ertm_send(chan);
4824 /* Invoke with locked chan */
4825 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4827 u8 local_amp_id = chan->local_amp_id;
4828 u8 remote_amp_id = chan->remote_amp_id;
4830 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4831 chan, result, local_amp_id, remote_amp_id);
4833 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4834 l2cap_chan_unlock(chan);
4838 if (chan->state != BT_CONNECTED) {
4839 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4840 } else if (result != L2CAP_MR_SUCCESS) {
4841 l2cap_do_move_cancel(chan, result);
4843 switch (chan->move_role) {
4844 case L2CAP_MOVE_ROLE_INITIATOR:
4845 l2cap_do_move_initiate(chan, local_amp_id,
4848 case L2CAP_MOVE_ROLE_RESPONDER:
4849 l2cap_do_move_respond(chan, result);
4852 l2cap_do_move_cancel(chan, result);
4858 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4859 struct l2cap_cmd_hdr *cmd,
4860 u16 cmd_len, void *data)
4862 struct l2cap_move_chan_req *req = data;
4863 struct l2cap_move_chan_rsp rsp;
4864 struct l2cap_chan *chan;
4866 u16 result = L2CAP_MR_NOT_ALLOWED;
4868 if (cmd_len != sizeof(*req))
4871 icid = le16_to_cpu(req->icid);
4873 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4875 if (!conn->hs_enabled)
4878 chan = l2cap_get_chan_by_dcid(conn, icid);
4880 rsp.icid = cpu_to_le16(icid);
4881 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4882 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4887 chan->ident = cmd->ident;
4889 if (chan->scid < L2CAP_CID_DYN_START ||
4890 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4891 (chan->mode != L2CAP_MODE_ERTM &&
4892 chan->mode != L2CAP_MODE_STREAMING)) {
4893 result = L2CAP_MR_NOT_ALLOWED;
4894 goto send_move_response;
4897 if (chan->local_amp_id == req->dest_amp_id) {
4898 result = L2CAP_MR_SAME_ID;
4899 goto send_move_response;
4902 if (req->dest_amp_id != AMP_ID_BREDR) {
4903 struct hci_dev *hdev;
4904 hdev = hci_dev_get(req->dest_amp_id);
4905 if (!hdev || hdev->dev_type != HCI_AMP ||
4906 !test_bit(HCI_UP, &hdev->flags)) {
4910 result = L2CAP_MR_BAD_ID;
4911 goto send_move_response;
4916 /* Detect a move collision. Only send a collision response
4917 * if this side has "lost", otherwise proceed with the move.
4918 * The winner has the larger bd_addr.
4920 if ((__chan_is_moving(chan) ||
4921 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4922 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4923 result = L2CAP_MR_COLLISION;
4924 goto send_move_response;
4927 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4928 l2cap_move_setup(chan);
4929 chan->move_id = req->dest_amp_id;
4932 if (req->dest_amp_id == AMP_ID_BREDR) {
4933 /* Moving to BR/EDR */
4934 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4935 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4936 result = L2CAP_MR_PEND;
4938 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4939 result = L2CAP_MR_SUCCESS;
4942 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4943 /* Placeholder - uncomment when amp functions are available */
4944 /*amp_accept_physical(chan, req->dest_amp_id);*/
4945 result = L2CAP_MR_PEND;
4949 l2cap_send_move_chan_rsp(chan, result);
4951 l2cap_chan_unlock(chan);
4956 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4958 struct l2cap_chan *chan;
4959 struct hci_chan *hchan = NULL;
4961 chan = l2cap_get_chan_by_scid(conn, icid);
4963 l2cap_send_move_chan_cfm_icid(conn, icid);
4967 __clear_chan_timer(chan);
4968 if (result == L2CAP_MR_PEND)
4969 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4971 switch (chan->move_state) {
4972 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4973 /* Move confirm will be sent when logical link
4976 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4978 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4979 if (result == L2CAP_MR_PEND) {
4981 } else if (test_bit(CONN_LOCAL_BUSY,
4982 &chan->conn_state)) {
4983 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4985 /* Logical link is up or moving to BR/EDR,
4988 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4989 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4992 case L2CAP_MOVE_WAIT_RSP:
4994 if (result == L2CAP_MR_SUCCESS) {
4995 /* Remote is ready, send confirm immediately
4996 * after logical link is ready
4998 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5000 /* Both logical link and move success
5001 * are required to confirm
5003 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5006 /* Placeholder - get hci_chan for logical link */
5008 /* Logical link not available */
5009 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5013 /* If the logical link is not yet connected, do not
5014 * send confirmation.
5016 if (hchan->state != BT_CONNECTED)
5019 /* Logical link is already ready to go */
5021 chan->hs_hcon = hchan->conn;
5022 chan->hs_hcon->l2cap_data = chan->conn;
5024 if (result == L2CAP_MR_SUCCESS) {
5025 /* Can confirm now */
5026 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5028 /* Now only need move success
5031 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5034 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5037 /* Any other amp move state means the move failed. */
5038 chan->move_id = chan->local_amp_id;
5039 l2cap_move_done(chan);
5040 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5043 l2cap_chan_unlock(chan);
5046 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5049 struct l2cap_chan *chan;
5051 chan = l2cap_get_chan_by_ident(conn, ident);
5053 /* Could not locate channel, icid is best guess */
5054 l2cap_send_move_chan_cfm_icid(conn, icid);
5058 __clear_chan_timer(chan);
5060 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5061 if (result == L2CAP_MR_COLLISION) {
5062 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5064 /* Cleanup - cancel move */
5065 chan->move_id = chan->local_amp_id;
5066 l2cap_move_done(chan);
5070 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5072 l2cap_chan_unlock(chan);
5075 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5076 struct l2cap_cmd_hdr *cmd,
5077 u16 cmd_len, void *data)
5079 struct l2cap_move_chan_rsp *rsp = data;
5082 if (cmd_len != sizeof(*rsp))
5085 icid = le16_to_cpu(rsp->icid);
5086 result = le16_to_cpu(rsp->result);
5088 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5090 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5091 l2cap_move_continue(conn, icid, result);
5093 l2cap_move_fail(conn, cmd->ident, icid, result);
5098 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5099 struct l2cap_cmd_hdr *cmd,
5100 u16 cmd_len, void *data)
5102 struct l2cap_move_chan_cfm *cfm = data;
5103 struct l2cap_chan *chan;
5106 if (cmd_len != sizeof(*cfm))
5109 icid = le16_to_cpu(cfm->icid);
5110 result = le16_to_cpu(cfm->result);
5112 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5114 chan = l2cap_get_chan_by_dcid(conn, icid);
5116 /* Spec requires a response even if the icid was not found */
5117 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5121 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5122 if (result == L2CAP_MC_CONFIRMED) {
5123 chan->local_amp_id = chan->move_id;
5124 if (chan->local_amp_id == AMP_ID_BREDR)
5125 __release_logical_link(chan);
5127 chan->move_id = chan->local_amp_id;
5130 l2cap_move_done(chan);
5133 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5135 l2cap_chan_unlock(chan);
5140 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5141 struct l2cap_cmd_hdr *cmd,
5142 u16 cmd_len, void *data)
5144 struct l2cap_move_chan_cfm_rsp *rsp = data;
5145 struct l2cap_chan *chan;
5148 if (cmd_len != sizeof(*rsp))
5151 icid = le16_to_cpu(rsp->icid);
5153 BT_DBG("icid 0x%4.4x", icid);
5155 chan = l2cap_get_chan_by_scid(conn, icid);
5159 __clear_chan_timer(chan);
5161 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5162 chan->local_amp_id = chan->move_id;
5164 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5165 __release_logical_link(chan);
5167 l2cap_move_done(chan);
5170 l2cap_chan_unlock(chan);
5175 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5176 struct l2cap_cmd_hdr *cmd,
5177 u16 cmd_len, u8 *data)
5179 struct hci_conn *hcon = conn->hcon;
5180 struct l2cap_conn_param_update_req *req;
5181 struct l2cap_conn_param_update_rsp rsp;
5182 u16 min, max, latency, to_multiplier;
5185 if (hcon->role != HCI_ROLE_MASTER)
5188 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5191 req = (struct l2cap_conn_param_update_req *) data;
5192 min = __le16_to_cpu(req->min);
5193 max = __le16_to_cpu(req->max);
5194 latency = __le16_to_cpu(req->latency);
5195 to_multiplier = __le16_to_cpu(req->to_multiplier);
5197 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5198 min, max, latency, to_multiplier);
5200 memset(&rsp, 0, sizeof(rsp));
5202 err = hci_check_conn_params(min, max, latency, to_multiplier);
5204 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5206 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5208 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5214 store_hint = hci_le_conn_update(hcon, min, max, latency,
5216 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5217 store_hint, min, max, latency,
5225 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5226 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5229 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5230 struct hci_conn *hcon = conn->hcon;
5231 u16 dcid, mtu, mps, credits, result;
5232 struct l2cap_chan *chan;
5235 if (cmd_len < sizeof(*rsp))
5238 dcid = __le16_to_cpu(rsp->dcid);
5239 mtu = __le16_to_cpu(rsp->mtu);
5240 mps = __le16_to_cpu(rsp->mps);
5241 credits = __le16_to_cpu(rsp->credits);
5242 result = __le16_to_cpu(rsp->result);
5244 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5247 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5248 dcid, mtu, mps, credits, result);
5250 mutex_lock(&conn->chan_lock);
5252 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5260 l2cap_chan_lock(chan);
5263 case L2CAP_CR_SUCCESS:
5267 chan->remote_mps = mps;
5268 chan->tx_credits = credits;
5269 l2cap_chan_ready(chan);
5272 case L2CAP_CR_AUTHENTICATION:
5273 case L2CAP_CR_ENCRYPTION:
5274 /* If we already have MITM protection we can't do
5277 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5278 l2cap_chan_del(chan, ECONNREFUSED);
5282 sec_level = hcon->sec_level + 1;
5283 if (chan->sec_level < sec_level)
5284 chan->sec_level = sec_level;
5286 /* We'll need to send a new Connect Request */
5287 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5289 smp_conn_security(hcon, chan->sec_level);
5293 l2cap_chan_del(chan, ECONNREFUSED);
5297 l2cap_chan_unlock(chan);
5300 mutex_unlock(&conn->chan_lock);
5305 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5311 switch (cmd->code) {
5312 case L2CAP_COMMAND_REJ:
5313 l2cap_command_rej(conn, cmd, cmd_len, data);
5316 case L2CAP_CONN_REQ:
5317 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5320 case L2CAP_CONN_RSP:
5321 case L2CAP_CREATE_CHAN_RSP:
5322 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5325 case L2CAP_CONF_REQ:
5326 err = l2cap_config_req(conn, cmd, cmd_len, data);
5329 case L2CAP_CONF_RSP:
5330 l2cap_config_rsp(conn, cmd, cmd_len, data);
5333 case L2CAP_DISCONN_REQ:
5334 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5337 case L2CAP_DISCONN_RSP:
5338 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5341 case L2CAP_ECHO_REQ:
5342 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5345 case L2CAP_ECHO_RSP:
5348 case L2CAP_INFO_REQ:
5349 err = l2cap_information_req(conn, cmd, cmd_len, data);
5352 case L2CAP_INFO_RSP:
5353 l2cap_information_rsp(conn, cmd, cmd_len, data);
5356 case L2CAP_CREATE_CHAN_REQ:
5357 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5360 case L2CAP_MOVE_CHAN_REQ:
5361 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5364 case L2CAP_MOVE_CHAN_RSP:
5365 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5368 case L2CAP_MOVE_CHAN_CFM:
5369 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5372 case L2CAP_MOVE_CHAN_CFM_RSP:
5373 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5377 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5385 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5386 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5389 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5390 struct l2cap_le_conn_rsp rsp;
5391 struct l2cap_chan *chan, *pchan;
5392 u16 dcid, scid, credits, mtu, mps;
5396 if (cmd_len != sizeof(*req))
5399 scid = __le16_to_cpu(req->scid);
5400 mtu = __le16_to_cpu(req->mtu);
5401 mps = __le16_to_cpu(req->mps);
5406 if (mtu < 23 || mps < 23)
5409 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5412 /* Check if we have socket listening on psm */
5413 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5414 &conn->hcon->dst, LE_LINK);
5416 result = L2CAP_CR_BAD_PSM;
5421 mutex_lock(&conn->chan_lock);
5422 l2cap_chan_lock(pchan);
5424 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5426 result = L2CAP_CR_AUTHENTICATION;
5428 goto response_unlock;
5431 /* Check if we already have channel with that dcid */
5432 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5433 result = L2CAP_CR_NO_MEM;
5435 goto response_unlock;
5438 chan = pchan->ops->new_connection(pchan);
5440 result = L2CAP_CR_NO_MEM;
5441 goto response_unlock;
5444 l2cap_le_flowctl_init(chan);
5446 bacpy(&chan->src, &conn->hcon->src);
5447 bacpy(&chan->dst, &conn->hcon->dst);
5448 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5449 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5453 chan->remote_mps = mps;
5454 chan->tx_credits = __le16_to_cpu(req->credits);
5456 __l2cap_chan_add(conn, chan);
5458 credits = chan->rx_credits;
5460 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5462 chan->ident = cmd->ident;
5464 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5465 l2cap_state_change(chan, BT_CONNECT2);
5466 /* The following result value is actually not defined
5467 * for LE CoC but we use it to let the function know
5468 * that it should bail out after doing its cleanup
5469 * instead of sending a response.
5471 result = L2CAP_CR_PEND;
5472 chan->ops->defer(chan);
5474 l2cap_chan_ready(chan);
5475 result = L2CAP_CR_SUCCESS;
5479 l2cap_chan_unlock(pchan);
5480 mutex_unlock(&conn->chan_lock);
5481 l2cap_chan_put(pchan);
5483 if (result == L2CAP_CR_PEND)
5488 rsp.mtu = cpu_to_le16(chan->imtu);
5489 rsp.mps = cpu_to_le16(chan->mps);
5495 rsp.dcid = cpu_to_le16(dcid);
5496 rsp.credits = cpu_to_le16(credits);
5497 rsp.result = cpu_to_le16(result);
5499 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5504 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5505 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5508 struct l2cap_le_credits *pkt;
5509 struct l2cap_chan *chan;
5510 u16 cid, credits, max_credits;
5512 if (cmd_len != sizeof(*pkt))
5515 pkt = (struct l2cap_le_credits *) data;
5516 cid = __le16_to_cpu(pkt->cid);
5517 credits = __le16_to_cpu(pkt->credits);
5519 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5521 chan = l2cap_get_chan_by_dcid(conn, cid);
5525 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5526 if (credits > max_credits) {
5527 BT_ERR("LE credits overflow");
5528 l2cap_send_disconn_req(chan, ECONNRESET);
5529 l2cap_chan_unlock(chan);
5531 /* Return 0 so that we don't trigger an unnecessary
5532 * command reject packet.
5537 chan->tx_credits += credits;
5539 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5540 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5544 if (chan->tx_credits)
5545 chan->ops->resume(chan);
5547 l2cap_chan_unlock(chan);
5552 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5553 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5556 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5557 struct l2cap_chan *chan;
5559 if (cmd_len < sizeof(*rej))
5562 mutex_lock(&conn->chan_lock);
5564 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5568 l2cap_chan_lock(chan);
5569 l2cap_chan_del(chan, ECONNREFUSED);
5570 l2cap_chan_unlock(chan);
5573 mutex_unlock(&conn->chan_lock);
5577 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5578 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5583 switch (cmd->code) {
5584 case L2CAP_COMMAND_REJ:
5585 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5588 case L2CAP_CONN_PARAM_UPDATE_REQ:
5589 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5592 case L2CAP_CONN_PARAM_UPDATE_RSP:
5595 case L2CAP_LE_CONN_RSP:
5596 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5599 case L2CAP_LE_CONN_REQ:
5600 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5603 case L2CAP_LE_CREDITS:
5604 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5607 case L2CAP_DISCONN_REQ:
5608 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5611 case L2CAP_DISCONN_RSP:
5612 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5616 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5624 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5625 struct sk_buff *skb)
5627 struct hci_conn *hcon = conn->hcon;
5628 struct l2cap_cmd_hdr *cmd;
5632 if (hcon->type != LE_LINK)
5635 if (skb->len < L2CAP_CMD_HDR_SIZE)
5638 cmd = (void *) skb->data;
5639 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5641 len = le16_to_cpu(cmd->len);
5643 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5645 if (len != skb->len || !cmd->ident) {
5646 BT_DBG("corrupted command");
5650 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5652 struct l2cap_cmd_rej_unk rej;
5654 BT_ERR("Wrong link type (%d)", err);
5656 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5657 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5665 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5666 struct sk_buff *skb)
5668 struct hci_conn *hcon = conn->hcon;
5669 u8 *data = skb->data;
5671 struct l2cap_cmd_hdr cmd;
5674 l2cap_raw_recv(conn, skb);
5676 if (hcon->type != ACL_LINK)
5679 while (len >= L2CAP_CMD_HDR_SIZE) {
5681 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5682 data += L2CAP_CMD_HDR_SIZE;
5683 len -= L2CAP_CMD_HDR_SIZE;
5685 cmd_len = le16_to_cpu(cmd.len);
5687 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5690 if (cmd_len > len || !cmd.ident) {
5691 BT_DBG("corrupted command");
5695 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5697 struct l2cap_cmd_rej_unk rej;
5699 BT_ERR("Wrong link type (%d)", err);
5701 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5702 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5714 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5716 u16 our_fcs, rcv_fcs;
5719 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5720 hdr_size = L2CAP_EXT_HDR_SIZE;
5722 hdr_size = L2CAP_ENH_HDR_SIZE;
5724 if (chan->fcs == L2CAP_FCS_CRC16) {
5725 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5726 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5727 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5729 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the remote: send RNR if we are locally busy,
 * otherwise flush pending I-frames, and fall back to an RR S-frame if
 * no frame carrying the F-bit went out.  CONN_SEND_FBIT is set first so
 * that whichever frame is sent carries F=1.
 */
5735 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5737 struct l2cap_ctrl control;
5739 BT_DBG("chan %p", chan);
5741 memset(&control, 0, sizeof(control));
5744 control.reqseq = chan->buffer_seq;
5745 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5747 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5748 control.super = L2CAP_SUPER_RNR;
5749 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy condition; restart the
 * retransmission timer if frames are still unacked.
 */
5752 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5753 chan->unacked_frames > 0)
5754 __set_retrans_timer(chan);
5756 /* Send pending iframes */
5757 l2cap_ertm_send(chan);
5759 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5760 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5761 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5764 control.super = L2CAP_SUPER_RR;
5765 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and advance *last_frag so the
 * next fragment can be linked in O(1).  Caller owns both skbs; the
 * fragment becomes part of the head skb's accounting.
 */
5769 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5770 struct sk_buff **last_frag)
5772 /* skb->len reflects data in skb as well as all fragments
5773 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off
 * the previously appended fragment.
 */
5775 if (!skb_has_frag_list(skb))
5776 skb_shinfo(skb)->frag_list = new_frag;
5778 new_frag->next = NULL;
5780 (*last_frag)->next = new_frag;
5781 *last_frag = new_frag;
/* Fold the fragment into the head skb's length/size bookkeeping */
5783 skb->len += new_frag->len;
5784 skb->data_len += new_frag->len;
5785 skb->truesize += new_frag->truesize;
/* Reassemble an ERTM/streaming SDU from I-frames based on the SAR bits:
 * UNSEGMENTED frames go straight to chan->ops->recv(); START records
 * the 16-bit SDU length and begins accumulation in chan->sdu; CONTINUE
 * appends; END (the default arm, elided) appends, checks the total
 * length and delivers.  On error paths (elided) the partial SDU is
 * freed and state is reset.
 */
5788 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5789 struct l2cap_ctrl *control)
5793 switch (control->sar) {
5794 case L2CAP_SAR_UNSEGMENTED:
5798 err = chan->ops->recv(chan, skb);
5801 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front */
5805 chan->sdu_len = get_unaligned_le16(skb->data);
5806 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU larger than our MTU is a protocol violation */
5808 if (chan->sdu_len > chan->imtu) {
5813 if (skb->len >= chan->sdu_len)
5817 chan->sdu_last_frag = skb;
5823 case L2CAP_SAR_CONTINUE:
5827 append_skb_frag(chan->sdu, skb,
5828 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overrun the SDU */
5831 if (chan->sdu->len >= chan->sdu_len)
5841 append_skb_frag(chan->sdu, skb,
5842 &chan->sdu_last_frag);
5845 if (chan->sdu->len != chan->sdu_len)
5848 err = chan->ops->recv(chan, chan->sdu);
5851 /* Reassembly complete */
5853 chan->sdu_last_frag = NULL;
/* Error path: drop the partially reassembled SDU */
5861 kfree_skb(chan->sdu);
5863 chan->sdu_last_frag = NULL;
/* Re-segment queued outgoing data after an MTU change (e.g. following
 * an AMP channel move).  Body elided in this view — presumably a stub
 * returning 0; verify against the full source.
 */
5870 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM TX state machine that the local receive side has
 * become busy (busy != 0) or cleared (busy == 0).  No-op for non-ERTM
 * channels.
 */
5876 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5880 if (chan->mode != L2CAP_MODE_ERTM)
5883 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5884 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames to l2cap_reassemble_sdu() in
 * strict buffer_seq order until a gap (missing txseq) is found or
 * local-busy is raised.  When the queue empties, leave SREJ_SENT state
 * and ack what was consumed.
 */
5887 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5890 /* Pass sequential frames to l2cap_reassemble_sdu()
5891 * until a gap is encountered.
5894 BT_DBG("chan %p", chan);
5896 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5897 struct sk_buff *skb;
5898 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5899 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5901 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5906 skb_unlink(skb, &chan->srej_q);
5907 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5908 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All out-of-order frames recovered: resume normal receive */
5913 if (skb_queue_empty(&chan->srej_q)) {
5914 chan->rx_state = L2CAP_RX_STATE_RECV;
5915 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: validate reqseq, retransmit the
 * single requested frame, and manage the F-bit handshake.  A reqseq
 * equal to next_tx_seq (nothing outstanding at that seq) or a frame
 * past its max_tx retry budget forces a disconnect.
 */
5921 static void l2cap_handle_srej(struct l2cap_chan *chan,
5922 struct l2cap_ctrl *control)
5924 struct sk_buff *skb;
5926 BT_DBG("chan %p, control %p", chan, control);
5928 if (control->reqseq == chan->next_tx_seq) {
5929 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5930 l2cap_send_disconn_req(chan, ECONNRESET);
5934 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5937 BT_DBG("Seq %d not available for retransmission",
/* Give up on the link once the retry budget is exhausted */
5942 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5943 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5944 l2cap_send_disconn_req(chan, ECONNRESET);
5948 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* P=1: retransmit with F=1; remember reqseq if we were waiting
 * for an F-bit, so a later F=1 SREJ for the same seq is not
 * retransmitted twice (CONN_SREJ_ACT).
 */
5950 if (control->poll) {
5951 l2cap_pass_to_tx(chan, control);
5953 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5954 l2cap_retransmit(chan, control);
5955 l2cap_ertm_send(chan);
5957 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5958 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5959 chan->srej_save_reqseq = control->reqseq;
5962 l2cap_pass_to_tx_fbit(chan, control);
5964 if (control->final) {
5965 if (chan->srej_save_reqseq != control->reqseq ||
5966 !test_and_clear_bit(CONN_SREJ_ACT,
5968 l2cap_retransmit(chan, control);
5970 l2cap_retransmit(chan, control);
5971 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5972 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5973 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: validate reqseq and retransmit all
 * unacked frames starting there.  CONN_REJ_ACT suppresses a duplicate
 * retransmit-all when the F-bit response to our poll arrives after the
 * REJ already triggered one.
 */
5979 static void l2cap_handle_rej(struct l2cap_chan *chan,
5980 struct l2cap_ctrl *control)
5982 struct sk_buff *skb;
5984 BT_DBG("chan %p, control %p", chan, control);
5986 if (control->reqseq == chan->next_tx_seq) {
5987 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5988 l2cap_send_disconn_req(chan, ECONNRESET);
5992 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Retry budget exhausted for the frame being rejected */
5994 if (chan->max_tx && skb &&
5995 bt_cb(skb)->control.retries >= chan->max_tx) {
5996 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5997 l2cap_send_disconn_req(chan, ECONNRESET);
6001 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6003 l2cap_pass_to_tx(chan, control);
6005 if (control->final) {
6006 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6007 l2cap_retransmit_all(chan, control);
6009 l2cap_retransmit_all(chan, control);
6010 l2cap_ertm_send(chan);
6011 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6012 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * EXPECTED, EXPECTED_SREJ, DUPLICATE(_SREJ), UNEXPECTED(_SREJ),
 * INVALID, or INVALID_IGNORE.  The SREJ_SENT-specific checks come
 * first; the "double poll" commentary below explains why out-of-window
 * frames are ignorable only when tx_win <= half the sequence space.
 */
6016 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6018 BT_DBG("chan %p, txseq %d", chan, txseq);
6020 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6021 chan->expected_tx_seq);
6023 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6024 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6026 /* See notes below regarding "double poll" and
6029 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6030 BT_DBG("Invalid/Ignore - after SREJ");
6031 return L2CAP_TXSEQ_INVALID_IGNORE;
6033 BT_DBG("Invalid - in window after SREJ sent");
6034 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: the retransmission we asked for */
6038 if (chan->srej_list.head == txseq) {
6039 BT_DBG("Expected SREJ");
6040 return L2CAP_TXSEQ_EXPECTED_SREJ;
6043 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6044 BT_DBG("Duplicate SREJ - txseq already stored");
6045 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6048 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6049 BT_DBG("Unexpected SREJ - not requested");
6050 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6054 if (chan->expected_tx_seq == txseq) {
6055 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6057 BT_DBG("Invalid - txseq outside tx window");
6058 return L2CAP_TXSEQ_INVALID;
6061 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected within the window: already received */
6065 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6066 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6067 BT_DBG("Duplicate - expected_tx_seq later than txseq")<;
6068 return L2CAP_TXSEQ_DUPLICATE;
6071 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6072 /* A source of invalid packets is a "double poll" condition,
6073 * where delays cause us to send multiple poll packets. If
6074 * the remote stack receives and processes both polls,
6075 * sequence numbers can wrap around in such a way that a
6076 * resent frame has a sequence number that looks like new data
6077 * with a sequence gap. This would trigger an erroneous SREJ
6080 * Fortunately, this is impossible with a tx window that's
6081 * less than half of the maximum sequence number, which allows
6082 * invalid frames to be safely ignored.
6084 * With tx window sizes greater than half of the tx window
6085 * maximum, the frame is invalid and cannot be ignored. This
6086 * causes a disconnect.
6089 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6090 BT_DBG("Invalid/Ignore - txseq outside tx window");
6091 return L2CAP_TXSEQ_INVALID_IGNORE;
6093 BT_DBG("Invalid - txseq outside tx window");
6094 return L2CAP_TXSEQ_INVALID;
6097 BT_DBG("Unexpected - txseq indicates missing frames");
6098 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV (normal) state.  Dispatches on the
 * received event: I-frames are classified by txseq (in-order frames
 * are reassembled, a gap queues the frame and sends SREJs, duplicates
 * and ignorable invalids are dropped, hard invalids disconnect);
 * RR/REJ/SREJ/RNR S-frames drive the TX side.  Frames not retained
 * (skb_in_use stays false) are freed at the end (free elided).
 */
6102 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6103 struct l2cap_ctrl *control,
6104 struct sk_buff *skb, u8 event)
6107 bool skb_in_use = false;
6109 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6113 case L2CAP_EV_RECV_IFRAME:
6114 switch (l2cap_classify_txseq(chan, control->txseq)) {
6115 case L2CAP_TXSEQ_EXPECTED:
6116 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop now; recovery will re-request it */
6118 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6119 BT_DBG("Busy, discarding expected seq %d",
6124 chan->expected_tx_seq = __next_seq(chan,
6127 chan->buffer_seq = chan->expected_tx_seq;
6130 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 acknowledges our poll; retransmit if the REJ
 * handshake did not already cover it.
 */
6134 if (control->final) {
6135 if (!test_and_clear_bit(CONN_REJ_ACT,
6136 &chan->conn_state)) {
6138 l2cap_retransmit_all(chan, control);
6139 l2cap_ertm_send(chan);
6143 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6144 l2cap_send_ack(chan);
6146 case L2CAP_TXSEQ_UNEXPECTED:
6147 l2cap_pass_to_tx(chan, control);
6149 /* Can't issue SREJ frames in the local busy state.
6150 * Drop this frame, it will be seen as missing
6151 * when local busy is exited.
6153 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6154 BT_DBG("Busy, discarding unexpected seq %d",
6159 /* There was a gap in the sequence, so an SREJ
6160 * must be sent for each missing frame. The
6161 * current frame is stored for later use.
6163 skb_queue_tail(&chan->srej_q, skb);
6165 BT_DBG("Queued %p (queue len %d)", skb,
6166 skb_queue_len(&chan->srej_q));
6168 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6169 l2cap_seq_list_clear(&chan->srej_list);
6170 l2cap_send_srej(chan, control->txseq);
6172 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6174 case L2CAP_TXSEQ_DUPLICATE:
6175 l2cap_pass_to_tx(chan, control);
6177 case L2CAP_TXSEQ_INVALID_IGNORE:
6179 case L2CAP_TXSEQ_INVALID:
6181 l2cap_send_disconn_req(chan, ECONNRESET);
6185 case L2CAP_EV_RECV_RR:
6186 l2cap_pass_to_tx(chan, control);
6187 if (control->final) {
6188 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Suppress retransmit-all during an AMP channel move */
6190 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6191 !__chan_is_moving(chan)) {
6193 l2cap_retransmit_all(chan, control);
6196 l2cap_ertm_send(chan);
6197 } else if (control->poll) {
6198 l2cap_send_i_or_rr_or_rnr(chan);
6200 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6201 &chan->conn_state) &&
6202 chan->unacked_frames)
6203 __set_retrans_timer(chan);
6205 l2cap_ertm_send(chan);
6208 case L2CAP_EV_RECV_RNR:
6209 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6210 l2cap_pass_to_tx(chan, control);
6211 if (control && control->poll) {
6212 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6213 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers */
6215 __clear_retrans_timer(chan);
6216 l2cap_seq_list_clear(&chan->retrans_list);
6218 case L2CAP_EV_RECV_REJ:
6219 l2cap_handle_rej(chan, control);
6221 case L2CAP_EV_RECV_SREJ:
6222 l2cap_handle_srej(chan, control);
6228 if (skb && !skb_in_use) {
6229 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state (gap recovery in progress).
 * Incoming I-frames are queued in srej_q regardless of classification
 * variant; an EXPECTED_SREJ pops the SREJ list and drains whatever is
 * now contiguous via l2cap_rx_queued_iframes().  New gaps trigger more
 * SREJs; S-frames are handled much like in the RECV state.
 */
6236 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6237 struct l2cap_ctrl *control,
6238 struct sk_buff *skb, u8 event)
6241 u16 txseq = control->txseq;
6242 bool skb_in_use = false;
6244 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6248 case L2CAP_EV_RECV_IFRAME:
6249 switch (l2cap_classify_txseq(chan, txseq)) {
6250 case L2CAP_TXSEQ_EXPECTED:
6251 /* Keep frame for reassembly later */
6252 l2cap_pass_to_tx(chan, control);
6253 skb_queue_tail(&chan->srej_q, skb);
6255 BT_DBG("Queued %p (queue len %d)", skb,
6256 skb_queue_len(&chan->srej_q));
6258 chan->expected_tx_seq = __next_seq(chan, txseq);
6260 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived: drop its
 * entry and deliver any now-in-order queued frames.
 */
6261 l2cap_seq_list_pop(&chan->srej_list);
6263 l2cap_pass_to_tx(chan, control);
6264 skb_queue_tail(&chan->srej_q, skb);
6266 BT_DBG("Queued %p (queue len %d)", skb,
6267 skb_queue_len(&chan->srej_q));
6269 err = l2cap_rx_queued_iframes(chan);
6274 case L2CAP_TXSEQ_UNEXPECTED:
6275 /* Got a frame that can't be reassembled yet.
6276 * Save it for later, and send SREJs to cover
6277 * the missing frames.
6279 skb_queue_tail(&chan->srej_q, skb);
6281 BT_DBG("Queued %p (queue len %d)", skb,
6282 skb_queue_len(&chan->srej_q));
6284 l2cap_pass_to_tx(chan, control);
6285 l2cap_send_srej(chan, control->txseq);
6287 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6288 /* This frame was requested with an SREJ, but
6289 * some expected retransmitted frames are
6290 * missing. Request retransmission of missing
6293 skb_queue_tail(&chan->srej_q, skb);
6295 BT_DBG("Queued %p (queue len %d)", skb,
6296 skb_queue_len(&chan->srej_q));
6298 l2cap_pass_to_tx(chan, control);
6299 l2cap_send_srej_list(chan, control->txseq);
6301 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6302 /* We've already queued this frame. Drop this copy. */
6303 l2cap_pass_to_tx(chan, control);
6305 case L2CAP_TXSEQ_DUPLICATE:
6306 /* Expecting a later sequence number, so this frame
6307 * was already received. Ignore it completely.
6310 case L2CAP_TXSEQ_INVALID_IGNORE:
6312 case L2CAP_TXSEQ_INVALID:
6314 l2cap_send_disconn_req(chan, ECONNRESET);
6318 case L2CAP_EV_RECV_RR:
6319 l2cap_pass_to_tx(chan, control);
6320 if (control->final) {
6321 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6323 if (!test_and_clear_bit(CONN_REJ_ACT,
6324 &chan->conn_state)) {
6326 l2cap_retransmit_all(chan, control);
6329 l2cap_ertm_send(chan);
6330 } else if (control->poll) {
6331 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6332 &chan->conn_state) &&
6333 chan->unacked_frames) {
6334 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the SREJ tail with F=1 */
6337 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6338 l2cap_send_srej_tail(chan);
6340 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6341 &chan->conn_state) &&
6342 chan->unacked_frames)
6343 __set_retrans_timer(chan);
6345 l2cap_send_ack(chan);
6348 case L2CAP_EV_RECV_RNR:
6349 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6350 l2cap_pass_to_tx(chan, control);
6351 if (control->poll) {
6352 l2cap_send_srej_tail(chan);
/* No poll: just acknowledge with a plain RR */
6354 struct l2cap_ctrl rr_control;
6355 memset(&rr_control, 0, sizeof(rr_control));
6356 rr_control.sframe = 1;
6357 rr_control.super = L2CAP_SUPER_RR;
6358 rr_control.reqseq = chan->buffer_seq;
6359 l2cap_send_sframe(chan, &rr_control);
6363 case L2CAP_EV_RECV_REJ:
6364 l2cap_handle_rej(chan, control);
6366 case L2CAP_EV_RECV_SREJ:
6367 l2cap_handle_srej(chan, control);
6371 if (skb && !skb_in_use) {
6372 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to RECV state, adopt the MTU of
 * the new transport (high-speed block MTU if a hs_hcon exists, else the
 * ACL MTU), and re-segment queued outgoing data for the new MTU.
 */
6379 static int l2cap_finish_move(struct l2cap_chan *chan)
6381 BT_DBG("chan %p", chan);
6383 chan->rx_state = L2CAP_RX_STATE_RECV;
6386 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6388 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6390 return l2cap_resegment(chan);
/* RX state used during a channel move while waiting for the remote's
 * poll (P=1).  On the poll: process its reqseq as an ack, rewind the TX
 * pipeline (next_tx_seq/tx_send_head) to the acknowledged point, finish
 * the move, and respond with F=1.  The triggering event is then
 * re-dispatched through the normal RECV handler.
 */
6393 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6394 struct l2cap_ctrl *control,
6395 struct sk_buff *skb, u8 event)
6399 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6405 l2cap_process_reqseq(chan, control->reqseq);
6407 if (!skb_queue_empty(&chan->tx_q))
6408 chan->tx_send_head = skb_peek(&chan->tx_q);
6410 chan->tx_send_head = NULL;
6412 /* Rewind next_tx_seq to the point expected
6415 chan->next_tx_seq = control->reqseq;
6416 chan->unacked_frames = 0;
6418 err = l2cap_finish_move(chan);
6422 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6423 l2cap_send_i_or_rr_or_rnr(chan);
6425 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frame events fall through to normal RECV processing */
6428 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state used during a channel move while waiting for the remote's
 * final (F=1) response.  Non-final frames are ignored (elided early
 * exit); on F=1 the TX pipeline is rewound, the connection MTU is
 * switched to the new transport, data is re-segmented, and the frame
 * is re-processed through the RECV handler.
 */
6431 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6432 struct l2cap_ctrl *control,
6433 struct sk_buff *skb, u8 event)
6437 if (!control->final)
6440 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6442 chan->rx_state = L2CAP_RX_STATE_RECV;
6443 l2cap_process_reqseq(chan, control->reqseq);
6445 if (!skb_queue_empty(&chan->tx_q))
6446 chan->tx_send_head = skb_peek(&chan->tx_q);
6448 chan->tx_send_head = NULL;
6450 /* Rewind next_tx_seq to the point expected
6453 chan->next_tx_seq = control->reqseq;
6454 chan->unacked_frames = 0;
/* Adopt the MTU of whichever transport the channel now uses */
6457 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6459 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6461 err = l2cap_resegment(chan);
6464 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true when reqseq acknowledges a frame that has been sent but
 * not yet acked, i.e. it lies within [expected_ack_seq, next_tx_seq]
 * in modular sequence-number arithmetic.
 */
6469 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6471 /* Make sure reqseq is for a packet that has been sent but not acked */
6474 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6475 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatch: after validating the frame's reqseq,
 * route the event to the handler for the channel's current RX state.
 * An invalid reqseq (acks something never sent) resets the connection.
 */
6478 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6479 struct sk_buff *skb, u8 event)
6483 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6484 control, skb, event, chan->rx_state);
6486 if (__valid_reqseq(chan, control->reqseq)) {
6487 switch (chan->rx_state) {
6488 case L2CAP_RX_STATE_RECV:
6489 err = l2cap_rx_state_recv(chan, control, skb, event);
6491 case L2CAP_RX_STATE_SREJ_SENT:
6492 err = l2cap_rx_state_srej_sent(chan, control, skb,
6495 case L2CAP_RX_STATE_WAIT_P:
6496 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6498 case L2CAP_RX_STATE_WAIT_F:
6499 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6506 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6507 control->reqseq, chan->next_tx_seq,
6508 chan->expected_ack_seq);
6509 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only the EXPECTED txseq is reassembled; any
 * partially built SDU is discarded on a sequence gap (no retransmission
 * in streaming mode).  last_acked_seq/expected_tx_seq always advance to
 * follow the received frame.
 */
6515 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6516 struct sk_buff *skb)
6520 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6523 if (l2cap_classify_txseq(chan, control->txseq) ==
6524 L2CAP_TXSEQ_EXPECTED) {
6525 l2cap_pass_to_tx(chan, control);
6527 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6528 __next_seq(chan, chan->buffer_seq));
6530 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6532 l2cap_reassemble_sdu(chan, skb, control);
/* Gap detected: abandon the in-progress SDU and drop the frame */
6535 kfree_skb(chan->sdu);
6538 chan->sdu_last_frag = NULL;
6542 BT_DBG("Freeing %p", skb);
6547 chan->last_acked_seq = control->txseq;
6548 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, check the payload against MPS, validate F/P bit legality
 * for the current TX state, then hand I-frames to l2cap_rx() or
 * l2cap_stream_rx() and map S-frame super values to RX events.
 * Any protocol violation resets the connection with ECONNRESET.
 */
6553 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6555 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6559 __unpack_control(chan, skb);
6564 * We can just drop the corrupted I-frame here.
6565 * Receiver will miss it and start proper recovery
6566 * procedures and ask for retransmission.
6568 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length prefix and FCS when checking MPS */
6571 if (!control->sframe && control->sar == L2CAP_SAR_START)
6572 len -= L2CAP_SDULEN_SIZE;
6574 if (chan->fcs == L2CAP_FCS_CRC16)
6575 len -= L2CAP_FCS_SIZE;
6577 if (len > chan->mps) {
6578 l2cap_send_disconn_req(chan, ECONNRESET);
6582 if (!control->sframe) {
6585 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6586 control->sar, control->reqseq, control->final,
6589 /* Validate F-bit - F=0 always valid, F=1 only
6590 * valid in TX WAIT_F
6592 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6595 if (chan->mode != L2CAP_MODE_STREAMING) {
6596 event = L2CAP_EV_RECV_IFRAME;
6597 err = l2cap_rx(chan, control, skb, event);
6599 err = l2cap_stream_rx(chan, control, skb);
6603 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit super field directly onto RX events */
6605 const u8 rx_func_to_event[4] = {
6606 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6607 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6610 /* Only I-frames are expected in streaming mode */
6611 if (chan->mode == L2CAP_MODE_STREAMING)
6614 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6615 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a violation */
6619 BT_ERR("Trailing bytes: %d in sframe", len);
6620 l2cap_send_disconn_req(chan, ECONNRESET);
6624 /* Validate F and P bits */
6625 if (control->final && (control->poll ||
6626 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6629 event = rx_func_to_event[control->super];
6630 if (l2cap_rx(chan, control, skb, event))
6631 l2cap_send_disconn_req(chan, ECONNRESET);
/* LE credit-based flow control: once our remaining rx_credits fall
 * below half of le_max_credits, top the sender back up to the full
 * amount with an LE Flow Control Credit packet.
 */
6641 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6643 struct l2cap_conn *conn = chan->conn;
6644 struct l2cap_le_credits pkt;
6647 /* We return more credits to the sender only after the amount of
6648 * credits falls below half of the initial amount.
6650 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6653 return_credits = le_max_credits - chan->rx_credits;
6655 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6657 chan->rx_credits += return_credits;
6659 pkt.cid = cpu_to_le16(chan->scid);
6660 pkt.credits = cpu_to_le16(return_credits);
6662 chan->ident = l2cap_get_ident(conn);
6664 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one LE credit-based-mode PDU.  Consumes a credit (disconnect
 * if the peer sent without credit), enforces MTU/SDU-length limits,
 * reassembles multi-PDU SDUs in chan->sdu, and delivers complete SDUs
 * to chan->ops->recv().  The final comment documents why errors that
 * already freed the skb must not also be returned to the caller.
 */
6667 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6671 if (!chan->rx_credits) {
6672 BT_ERR("No credits to receive LE L2CAP data");
6673 l2cap_send_disconn_req(chan, ECONNRESET);
6677 if (chan->imtu < skb->len) {
6678 BT_ERR("Too big LE L2CAP PDU");
6683 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
/* Replenish the sender's credits if we dropped below threshold */
6685 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU carries the total SDU length up front */
6692 sdu_len = get_unaligned_le16(skb->data);
6693 skb_pull(skb, L2CAP_SDULEN_SIZE);
6695 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6696 sdu_len, skb->len, chan->imtu);
6698 if (sdu_len > chan->imtu) {
6699 BT_ERR("Too big LE L2CAP SDU length received");
6704 if (skb->len > sdu_len) {
6705 BT_ERR("Too much LE L2CAP data received");
/* SDU complete in a single PDU: deliver directly */
6710 if (skb->len == sdu_len)
6711 return chan->ops->recv(chan, skb);
6714 chan->sdu_len = sdu_len;
6715 chan->sdu_last_frag = skb;
6720 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6721 chan->sdu->len, skb->len, chan->sdu_len);
6723 if (chan->sdu->len + skb->len > chan->sdu_len) {
6724 BT_ERR("Too much LE L2CAP data received");
6729 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6732 if (chan->sdu->len == chan->sdu_len) {
6733 err = chan->ops->recv(chan, chan->sdu);
6736 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU */
6744 kfree_skb(chan->sdu);
6746 chan->sdu_last_frag = NULL;
6750 /* We can't return an error here since we took care of the skb
6751 * freeing internally. An error return would cause the caller to
6752 * do a double-free of the skb.
/* Route a data frame to the channel owning the given source CID, then
 * hand it to the mode-specific receive path (LE flow-control, basic,
 * or ERTM/streaming).  Frames for unknown CIDs — except the A2MP CID,
 * which may create its channel on demand — are dropped.
 */
6757 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6758 struct sk_buff *skb)
6760 struct l2cap_chan *chan;
6762 chan = l2cap_get_chan_by_scid(conn, cid);
6764 if (cid == L2CAP_CID_A2MP) {
6765 chan = a2mp_channel_create(conn, skb);
6771 l2cap_chan_lock(chan);
6773 BT_DBG("unknown cid 0x%4.4x", cid);
6774 /* Drop packet and return */
6780 BT_DBG("chan %p, len %d", chan, skb->len);
6782 if (chan->state != BT_CONNECTED)
6785 switch (chan->mode) {
6786 case L2CAP_MODE_LE_FLOWCTL:
6787 if (l2cap_le_data_rcv(chan, skb) < 0)
6792 case L2CAP_MODE_BASIC:
6793 /* If socket recv buffers overflows we drop data here
6794 * which is *bad* because L2CAP has to be reliable.
6795 * But we don't have any other choice. L2CAP doesn't
6796 * provide flow control mechanism. */
6798 if (chan->imtu < skb->len) {
6799 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6803 if (!chan->ops->recv(chan, skb))
6807 case L2CAP_MODE_ERTM:
6808 case L2CAP_MODE_STREAMING:
6809 l2cap_data_rcv(chan, skb);
6813 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6821 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * bound to the given PSM on an ACL link.  The remote address and PSM
 * are stashed in the skb's control block for recvmsg()'s msg_name.
 * A successful recv transfers skb ownership; otherwise it is dropped.
 */
6824 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6825 struct sk_buff *skb)
6827 struct hci_conn *hcon = conn->hcon;
6828 struct l2cap_chan *chan;
6830 if (hcon->type != ACL_LINK)
6833 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6838 BT_DBG("chan %p, len %d", chan, skb->len);
6840 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6843 if (chan->imtu < skb->len)
6846 /* Store remote BD_ADDR and PSM for msg_name */
6847 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6848 bt_cb(skb)->psm = psm;
/* Drop the lookup reference taken by l2cap_global_chan_by_psm() */
6850 if (!chan->ops->recv(chan, skb)) {
6851 l2cap_chan_put(chan);
6856 l2cap_chan_put(chan);
/* Top-level L2CAP frame demultiplexer.  Frames arriving before the HCI
 * connection is fully up are queued on pending_rx; otherwise the basic
 * L2CAP header is stripped and the frame is routed by CID to the
 * signaling, connectionless, LE-signaling, or per-channel data path.
 * Blacklisted LE peers have their data silently discarded.
 */
6861 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6863 struct l2cap_hdr *lh = (void *) skb->data;
6864 struct hci_conn *hcon = conn->hcon;
6868 if (hcon->state != BT_CONNECTED) {
6869 BT_DBG("queueing pending rx skb");
6870 skb_queue_tail(&conn->pending_rx, skb);
6874 skb_pull(skb, L2CAP_HDR_SIZE);
6875 cid = __le16_to_cpu(lh->cid);
6876 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
6878 if (len != skb->len) {
6883 /* Since we can't actively block incoming LE connections we must
6884 * at least ensure that we ignore incoming data from them.
6886 if (hcon->type == LE_LINK &&
6887 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6888 bdaddr_type(hcon, hcon->dst_type))) {
6893 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6896 case L2CAP_CID_SIGNALING:
6897 l2cap_sig_channel(conn, skb);
6900 case L2CAP_CID_CONN_LESS:
6901 psm = get_unaligned((__le16 *) skb->data);
6902 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6903 l2cap_conless_channel(conn, psm, skb);
6906 case L2CAP_CID_LE_SIGNALING:
6907 l2cap_le_sig_channel(conn, skb);
6911 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI link
 * reached BT_CONNECTED through the normal receive path.
 */
6916 static void process_pending_rx(struct work_struct *work)
6918 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6920 struct sk_buff *skb;
6924 while ((skb = skb_dequeue(&conn->pending_rx)))
6925 l2cap_recv_frame(conn, skb);
/* Allocate and attach an l2cap_conn to an HCI connection (idempotent:
 * an existing conn is presumably returned early — elided).  Sets up the
 * HCI channel, MTU from the link type, locks, lists, the info timer and
 * the pending-rx/ID-address-update work items.
 */
6928 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6930 struct l2cap_conn *conn = hcon->l2cap_data;
6931 struct hci_chan *hchan;
6936 hchan = hci_chan_create(hcon);
6940 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out */
6942 hci_chan_del(hchan);
6946 kref_init(&conn->ref);
6947 hcon->l2cap_data = conn;
6948 conn->hcon = hci_conn_get(hcon);
6949 conn->hchan = hchan;
6951 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU follows the transport: LE MTU when advertised, else ACL MTU */
6953 switch (hcon->type) {
6955 if (hcon->hdev->le_mtu) {
6956 conn->mtu = hcon->hdev->le_mtu;
6961 conn->mtu = hcon->hdev->acl_mtu;
6965 conn->feat_mask = 0;
6967 if (hcon->type == ACL_LINK)
6968 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6969 &hcon->hdev->dev_flags);
6971 mutex_init(&conn->ident_lock);
6972 mutex_init(&conn->chan_lock);
6974 INIT_LIST_HEAD(&conn->chan_l);
6975 INIT_LIST_HEAD(&conn->users);
6977 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6979 skb_queue_head_init(&conn->pending_rx);
6980 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6981 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
6983 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE allows any value
 * up to 0x00ff; BR/EDR requires an odd PSM with bit 8 clear, per the
 * Bluetooth Core Specification's PSM encoding rules.
 */
6988 static bool is_valid_psm(u16 psm, u8 dst_type) {
6992 if (bdaddr_type_is_le(dst_type))
6993 return (psm <= 0x00ff);
6995 /* PSM must be odd and lsb of upper byte must be 0 */
6996 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection for chan to dst/dst_type with
 * the given PSM or fixed CID.  Validates PSM/CID/mode, resolves an HCI
 * route, creates (or reuses) the ACL or LE baseband link, binds the
 * channel to the resulting l2cap_conn, and either completes immediately
 * (link already up) or starts the connect state machine.  Returns 0 or
 * a negative errno (error/unlock paths elided in this view).
 */
6999 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7000 bdaddr_t *dst, u8 dst_type)
7002 struct l2cap_conn *conn;
7003 struct hci_conn *hcon;
7004 struct hci_dev *hdev;
7007 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7008 dst_type, __le16_to_cpu(psm));
7010 hdev = hci_get_route(dst, &chan->src);
7012 return -EHOSTUNREACH;
/* Raw channels may skip PSM validation; everything else needs a
 * valid PSM or an explicit CID.
 */
7016 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7017 chan->chan_type != L2CAP_CHAN_RAW) {
7022 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7027 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7032 switch (chan->mode) {
7033 case L2CAP_MODE_BASIC:
7035 case L2CAP_MODE_LE_FLOWCTL:
7036 l2cap_le_flowctl_init(chan);
7038 case L2CAP_MODE_ERTM:
7039 case L2CAP_MODE_STREAMING:
7048 switch (chan->state) {
7052 /* Already connecting */
7057 /* Already connected */
7071 /* Set destination address and psm */
7072 bacpy(&chan->dst, dst);
7073 chan->dst_type = dst_type;
7078 if (bdaddr_type_is_le(dst_type)) {
7081 /* Convert from L2CAP channel address type to HCI address type
7083 if (dst_type == BDADDR_LE_PUBLIC)
7084 dst_type = ADDR_LE_DEV_PUBLIC;
7086 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we must connect as slave */
7088 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7089 role = HCI_ROLE_SLAVE;
7091 role = HCI_ROLE_MASTER;
7093 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7094 HCI_LE_CONN_TIMEOUT, role);
7096 u8 auth_type = l2cap_get_auth_type(chan);
7097 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7101 err = PTR_ERR(hcon);
7105 conn = l2cap_conn_add(hcon);
7107 hci_conn_drop(hcon);
7112 mutex_lock(&conn->chan_lock);
7113 l2cap_chan_lock(chan);
/* Requested fixed CID already claimed on this connection */
7115 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7116 hci_conn_drop(hcon);
7121 /* Update source addr of the socket */
7122 bacpy(&chan->src, &hcon->src);
7123 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7125 __l2cap_chan_add(conn, chan);
7127 /* l2cap_chan_add takes its own ref so we can drop this one */
7128 hci_conn_drop(hcon);
7130 l2cap_state_change(chan, BT_CONNECT);
7131 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7133 /* Release chan->sport so that it can be reused by other
7134 * sockets (as it's only used for listening sockets).
7136 write_lock(&chan_list_lock);
7138 write_unlock(&chan_list_lock);
7140 if (hcon->state == BT_CONNECTED) {
7141 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7142 __clear_chan_timer(chan);
7143 if (l2cap_chan_check_security(chan, true))
7144 l2cap_state_change(chan, BT_CONNECTED);
7146 l2cap_do_start(chan);
7152 l2cap_chan_unlock(chan);
7153 mutex_unlock(&conn->chan_lock);
7155 hci_dev_unlock(hdev);
7159 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7161 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and build an accept/role-switch link-mode mask.  Channels
 * bound to this adapter's exact address (lm1) take precedence over
 * BDADDR_ANY wildcard listeners (lm2).
 */
7163 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7165 int exact = 0, lm1 = 0, lm2 = 0;
7166 struct l2cap_chan *c;
7168 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7170 /* Find listening sockets and check their link_mode */
7171 read_lock(&chan_list_lock);
7172 list_for_each_entry(c, &chan_list, global_l) {
7173 if (c->state != BT_LISTEN)
7176 if (!bacmp(&c->src, &hdev->bdaddr)) {
7177 lm1 |= HCI_LM_ACCEPT;
7178 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7179 lm1 |= HCI_LM_MASTER;
7181 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7182 lm2 |= HCI_LM_ACCEPT;
7183 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7184 lm2 |= HCI_LM_MASTER;
7187 read_unlock(&chan_list_lock);
7189 return exact ? lm1 : lm2;
7192 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7193 * from an existing channel in the list or from the beginning of the
7194 * global list (by passing NULL as first parameter).
/* Find the next listening fixed channel matching src and link_type,
 * resuming after c (or from the list head when c is NULL).  Skips
 * channels of the wrong type, state, bound address, or transport.
 * The match is presumably returned with a reference held (elided) —
 * callers pair this with l2cap_chan_put(); verify in full source.
 */
7196 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7197 bdaddr_t *src, u8 link_type)
7199 read_lock(&chan_list_lock);
7202 c = list_next_entry(c, global_l);
7204 c = list_entry(chan_list.next, typeof(*c), global_l);
7206 list_for_each_entry_from(c, &chan_list, global_l) {
7207 if (c->chan_type != L2CAP_CHAN_FIXED)
7209 if (c->state != BT_LISTEN)
7211 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7213 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7215 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7219 read_unlock(&chan_list_lock);
7223 read_unlock(&chan_list_lock);
/* HCI callback when a baseband connection completes.  On failure the
 * L2CAP connection is torn down; on success an l2cap_conn is attached,
 * blacklisted peers are rejected, and every listening fixed channel is
 * offered the new connection (one lookup at a time, because holding
 * the list lock across l2cap_chan_lock() could sleep).
 */
7228 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7230 struct hci_dev *hdev = hcon->hdev;
7231 struct l2cap_conn *conn;
7232 struct l2cap_chan *pchan;
7235 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7238 l2cap_conn_del(hcon, bt_to_errno(status));
7242 conn = l2cap_conn_add(hcon);
7246 dst_type = bdaddr_type(hcon, hcon->dst_type);
7248 /* If device is blocked, do not create channels for it */
7249 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7252 /* Find fixed channels and notify them of the new connection. We
7253 * use multiple individual lookups, continuing each time where
7254 * we left off, because the list lock would prevent calling the
7255 * potentially sleeping l2cap_chan_lock() function.
7257 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
7259 struct l2cap_chan *chan, *next;
7261 /* Client fixed channels should override server ones */
7262 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7265 l2cap_chan_lock(pchan);
/* Clone a per-connection channel from the listening parent */
7266 chan = pchan->ops->new_connection(pchan);
7268 bacpy(&chan->src, &hcon->src);
7269 bacpy(&chan->dst, &hcon->dst);
7270 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7271 chan->dst_type = dst_type;
7273 __l2cap_chan_add(conn, chan);
7276 l2cap_chan_unlock(pchan);
7278 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7280 l2cap_chan_put(pchan);
7284 l2cap_conn_ready(conn);
7287 int l2cap_disconn_ind(struct hci_conn *hcon)
7289 struct l2cap_conn *conn = hcon->l2cap_data;
7291 BT_DBG("hcon %p", hcon);
7294 return HCI_ERROR_REMOTE_USER_TERM;
7295 return conn->disc_reason;
7298 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7300 BT_DBG("hcon %p reason %d", hcon, reason);
7302 l2cap_conn_del(hcon, bt_to_errno(reason));
7305 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7307 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7310 if (encrypt == 0x00) {
7311 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7312 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7313 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7314 chan->sec_level == BT_SECURITY_FIPS)
7315 l2cap_chan_close(chan, ECONNREFUSED);
7317 if (chan->sec_level == BT_SECURITY_MEDIUM)
7318 __clear_chan_timer(chan);
7322 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7324 struct l2cap_conn *conn = hcon->l2cap_data;
7325 struct l2cap_chan *chan;
7330 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7332 mutex_lock(&conn->chan_lock);
7334 list_for_each_entry(chan, &conn->chan_l, list) {
7335 l2cap_chan_lock(chan);
7337 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7338 state_to_string(chan->state));
7340 if (chan->scid == L2CAP_CID_A2MP) {
7341 l2cap_chan_unlock(chan);
7345 if (!status && encrypt)
7346 chan->sec_level = hcon->sec_level;
7348 if (!__l2cap_no_conn_pending(chan)) {
7349 l2cap_chan_unlock(chan);
7353 if (!status && (chan->state == BT_CONNECTED ||
7354 chan->state == BT_CONFIG)) {
7355 chan->ops->resume(chan);
7356 l2cap_check_encryption(chan, encrypt);
7357 l2cap_chan_unlock(chan);
7361 if (chan->state == BT_CONNECT) {
7363 l2cap_start_connection(chan);
7365 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7366 } else if (chan->state == BT_CONNECT2 &&
7367 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7368 struct l2cap_conn_rsp rsp;
7372 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7373 res = L2CAP_CR_PEND;
7374 stat = L2CAP_CS_AUTHOR_PEND;
7375 chan->ops->defer(chan);
7377 l2cap_state_change(chan, BT_CONFIG);
7378 res = L2CAP_CR_SUCCESS;
7379 stat = L2CAP_CS_NO_INFO;
7382 l2cap_state_change(chan, BT_DISCONN);
7383 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7384 res = L2CAP_CR_SEC_BLOCK;
7385 stat = L2CAP_CS_NO_INFO;
7388 rsp.scid = cpu_to_le16(chan->dcid);
7389 rsp.dcid = cpu_to_le16(chan->scid);
7390 rsp.result = cpu_to_le16(res);
7391 rsp.status = cpu_to_le16(stat);
7392 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7395 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7396 res == L2CAP_CR_SUCCESS) {
7398 set_bit(CONF_REQ_SENT, &chan->conf_state);
7399 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7401 l2cap_build_conf_req(chan, buf),
7403 chan->num_conf_req++;
7407 l2cap_chan_unlock(chan);
7410 mutex_unlock(&conn->chan_lock);
7415 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7417 struct l2cap_conn *conn = hcon->l2cap_data;
7418 struct l2cap_hdr *hdr;
7421 /* For AMP controller do not create l2cap conn */
7422 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7426 conn = l2cap_conn_add(hcon);
7431 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7435 case ACL_START_NO_FLUSH:
7438 BT_ERR("Unexpected start frame (len %d)", skb->len);
7439 kfree_skb(conn->rx_skb);
7440 conn->rx_skb = NULL;
7442 l2cap_conn_unreliable(conn, ECOMM);
7445 /* Start fragment always begin with Basic L2CAP header */
7446 if (skb->len < L2CAP_HDR_SIZE) {
7447 BT_ERR("Frame is too short (len %d)", skb->len);
7448 l2cap_conn_unreliable(conn, ECOMM);
7452 hdr = (struct l2cap_hdr *) skb->data;
7453 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7455 if (len == skb->len) {
7456 /* Complete frame received */
7457 l2cap_recv_frame(conn, skb);
7461 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7463 if (skb->len > len) {
7464 BT_ERR("Frame is too long (len %d, expected len %d)",
7466 l2cap_conn_unreliable(conn, ECOMM);
7470 /* Allocate skb for the complete frame (with header) */
7471 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7475 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7477 conn->rx_len = len - skb->len;
7481 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7483 if (!conn->rx_len) {
7484 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7485 l2cap_conn_unreliable(conn, ECOMM);
7489 if (skb->len > conn->rx_len) {
7490 BT_ERR("Fragment is too long (len %d, expected %d)",
7491 skb->len, conn->rx_len);
7492 kfree_skb(conn->rx_skb);
7493 conn->rx_skb = NULL;
7495 l2cap_conn_unreliable(conn, ECOMM);
7499 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7501 conn->rx_len -= skb->len;
7503 if (!conn->rx_len) {
7504 /* Complete frame received. l2cap_recv_frame
7505 * takes ownership of the skb so set the global
7506 * rx_skb pointer to NULL first.
7508 struct sk_buff *rx_skb = conn->rx_skb;
7509 conn->rx_skb = NULL;
7510 l2cap_recv_frame(conn, rx_skb);
7520 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7522 struct l2cap_chan *c;
7524 read_lock(&chan_list_lock);
7526 list_for_each_entry(c, &chan_list, global_l) {
7527 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7529 c->state, __le16_to_cpu(c->psm),
7530 c->scid, c->dcid, c->imtu, c->omtu,
7531 c->sec_level, c->mode);
7534 read_unlock(&chan_list_lock);
7539 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7541 return single_open(file, l2cap_debugfs_show, inode->i_private);
7544 static const struct file_operations l2cap_debugfs_fops = {
7545 .open = l2cap_debugfs_open,
7547 .llseek = seq_lseek,
7548 .release = single_release,
7551 static struct dentry *l2cap_debugfs;
7553 int __init l2cap_init(void)
7557 err = l2cap_init_sockets();
7561 if (IS_ERR_OR_NULL(bt_debugfs))
7564 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7565 NULL, &l2cap_debugfs_fops);
7567 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7569 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7575 void l2cap_exit(void)
7577 debugfs_remove(l2cap_debugfs);
7578 l2cap_cleanup_sockets();
/* Module parameter: allow disabling Enhanced Retransmission Mode
 * (readable/writable via sysfs, mode 0644).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");