2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* L2CAP feature mask: fixed channels and Unicast Connectionless Data. */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Fixed channel bitmap, octet 0: signalling channel + connectionless. */
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 and removed from the head in constant time, without further memory allocation.
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a reason derived from
 * its state, then release the timer's channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;	/* Fix: restore the default reason */

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Apply default ERTM/security parameters to a freshly created channel. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
/* NOTE(review): one line (489) is missing from this extraction between
 * the sec_level and flags defaults — confirm against the full source. */
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: pick source/destination CIDs and default MTUs
 * based on the channel type, seed EFS defaults, take channel and HCI
 * connection references and link the channel into conn->chan_l.
 * Caller must hold conn->chan_lock.
 * NOTE(review): this extraction dropped several structural lines
 * (else branches, breaks, the default: label) — confirm against the
 * full source before relying on the exact switch layout.
 */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
/* Any disconnect from here on is attributed to the remote user. */
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
/* LE ATT keeps the fixed ATT CID; other LE channels get a dynamic CID. */
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort EFS (Extended Flow Specification) parameters. */
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection list owns a channel ref; the channel pins the hcon. */
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: cancel its timer, unlink it from
 * conn->chan_l, drop the references taken by __l2cap_chan_add(), tear
 * down AMP state, notify the socket layer and release mode-specific
 * ERTM/streaming resources.
 * NOTE(review): this extraction dropped structural lines (the `if (conn)`
 * guard, early return on CONF_NOT_COMPLETE, switch header/breaks) —
 * confirm control flow against the full source.
 */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
/* Drop the list's channel reference (taken in __l2cap_chan_add). */
573 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hcon reference. */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
/* fall through to the streaming cleanup (shared tx queue purge) */
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
624 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
626 switch (chan->state) {
628 chan->ops->teardown(chan, 0);
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
636 l2cap_send_disconn_req(chan, reason);
638 l2cap_chan_del(chan, reason);
642 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
643 conn->hcon->type == ACL_LINK) {
644 struct l2cap_conn_rsp rsp;
647 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
648 result = L2CAP_CR_SEC_BLOCK;
650 result = L2CAP_CR_BAD_PSM;
652 l2cap_state_change(chan, BT_DISCONN);
654 rsp.scid = cpu_to_le16(chan->dcid);
655 rsp.dcid = cpu_to_le16(chan->scid);
656 rsp.result = cpu_to_le16(result);
657 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
662 l2cap_chan_del(chan, reason);
667 l2cap_chan_del(chan, reason);
671 chan->ops->teardown(chan, 0);
676 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 switch (chan->chan_type) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
686 return HCI_AT_NO_BONDING;
689 case L2CAP_CHAN_CONN_LESS:
690 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
691 if (chan->sec_level == BT_SECURITY_LOW)
692 chan->sec_level = BT_SECURITY_SDP;
694 if (chan->sec_level == BT_SECURITY_HIGH)
695 return HCI_AT_NO_BONDING_MITM;
697 return HCI_AT_NO_BONDING;
699 case L2CAP_CHAN_CONN_ORIENTED:
700 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
701 if (chan->sec_level == BT_SECURITY_LOW)
702 chan->sec_level = BT_SECURITY_SDP;
704 if (chan->sec_level == BT_SECURITY_HIGH)
705 return HCI_AT_NO_BONDING_MITM;
707 return HCI_AT_NO_BONDING;
711 switch (chan->sec_level) {
712 case BT_SECURITY_HIGH:
713 return HCI_AT_GENERAL_BONDING_MITM;
714 case BT_SECURITY_MEDIUM:
715 return HCI_AT_GENERAL_BONDING;
717 return HCI_AT_NO_BONDING;
723 /* Service level security */
724 int l2cap_chan_check_security(struct l2cap_chan *chan)
726 struct l2cap_conn *conn = chan->conn;
729 if (conn->hcon->type == LE_LINK)
730 return smp_conn_security(conn->hcon, chan->sec_level);
732 auth_type = l2cap_get_auth_type(chan);
734 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
737 static u8 l2cap_get_ident(struct l2cap_conn *conn)
741 /* Get next available identificator.
742 * 1 - 128 are used by kernel.
743 * 129 - 199 are reserved.
744 * 200 - 254 are used by utilities like l2ping, etc.
747 spin_lock(&conn->lock);
749 if (++conn->tx_ident > 128)
754 spin_unlock(&conn->lock);
759 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
762 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
765 BT_DBG("code 0x%2.2x", code);
770 if (lmp_no_flush_capable(conn->hcon->hdev))
771 flags = ACL_START_NO_FLUSH;
775 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
776 skb->priority = HCI_PRIO_MAX;
778 hci_send_acl(conn->hchan, skb, flags);
781 static bool __chan_is_moving(struct l2cap_chan *chan)
783 return chan->move_state != L2CAP_MOVE_STABLE &&
784 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
787 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
789 struct hci_conn *hcon = chan->conn->hcon;
792 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
795 if (chan->hs_hcon && !__chan_is_moving(chan)) {
797 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
804 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
805 lmp_no_flush_capable(hcon->hdev))
806 flags = ACL_START_NO_FLUSH;
810 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
811 hci_send_acl(chan->conn->hchan, skb, flags);
814 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
816 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
817 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
819 if (enh & L2CAP_CTRL_FRAME_TYPE) {
822 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
823 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
830 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
831 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
838 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
840 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
841 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
843 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
846 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
847 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
854 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
855 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
862 static inline void __unpack_control(struct l2cap_chan *chan,
865 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
866 __unpack_extended_control(get_unaligned_le32(skb->data),
867 &bt_cb(skb)->control);
868 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
870 __unpack_enhanced_control(get_unaligned_le16(skb->data),
871 &bt_cb(skb)->control);
872 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
876 static u32 __pack_extended_control(struct l2cap_ctrl *control)
880 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
881 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
883 if (control->sframe) {
884 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
885 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
886 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
888 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
889 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
895 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
899 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
900 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
902 if (control->sframe) {
903 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
904 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
905 packed |= L2CAP_CTRL_FRAME_TYPE;
907 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
908 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
914 static inline void __pack_control(struct l2cap_chan *chan,
915 struct l2cap_ctrl *control,
918 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
919 put_unaligned_le32(__pack_extended_control(control),
920 skb->data + L2CAP_HDR_SIZE);
922 put_unaligned_le16(__pack_enhanced_control(control),
923 skb->data + L2CAP_HDR_SIZE);
927 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
929 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
930 return L2CAP_EXT_HDR_SIZE;
932 return L2CAP_ENH_HDR_SIZE;
935 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
939 struct l2cap_hdr *lh;
940 int hlen = __ertm_hdr_size(chan);
942 if (chan->fcs == L2CAP_FCS_CRC16)
943 hlen += L2CAP_FCS_SIZE;
945 skb = bt_skb_alloc(hlen, GFP_KERNEL);
948 return ERR_PTR(-ENOMEM);
950 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
951 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
952 lh->cid = cpu_to_le16(chan->dcid);
954 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
955 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
957 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
959 if (chan->fcs == L2CAP_FCS_CRC16) {
960 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
961 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
964 skb->priority = HCI_PRIO_MAX;
968 static void l2cap_send_sframe(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control)
974 BT_DBG("chan %p, control %p", chan, control);
976 if (!control->sframe)
979 if (__chan_is_moving(chan))
982 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
986 if (control->super == L2CAP_SUPER_RR)
987 clear_bit(CONN_RNR_SENT, &chan->conn_state);
988 else if (control->super == L2CAP_SUPER_RNR)
989 set_bit(CONN_RNR_SENT, &chan->conn_state);
991 if (control->super != L2CAP_SUPER_SREJ) {
992 chan->last_acked_seq = control->reqseq;
993 __clear_ack_timer(chan);
996 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
997 control->final, control->poll, control->super);
999 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1000 control_field = __pack_extended_control(control);
1002 control_field = __pack_enhanced_control(control);
1004 skb = l2cap_create_sframe_pdu(chan, control_field);
1006 l2cap_do_send(chan, skb);
1009 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1011 struct l2cap_ctrl control;
1013 BT_DBG("chan %p, poll %d", chan, poll);
1015 memset(&control, 0, sizeof(control));
1017 control.poll = poll;
1019 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1020 control.super = L2CAP_SUPER_RNR;
1022 control.super = L2CAP_SUPER_RR;
1024 control.reqseq = chan->buffer_seq;
1025 l2cap_send_sframe(chan, &control);
1028 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1030 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1033 static bool __amp_capable(struct l2cap_chan *chan)
1035 struct l2cap_conn *conn = chan->conn;
1036 struct hci_dev *hdev;
1037 bool amp_available = false;
1039 if (!conn->hs_enabled)
1042 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1045 read_lock(&hci_dev_list_lock);
1046 list_for_each_entry(hdev, &hci_dev_list, list) {
1047 if (hdev->amp_type != AMP_TYPE_BREDR &&
1048 test_bit(HCI_UP, &hdev->flags)) {
1049 amp_available = true;
1053 read_unlock(&hci_dev_list_lock);
1055 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1056 return amp_available;
/* Validate the channel's Extended Flow Specification parameters.
 * NOTE(review): the function body is missing from this extraction —
 * only the signature and leading comment survive; restore from the
 * full source.
 */
1061 static bool l2cap_check_efs(struct l2cap_chan *chan)
1063 /* Check EFS parameters */
1067 void l2cap_send_conn_req(struct l2cap_chan *chan)
1069 struct l2cap_conn *conn = chan->conn;
1070 struct l2cap_conn_req req;
1072 req.scid = cpu_to_le16(chan->scid);
1073 req.psm = chan->psm;
1075 chan->ident = l2cap_get_ident(conn);
1077 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1079 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1082 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1084 struct l2cap_create_chan_req req;
1085 req.scid = cpu_to_le16(chan->scid);
1086 req.psm = chan->psm;
1087 req.amp_id = amp_id;
1089 chan->ident = l2cap_get_ident(chan->conn);
1091 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1095 static void l2cap_move_setup(struct l2cap_chan *chan)
1097 struct sk_buff *skb;
1099 BT_DBG("chan %p", chan);
1101 if (chan->mode != L2CAP_MODE_ERTM)
1104 __clear_retrans_timer(chan);
1105 __clear_monitor_timer(chan);
1106 __clear_ack_timer(chan);
1108 chan->retry_count = 0;
1109 skb_queue_walk(&chan->tx_q, skb) {
1110 if (bt_cb(skb)->control.retries)
1111 bt_cb(skb)->control.retries = 1;
1116 chan->expected_tx_seq = chan->buffer_seq;
1118 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1119 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1120 l2cap_seq_list_clear(&chan->retrans_list);
1121 l2cap_seq_list_clear(&chan->srej_list);
1122 skb_queue_purge(&chan->srej_q);
1124 chan->tx_state = L2CAP_TX_STATE_XMIT;
1125 chan->rx_state = L2CAP_RX_STATE_MOVE;
1127 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1130 static void l2cap_move_done(struct l2cap_chan *chan)
1132 u8 move_role = chan->move_role;
1133 BT_DBG("chan %p", chan);
1135 chan->move_state = L2CAP_MOVE_STABLE;
1136 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1138 if (chan->mode != L2CAP_MODE_ERTM)
1141 switch (move_role) {
1142 case L2CAP_MOVE_ROLE_INITIATOR:
1143 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1144 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1146 case L2CAP_MOVE_ROLE_RESPONDER:
1147 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1152 static void l2cap_chan_ready(struct l2cap_chan *chan)
1154 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1155 chan->conf_state = 0;
1156 __clear_chan_timer(chan);
1158 chan->state = BT_CONNECTED;
1160 chan->ops->ready(chan);
1163 static void l2cap_le_connect(struct l2cap_chan *chan)
1165 struct l2cap_conn *conn = chan->conn;
1166 struct l2cap_le_conn_req req;
1168 req.psm = chan->psm;
1169 req.scid = cpu_to_le16(chan->scid);
1170 req.mtu = cpu_to_le16(chan->imtu);
1171 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1172 req.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
1174 chan->ident = l2cap_get_ident(conn);
1176 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1180 static void l2cap_le_start(struct l2cap_chan *chan)
1182 struct l2cap_conn *conn = chan->conn;
1184 if (!smp_conn_security(conn->hcon, chan->sec_level))
1188 l2cap_chan_ready(chan);
1192 if (chan->state == BT_CONNECT)
1193 l2cap_le_connect(chan);
1196 static void l2cap_start_connection(struct l2cap_chan *chan)
1198 if (__amp_capable(chan)) {
1199 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1200 a2mp_discover_amp(chan);
1201 } else if (chan->conn->hcon->type == LE_LINK) {
1202 l2cap_le_start(chan);
1204 l2cap_send_conn_req(chan);
1208 static void l2cap_do_start(struct l2cap_chan *chan)
1210 struct l2cap_conn *conn = chan->conn;
1212 if (conn->hcon->type == LE_LINK) {
1213 l2cap_le_start(chan);
1217 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1218 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1221 if (l2cap_chan_check_security(chan) &&
1222 __l2cap_no_conn_pending(chan)) {
1223 l2cap_start_connection(chan);
1226 struct l2cap_info_req req;
1227 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1229 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1230 conn->info_ident = l2cap_get_ident(conn);
1232 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1234 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether channel @mode is supported both locally and by the
 * remote feature mask.  Returns a nonzero value when supported.
 * NOTE(review): this extraction dropped the guard before the ERTM/
 * streaming feature-mask OR (line 1242) and the switch header/default
 * case — confirm against the full source.
 */
1239 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1241 u32 local_feat_mask = l2cap_feat_mask;
1243 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1246 case L2CAP_MODE_ERTM:
1247 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1248 case L2CAP_MODE_STREAMING:
1249 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1255 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1257 struct l2cap_conn *conn = chan->conn;
1258 struct l2cap_disconn_req req;
1263 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1264 __clear_retrans_timer(chan);
1265 __clear_monitor_timer(chan);
1266 __clear_ack_timer(chan);
1269 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1270 l2cap_state_change(chan, BT_DISCONN);
1274 req.dcid = cpu_to_le16(chan->dcid);
1275 req.scid = cpu_to_le16(chan->scid);
1276 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1279 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1282 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and push its state
 * machine forward: BT_CONNECT channels are (re)started once security
 * and feature checks pass; BT_CONNECT2 channels get their pending
 * Connect Response (success, pending, or deferred) and, on success, a
 * first Configure Request.
 * NOTE(review): this extraction dropped structural lines (continue
 * statements, else branches, the `u8 buf[128]` used at line 1349, and
 * the sizeof(rsp)/&rsp arguments of the send at 1338) — confirm
 * against the full source.
 */
1283 static void l2cap_conn_start(struct l2cap_conn *conn)
1285 struct l2cap_chan *chan, *tmp;
1287 BT_DBG("conn %p", conn);
1289 mutex_lock(&conn->chan_lock);
1291 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1292 l2cap_chan_lock(chan);
/* Only connection-oriented channels participate. */
1294 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1295 l2cap_chan_unlock(chan);
1299 if (chan->state == BT_CONNECT) {
1300 if (!l2cap_chan_check_security(chan) ||
1301 !__l2cap_no_conn_pending(chan)) {
1302 l2cap_chan_unlock(chan);
/* Unsupported mode on a state-2 device: give up on this channel. */
1306 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1307 && test_bit(CONF_STATE2_DEVICE,
1308 &chan->conf_state)) {
1309 l2cap_chan_close(chan, ECONNRESET);
1310 l2cap_chan_unlock(chan);
1314 l2cap_start_connection(chan);
1316 } else if (chan->state == BT_CONNECT2) {
1317 struct l2cap_conn_rsp rsp;
1319 rsp.scid = cpu_to_le16(chan->dcid);
1320 rsp.dcid = cpu_to_le16(chan->scid);
1322 if (l2cap_chan_check_security(chan)) {
1323 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1324 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1325 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1326 chan->ops->defer(chan);
1329 l2cap_state_change(chan, BT_CONFIG);
1330 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1331 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer "pending, authentication". */
1334 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1335 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1338 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Skip configuration when it already started or the rsp was negative. */
1341 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1342 rsp.result != L2CAP_CR_SUCCESS) {
1343 l2cap_chan_unlock(chan);
1347 set_bit(CONF_REQ_SENT, &chan->conf_state);
1348 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1349 l2cap_build_conf_req(chan, buf), buf);
1350 chan->num_conf_req++;
1353 l2cap_chan_unlock(chan);
1356 mutex_unlock(&conn->chan_lock);
1359 /* Find socket with cid and source/destination bdaddr.
1360 * Returns closest match, locked.
1362 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1366 struct l2cap_chan *c, *c1 = NULL;
1368 read_lock(&chan_list_lock);
1370 list_for_each_entry(c, &chan_list, global_l) {
1371 if (state && c->state != state)
1374 if (c->scid == cid) {
1375 int src_match, dst_match;
1376 int src_any, dst_any;
1379 src_match = !bacmp(&c->src, src);
1380 dst_match = !bacmp(&c->dst, dst);
1381 if (src_match && dst_match) {
1382 read_unlock(&chan_list_lock);
1387 src_any = !bacmp(&c->src, BDADDR_ANY);
1388 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1389 if ((src_match && dst_any) || (src_any && dst_match) ||
1390 (src_any && dst_any))
1395 read_unlock(&chan_list_lock);
/* Set up an ATT channel for a newly established LE connection: find a
 * listening socket on the ATT CID, clone it via ->new_connection() and
 * attach the child channel to @conn with the addresses copied from the
 * underlying hci_conn.
 */
1400 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1402 struct hci_conn *hcon = conn->hcon;
1403 struct l2cap_chan *chan, *pchan;
1408 /* Check if we have socket listening on cid */
1409 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1410 &hcon->src, &hcon->dst);
1414 /* Client ATT sockets should override the server one */
1415 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1418 dst_type = bdaddr_type(hcon, hcon->dst_type);
1420 /* If device is blocked, do not create a channel for it */
1421 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1424 l2cap_chan_lock(pchan);
/* Create the child channel from the listening parent. */
1426 chan = pchan->ops->new_connection(pchan);
1430 chan->dcid = L2CAP_CID_ATT;
1432 bacpy(&chan->src, &hcon->src);
1433 bacpy(&chan->dst, &hcon->dst);
1434 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1435 chan->dst_type = dst_type;
1437 __l2cap_chan_add(conn, chan);
1440 l2cap_chan_unlock(pchan);
/* Called when the underlying link is ready: kick SMP security for outgoing
 * LE pairing, bring up the LE ATT channel if needed, and walk all channels
 * on the connection to start or complete their setup.
 */
1443 static void l2cap_conn_ready(struct l2cap_conn *conn)
1445 struct l2cap_chan *chan;
1446 struct hci_conn *hcon = conn->hcon;
1448 BT_DBG("conn %p", conn);
1450 /* For outgoing pairing which doesn't necessarily have an
1451 * associated socket (e.g. mgmt_pair_device).
1453 if (hcon->out && hcon->type == LE_LINK)
1454 smp_conn_security(hcon, hcon->pending_sec_level);
1456 mutex_lock(&conn->chan_lock);
1458 if (hcon->type == LE_LINK)
1459 l2cap_le_conn_ready(conn);
1461 list_for_each_entry(chan, &conn->chan_l, list) {
1463 l2cap_chan_lock(chan);
/* A2MP fixed channels are managed elsewhere; skip them here. */
1465 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1466 l2cap_chan_unlock(chan);
1470 if (hcon->type == LE_LINK) {
1471 l2cap_le_start(chan);
1472 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/raw channels are ready as soon as the link is. */
1473 l2cap_chan_ready(chan);
1475 } else if (chan->state == BT_CONNECT) {
1476 l2cap_do_start(chan);
1479 l2cap_chan_unlock(chan);
1482 mutex_unlock(&conn->chan_lock);
1485 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that has requested forced
 * reliability (FLAG_FORCE_RELIABLE), so its owner learns data may be lost.
 */
1486 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1488 struct l2cap_chan *chan;
1490 BT_DBG("conn %p", conn);
1492 mutex_lock(&conn->chan_lock);
1494 list_for_each_entry(chan, &conn->chan_l, list) {
1495 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1496 l2cap_chan_set_err(chan, err);
1499 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler for the information-request timer: give up waiting
 * for the remote's info response, mark the feature-mask exchange as done and
 * let pending channels proceed.
 */
1502 static void l2cap_info_timeout(struct work_struct *work)
1504 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1507 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1508 conn->info_ident = 0;
1510 l2cap_conn_start(conn);
1515 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1516 * callback is called during registration. The ->remove callback is called
1517 * during unregistration.
1518 An l2cap_user object is unregistered either explicitly or implicitly when the
1519 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1520 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1521 * External modules must own a reference to the l2cap_conn object if they intend
1522 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1523 * any time if they don't.
/* Register an external l2cap_user on @conn. Locks the hci_dev to serialize
 * against l2cap_conn_del(), rejects double registration (non-NULL list
 * pointers) and dead connections, then invokes the user's ->probe callback
 * before linking it onto conn->users. Returns 0 on success or a negative
 * error code.
 */
1526 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1528 struct hci_dev *hdev = conn->hcon->hdev;
1531 /* We need to check whether l2cap_conn is registered. If it is not, we
1532 * must not register the l2cap_user. l2cap_conn_del() unregisters
1533 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1534 * relies on the parent hci_conn object to be locked. This itself relies
1535 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered somewhere. */
1540 if (user->list.next || user->list.prev) {
1545 /* conn->hchan is NULL after l2cap_conn_del() was called */
1551 ret = user->probe(conn, user);
1555 list_add(&user->list, &conn->users);
1559 hci_dev_unlock(hdev);
1562 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user from @conn: unlink it, reset its list
 * pointers (the "not registered" marker used by l2cap_register_user()) and
 * invoke its ->remove callback, all under the hci_dev lock.
 */
1564 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1566 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never registered; nothing to do. */
1570 if (!user->list.next || !user->list.prev)
1573 list_del(&user->list);
1574 user->list.next = NULL;
1575 user->list.prev = NULL;
1576 user->remove(conn, user);
1579 hci_dev_unlock(hdev);
1581 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from @conn, resetting each user's list
 * pointers and calling its ->remove callback. Used when the connection is
 * being torn down; caller is expected to hold the required locks.
 */
1583 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1585 struct l2cap_user *user;
1587 while (!list_empty(&conn->users)) {
1588 user = list_first_entry(&conn->users, struct l2cap_user, list);
1589 list_del(&user->list);
1590 user->list.next = NULL;
1591 user->list.prev = NULL;
1592 user->remove(conn, user);
/* Tear down the l2cap_conn attached to @hcon, propagating @err to every
 * channel: detach users, delete and close all channels, drop the hci_chan,
 * cancel pending info/SMP timers and release the connection reference.
 */
1596 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1598 struct l2cap_conn *conn = hcon->l2cap_data;
1599 struct l2cap_chan *chan, *l;
1604 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
1606 kfree_skb(conn->rx_skb);
1608 l2cap_unregister_all_users(conn);
1610 mutex_lock(&conn->chan_lock);
1613 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold the channel so ->close() can run after it is unlinked. */
1614 l2cap_chan_hold(chan);
1615 l2cap_chan_lock(chan);
1617 l2cap_chan_del(chan, err);
1619 l2cap_chan_unlock(chan);
1621 chan->ops->close(chan);
1622 l2cap_chan_put(chan);
1625 mutex_unlock(&conn->chan_lock);
1627 hci_chan_del(conn->hchan);
/* Only cancel timers that may actually have been armed. */
1629 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1630 cancel_delayed_work_sync(&conn->info_timer);
1632 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1633 cancel_delayed_work_sync(&conn->security_timer);
1634 smp_chan_destroy(conn);
1637 hcon->l2cap_data = NULL;
1639 l2cap_conn_put(conn);
/* Delayed-work handler for the LE SMP security timer: if pairing is still
 * pending, destroy the SMP context and drop the whole connection with
 * ETIMEDOUT.
 */
1642 static void security_timeout(struct work_struct *work)
1644 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1645 security_timer.work);
1647 BT_DBG("conn %p", conn);
1649 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1650 smp_chan_destroy(conn);
1651 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialize an l2cap_conn for @hcon (or reuse the existing
 * one): create the hci_chan, pick the MTU from the link type, init locks,
 * lists and the per-link-type delayed work. Returns the connection or NULL
 * on allocation failure.
 */
1655 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1657 struct l2cap_conn *conn = hcon->l2cap_data;
1658 struct hci_chan *hchan;
1663 hchan = hci_chan_create(hcon);
1667 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan creation. */
1669 hci_chan_del(hchan);
1673 kref_init(&conn->ref);
1674 hcon->l2cap_data = conn;
/* The conn holds a reference on its hci_conn for its whole lifetime. */
1676 hci_conn_get(conn->hcon);
1677 conn->hchan = hchan;
1679 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU depends on the link type; LE may have its own controller MTU. */
1681 switch (hcon->type) {
1683 if (hcon->hdev->le_mtu) {
1684 conn->mtu = hcon->hdev->le_mtu;
1689 conn->mtu = hcon->hdev->acl_mtu;
1693 conn->feat_mask = 0;
1695 if (hcon->type == ACL_LINK)
1696 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1697 &hcon->hdev->dev_flags);
1699 spin_lock_init(&conn->lock);
1700 mutex_init(&conn->chan_lock);
1702 INIT_LIST_HEAD(&conn->chan_l);
1703 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer, BR/EDR the info-request timer. */
1705 if (hcon->type == LE_LINK)
1706 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1708 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1710 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
1715 static void l2cap_conn_free(struct kref *ref)
1717 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1719 hci_conn_put(conn->hcon);
/* Take a reference on @conn; paired with l2cap_conn_put(). */
1723 void l2cap_conn_get(struct l2cap_conn *conn)
1725 kref_get(&conn->ref);
1727 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the last
 * reference goes away.
 */
1729 void l2cap_conn_put(struct l2cap_conn *conn)
1731 kref_put(&conn->ref, l2cap_conn_free);
1733 EXPORT_SYMBOL(l2cap_conn_put);
1735 /* ---- Socket interface ---- */
1737 /* Find socket with psm and source / destination bdaddr.
1738 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM, additionally filtering
 * by link type so BR/EDR and LE channels are not mixed. Exact src/dst match
 * returns immediately; wildcard (BDADDR_ANY) matches are kept as the
 * closest candidate.
 */
1740 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1745 struct l2cap_chan *c, *c1 = NULL;
1747 read_lock(&chan_list_lock);
1749 list_for_each_entry(c, &chan_list, global_l) {
1750 if (state && c->state != state)
/* Skip channels whose address type does not fit the link type. */
1753 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1756 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1759 if (c->psm == psm) {
1760 int src_match, dst_match;
1761 int src_any, dst_any;
1764 src_match = !bacmp(&c->src, src);
1765 dst_match = !bacmp(&c->dst, dst);
1766 if (src_match && dst_match) {
1767 read_unlock(&chan_list_lock);
1772 src_any = !bacmp(&c->src, BDADDR_ANY);
1773 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1774 if ((src_match && dst_any) || (src_any && dst_match) ||
1775 (src_any && dst_any))
1780 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for @chan to @dst: validate the
 * PSM/CID and channel mode, create (or reuse) the HCI link of the right
 * type, attach the channel to the connection and move it to BT_CONNECT.
 * Returns 0 on success or a negative error code.
 */
1785 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1786 bdaddr_t *dst, u8 dst_type)
1788 struct l2cap_conn *conn;
1789 struct hci_conn *hcon;
1790 struct hci_dev *hdev;
1794 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1795 dst_type, __le16_to_cpu(psm));
1797 hdev = hci_get_route(dst, &chan->src);
1799 return -EHOSTUNREACH;
1803 l2cap_chan_lock(chan);
1805 /* PSM must be odd and lsb of upper byte must be 0 */
1806 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1807 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1812 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1817 switch (chan->mode) {
1818 case L2CAP_MODE_BASIC:
1820 case L2CAP_MODE_ERTM:
1821 case L2CAP_MODE_STREAMING:
1830 switch (chan->state) {
1834 /* Already connecting */
1839 /* Already connected */
1853 /* Set destination address and psm */
1854 bacpy(&chan->dst, dst);
1855 chan->dst_type = dst_type;
1860 auth_type = l2cap_get_auth_type(chan);
/* Pick the HCI link type from the destination address type. */
1862 if (bdaddr_type_is_le(dst_type))
1863 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1864 chan->sec_level, auth_type);
1866 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1867 chan->sec_level, auth_type);
1870 err = PTR_ERR(hcon);
1874 conn = l2cap_conn_add(hcon);
1876 hci_conn_drop(hcon);
/* Refuse duplicate fixed-CID channels on the same connection. */
1881 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1882 hci_conn_drop(hcon);
1887 /* Update source addr of the socket */
1888 bacpy(&chan->src, &hcon->src);
1889 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes conn->chan_lock; drop the chan lock around it
 * to keep lock ordering consistent. */
1891 l2cap_chan_unlock(chan);
1892 l2cap_chan_add(conn, chan);
1893 l2cap_chan_lock(chan);
1895 /* l2cap_chan_add takes its own ref so we can drop this one */
1896 hci_conn_drop(hcon);
1898 l2cap_state_change(chan, BT_CONNECT);
1899 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* If the link is already up, fast-path the channel setup. */
1901 if (hcon->state == BT_CONNECTED) {
1902 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1903 __clear_chan_timer(chan);
1904 if (l2cap_chan_check_security(chan))
1905 l2cap_state_change(chan, BT_CONNECTED);
1907 l2cap_do_start(chan);
1913 l2cap_chan_unlock(chan);
1914 hci_dev_unlock(hdev);
/* Delayed-work handler for the ERTM monitor timer: feed the MONITOR_TO
 * event into the TX state machine. Drops the reference the timer held on
 * the channel when done.
 */
1919 static void l2cap_monitor_timeout(struct work_struct *work)
1921 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1922 monitor_timer.work);
1924 BT_DBG("chan %p", chan);
1926 l2cap_chan_lock(chan);
1929 l2cap_chan_unlock(chan);
1930 l2cap_chan_put(chan);
1934 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1936 l2cap_chan_unlock(chan);
1937 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feed the
 * RETRANS_TO event into the TX state machine. Drops the reference the
 * timer held on the channel when done.
 */
1940 static void l2cap_retrans_timeout(struct work_struct *work)
1942 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1943 retrans_timer.work);
1945 BT_DBG("chan %p", chan);
1947 l2cap_chan_lock(chan);
1950 l2cap_chan_unlock(chan);
1951 l2cap_chan_put(chan);
1955 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1956 l2cap_chan_unlock(chan);
1957 l2cap_chan_put(chan);
/* Transmit queued I-frames in streaming mode: append @skbs to the TX queue
 * and send every frame immediately, stamping each with the next TX sequence
 * number and an FCS when CRC16 is configured. Streaming mode has no
 * retransmission, so frames are not kept after sending.
 */
1960 static void l2cap_streaming_send(struct l2cap_chan *chan,
1961 struct sk_buff_head *skbs)
1963 struct sk_buff *skb;
1964 struct l2cap_ctrl *control;
1966 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1968 if (__chan_is_moving(chan))
1971 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1973 while (!skb_queue_empty(&chan->tx_q)) {
1975 skb = skb_dequeue(&chan->tx_q);
1977 bt_cb(skb)->control.retries = 1;
1978 control = &bt_cb(skb)->control;
1980 control->reqseq = 0;
1981 control->txseq = chan->next_tx_seq;
1983 __pack_control(chan, control, skb);
1985 if (chan->fcs == L2CAP_FCS_CRC16) {
1986 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1987 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1990 l2cap_do_send(chan, skb);
1992 BT_DBG("Sent txseq %u", control->txseq);
1994 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1995 chan->frames_sent++;
/* Transmit pending I-frames in ERTM mode, limited by the remote TX window.
 * Each frame is stamped with reqseq/txseq (plus an F-bit if one is owed),
 * FCS-protected when configured, and a clone is sent so the original stays
 * queued for possible retransmission. Arms the retransmission timer.
 */
1999 static int l2cap_ertm_send(struct l2cap_chan *chan)
2001 struct sk_buff *skb, *tx_skb;
2002 struct l2cap_ctrl *control;
2005 BT_DBG("chan %p", chan);
2007 if (chan->state != BT_CONNECTED)
/* Remote receiver is busy (RNR received); hold transmission. */
2010 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2013 if (__chan_is_moving(chan))
2016 while (chan->tx_send_head &&
2017 chan->unacked_frames < chan->remote_tx_win &&
2018 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2020 skb = chan->tx_send_head;
2022 bt_cb(skb)->control.retries = 1;
2023 control = &bt_cb(skb)->control;
/* If an F-bit is pending, it rides on this frame. */
2025 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgment of everything received so far. */
2028 control->reqseq = chan->buffer_seq;
2029 chan->last_acked_seq = chan->buffer_seq;
2030 control->txseq = chan->next_tx_seq;
2032 __pack_control(chan, control, skb);
2034 if (chan->fcs == L2CAP_FCS_CRC16) {
2035 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2036 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2039 /* Clone after data has been modified. Data is assumed to be
2040 read-only (for locking purposes) on cloned sk_buffs.
2042 tx_skb = skb_clone(skb, GFP_KERNEL);
2047 __set_retrans_timer(chan);
2049 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2050 chan->unacked_frames++;
2051 chan->frames_sent++;
/* Advance tx_send_head, or clear it at the end of the queue. */
2054 if (skb_queue_is_last(&chan->tx_q, skb))
2055 chan->tx_send_head = NULL;
2057 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2059 l2cap_do_send(chan, tx_skb);
2060 BT_DBG("Sent txseq %u", control->txseq);
2063 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2064 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit the frames whose sequence numbers are queued on
 * chan->retrans_list. Enforces the max_tx retry limit (disconnecting on
 * violation), refreshes each frame's control field and FCS, and sends a
 * clone (or a copy when the skb is already cloned, since clones are
 * read-only).
 */
2071 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2072 struct l2cap_ctrl control;
2073 struct sk_buff *skb;
2074 struct sk_buff *tx_skb;
2076 BT_DBG("chan %p", chan);
2078 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2081 if (__chan_is_moving(chan))
2084 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2085 seq = l2cap_seq_list_pop(&chan->retrans_list);
2087 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2089 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2094 bt_cb(skb)->control.retries++;
2095 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries; otherwise give up and drop
 * the link when the limit is exceeded. */
2097 if (chan->max_tx != 0 &&
2098 bt_cb(skb)->control.retries > chan->max_tx) {
2099 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2100 l2cap_send_disconn_req(chan, ECONNRESET);
2101 l2cap_seq_list_clear(&chan->retrans_list);
2105 control.reqseq = chan->buffer_seq;
2106 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2111 if (skb_cloned(skb)) {
2112 /* Cloned sk_buffs are read-only, so we need a
2115 tx_skb = skb_copy(skb, GFP_KERNEL);
2117 tx_skb = skb_clone(skb, GFP_KERNEL);
2121 l2cap_seq_list_clear(&chan->retrans_list);
2125 /* Update skb contents */
2126 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2127 put_unaligned_le32(__pack_extended_control(&control),
2128 tx_skb->data + L2CAP_HDR_SIZE);
2130 put_unaligned_le16(__pack_enhanced_control(&control),
2131 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame. */
2134 if (chan->fcs == L2CAP_FCS_CRC16) {
2135 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2136 put_unaligned_le16(fcs, skb_put(tx_skb,
2140 l2cap_do_send(chan, tx_skb);
2142 BT_DBG("Resent txseq %d", control.txseq);
2144 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq by queuing it
 * on the retransmission list and kicking the resend machinery.
 */
2148 static void l2cap_retransmit(struct l2cap_chan *chan,
2149 struct l2cap_ctrl *control)
2151 BT_DBG("chan %p, control %p", chan, control);
2153 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2154 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting from control->reqseq: rebuild
 * the retransmission list by walking the TX queue from that sequence number
 * up to tx_send_head, then resend. No-op while the remote is busy.
 */
2157 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2158 struct l2cap_ctrl *control)
2160 struct sk_buff *skb;
2162 BT_DBG("chan %p, control %p", chan, control);
2165 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2167 l2cap_seq_list_clear(&chan->retrans_list);
2169 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2172 if (chan->unacked_frames) {
/* Find the first frame to resend (reqseq) or the send head. */
2173 skb_queue_walk(&chan->tx_q, skb) {
2174 if (bt_cb(skb)->control.txseq == control->reqseq ||
2175 skb == chan->tx_send_head)
/* Queue everything from there up to (not including) tx_send_head. */
2179 skb_queue_walk_from(&chan->tx_q, skb) {
2180 if (skb == chan->tx_send_head)
2183 l2cap_seq_list_append(&chan->retrans_list,
2184 bt_cb(skb)->control.txseq);
2187 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise try
 * to piggyback the ack on outgoing I-frames, and fall back to an explicit
 * RR once the unacked window is 3/4 full. Otherwise just (re)arm the ack
 * timer.
 */
2191 static void l2cap_send_ack(struct l2cap_chan *chan)
2193 struct l2cap_ctrl control;
2194 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2195 chan->last_acked_seq);
2198 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2199 chan, chan->last_acked_seq, chan->buffer_seq);
2201 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to stop sending (RNR). */
2204 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2205 chan->rx_state == L2CAP_RX_STATE_RECV) {
2206 __clear_ack_timer(chan);
2207 control.super = L2CAP_SUPER_RNR;
2208 control.reqseq = chan->buffer_seq;
2209 l2cap_send_sframe(chan, &control);
2211 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2212 l2cap_ertm_send(chan);
2213 /* If any i-frames were sent, they included an ack */
2214 if (chan->buffer_seq == chan->last_acked_seq)
2218 /* Ack now if the window is 3/4ths full.
2219 * Calculate without mul or div
/* threshold = ack_win * 3 / 4, via shift-and-add. */
2221 threshold = chan->ack_win;
2222 threshold += threshold << 1;
2225 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2228 if (frames_to_ack >= threshold) {
2229 __clear_ack_timer(chan);
2230 control.super = L2CAP_SUPER_RR;
2231 control.reqseq = chan->buffer_seq;
2232 l2cap_send_sframe(chan, &control);
2237 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, the first @count bytes
 * into the skb itself and the remainder into a chain of continuation
 * fragments sized by the connection MTU. Returns 0 on success or a negative
 * error (copy failure or fragment allocation failure).
 */
2241 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2242 struct msghdr *msg, int len,
2243 int count, struct sk_buff *skb)
2245 struct l2cap_conn *conn = chan->conn;
2246 struct sk_buff **frag;
2249 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2255 /* Continuation fragments (no L2CAP header) */
2256 frag = &skb_shinfo(skb)->frag_list;
2258 struct sk_buff *tmp;
2260 count = min_t(unsigned int, conn->mtu, len);
2262 tmp = chan->ops->alloc_skb(chan, count,
2263 msg->msg_flags & MSG_DONTWAIT);
2265 return PTR_ERR(tmp);
2269 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2272 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb. */
2277 skb->len += (*frag)->len;
2278 skb->data_len += (*frag)->len;
2280 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from @msg: L2CAP header plus the PSM
 * field, then the payload copied via l2cap_skbuff_fromiovec(). Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
2286 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2287 struct msghdr *msg, size_t len,
2290 struct l2cap_conn *conn = chan->conn;
2291 struct sk_buff *skb;
2292 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2293 struct l2cap_hdr *lh;
2295 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2296 __le16_to_cpu(chan->psm), len, priority);
2298 count = min_t(unsigned int, (conn->mtu - hlen), len);
2300 skb = chan->ops->alloc_skb(chan, count + hlen,
2301 msg->msg_flags & MSG_DONTWAIT);
2305 skb->priority = priority;
2307 /* Create L2CAP header */
2308 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2309 lh->cid = cpu_to_le16(chan->dcid);
/* Payload length includes the 2-byte PSM that follows the header. */
2310 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2311 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2313 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2314 if (unlikely(err < 0)) {
2316 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU from @msg: plain L2CAP header followed by
 * the payload. Returns the skb or an ERR_PTR on allocation/copy failure.
 */
2321 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2322 struct msghdr *msg, size_t len,
2325 struct l2cap_conn *conn = chan->conn;
2326 struct sk_buff *skb;
2328 struct l2cap_hdr *lh;
2330 BT_DBG("chan %p len %zu", chan, len);
2332 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2334 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2335 msg->msg_flags & MSG_DONTWAIT);
2339 skb->priority = priority;
2341 /* Create L2CAP header */
2342 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2343 lh->cid = cpu_to_le16(chan->dcid);
2344 lh->len = cpu_to_le16(len);
2346 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2347 if (unlikely(err < 0)) {
2349 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU from @msg: L2CAP header, a zeroed
 * control field (filled in at transmit time), an optional SDU-length field
 * for the first segment of a segmented SDU, the payload, and room for the
 * FCS when CRC16 is configured. Returns the skb or an ERR_PTR on failure.
 */
2354 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2355 struct msghdr *msg, size_t len,
2358 struct l2cap_conn *conn = chan->conn;
2359 struct sk_buff *skb;
2360 int err, count, hlen;
2361 struct l2cap_hdr *lh;
2363 BT_DBG("chan %p len %zu", chan, len);
2366 return ERR_PTR(-ENOTCONN);
2368 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the SDU length field. */
2371 hlen += L2CAP_SDULEN_SIZE;
2373 if (chan->fcs == L2CAP_FCS_CRC16)
2374 hlen += L2CAP_FCS_SIZE;
2376 count = min_t(unsigned int, (conn->mtu - hlen), len);
2378 skb = chan->ops->alloc_skb(chan, count + hlen,
2379 msg->msg_flags & MSG_DONTWAIT);
2383 /* Create L2CAP header */
2384 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2385 lh->cid = cpu_to_le16(chan->dcid);
2386 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2388 /* Control header is populated later */
2389 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2390 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2392 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2395 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2397 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2398 if (unlikely(err < 0)) {
2400 return ERR_PTR(err);
2403 bt_cb(skb)->control.fcs = chan->fcs;
2404 bt_cb(skb)->control.retries = 0;
/* Split the SDU in @msg into I-frame PDUs queued on @seg_queue, tagging each
 * with SAR (unsegmented / start / continue / end). PDU size is derived from
 * the HCI MTU (so each PDU fits one HCI fragment), capped for BR/EDR,
 * reduced by the ERTM header/FCS overhead and the remote MPS. On PDU
 * creation failure the queue is purged and the error returned.
 */
2408 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2409 struct sk_buff_head *seg_queue,
2410 struct msghdr *msg, size_t len)
2412 struct sk_buff *skb;
2417 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2419 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2420 * so fragmented skbs are not used. The HCI layer's handling
2421 * of fragmented skbs is not compatible with ERTM's queueing.
2424 /* PDU size is derived from the HCI MTU */
2425 pdu_len = chan->conn->mtu;
2427 /* Constrain PDU size for BR/EDR connections */
2429 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2431 /* Adjust for largest possible L2CAP overhead. */
2433 pdu_len -= L2CAP_FCS_SIZE;
2435 pdu_len -= __ertm_hdr_size(chan);
2437 /* Remote device may have requested smaller PDUs */
2438 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2440 if (len <= pdu_len) {
2441 sar = L2CAP_SAR_UNSEGMENTED;
2445 sar = L2CAP_SAR_START;
/* The start PDU also carries the 2-byte SDU length field. */
2447 pdu_len -= L2CAP_SDULEN_SIZE;
2451 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2454 __skb_queue_purge(seg_queue);
2455 return PTR_ERR(skb);
2458 bt_cb(skb)->control.sar = sar;
2459 __skb_queue_tail(seg_queue, skb);
/* Subsequent PDUs have no SDU length field, so regain that room. */
2464 pdu_len += L2CAP_SDULEN_SIZE;
2467 if (len <= pdu_len) {
2468 sar = L2CAP_SAR_END;
2471 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on @chan. Dispatches on channel type
 * and mode: connectionless PDU, single basic-mode PDU, or segmentation into
 * ERTM/streaming I-frames which are then handed to the TX state machine
 * (ERTM) or sent directly (streaming). Returns bytes sent or a negative
 * error.
 */
2478 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2481 struct sk_buff *skb;
2483 struct sk_buff_head seg_queue;
2488 /* Connectionless channel */
2489 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2490 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2492 return PTR_ERR(skb);
2494 l2cap_do_send(chan, skb);
2498 switch (chan->mode) {
2499 case L2CAP_MODE_BASIC:
2500 /* Check outgoing MTU */
2501 if (len > chan->omtu)
2504 /* Create a basic PDU */
2505 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2507 return PTR_ERR(skb);
2509 l2cap_do_send(chan, skb);
2513 case L2CAP_MODE_ERTM:
2514 case L2CAP_MODE_STREAMING:
2515 /* Check outgoing MTU */
2516 if (len > chan->omtu) {
2521 __skb_queue_head_init(&seg_queue);
2523 /* Do segmentation before calling in to the state machine,
2524 * since it's possible to block while waiting for memory
2527 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2529 /* The channel could have been closed while segmenting,
2530 * check that it is still connected.
2532 if (chan->state != BT_CONNECTED) {
2533 __skb_queue_purge(&seg_queue);
2540 if (chan->mode == L2CAP_MODE_ERTM)
2541 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2543 l2cap_streaming_send(chan, &seg_queue);
2547 /* If the skbs were not queued for sending, they'll still be in
2548 * seg_queue and need to be purged.
2550 __skb_queue_purge(&seg_queue);
2554 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number between expected_tx_seq and
 * @txseq that has not already been received into the SREJ queue, recording
 * each requested sequence on srej_list, then advance expected_tx_seq past
 * @txseq.
 */
2561 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2563 struct l2cap_ctrl control;
2566 BT_DBG("chan %p, txseq %u", chan, txseq);
2568 memset(&control, 0, sizeof(control));
2570 control.super = L2CAP_SUPER_SREJ;
2572 for (seq = chan->expected_tx_seq; seq != txseq;
2573 seq = __next_seq(chan, seq)) {
/* Only request frames we haven't already buffered out of order. */
2574 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2575 control.reqseq = seq;
2576 l2cap_send_sframe(chan, &control);
2577 l2cap_seq_list_append(&chan->srej_list, seq);
2581 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number on
 * srej_list, if any.
 */
2584 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2586 struct l2cap_ctrl control;
2588 BT_DBG("chan %p", chan);
2590 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2593 memset(&control, 0, sizeof(control));
2595 control.super = L2CAP_SUPER_SREJ;
2596 control.reqseq = chan->srej_list.tail;
2597 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every outstanding sequence on srej_list up to
 * (but not including) @txseq. Each popped sequence is re-appended, so the
 * initial head is captured to guarantee a single pass through the list.
 */
2600 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2602 struct l2cap_ctrl control;
2606 BT_DBG("chan %p, txseq %u", chan, txseq);
2608 memset(&control, 0, sizeof(control));
2610 control.super = L2CAP_SUPER_SREJ;
2612 /* Capture initial list head to allow only one pass through the list. */
2613 initial_head = chan->srej_list.head;
2616 seq = l2cap_seq_list_pop(&chan->srej_list);
2617 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2620 control.reqseq = seq;
2621 l2cap_send_sframe(chan, &control);
2622 l2cap_seq_list_append(&chan->srej_list, seq);
2623 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgment: free every TX-queue frame from
 * expected_ack_seq up to (not including) @reqseq, decrementing the unacked
 * count, then stop the retransmission timer once nothing is outstanding.
 */
2626 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2628 struct sk_buff *acked_skb;
2631 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack adds no new information. */
2633 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2636 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2637 chan->expected_ack_seq, chan->unacked_frames);
2639 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2640 ackseq = __next_seq(chan, ackseq)) {
2642 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2644 skb_unlink(acked_skb, &chan->tx_q);
2645 kfree_skb(acked_skb);
2646 chan->unacked_frames--;
2650 chan->expected_ack_seq = reqseq;
2652 if (chan->unacked_frames == 0)
2653 __clear_retrans_timer(chan);
2655 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon SREJ-based recovery: rewind expected_tx_seq, discard the pending
 * SREJ list and out-of-order queue, and return the RX state machine to
 * normal RECV.
 */
2658 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2660 BT_DBG("chan %p", chan);
2662 chan->expected_tx_seq = chan->buffer_seq;
2663 l2cap_seq_list_clear(&chan->srej_list);
2664 skb_queue_purge(&chan->srej_q);
2665 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine handler for the XMIT state: queue and send data,
 * enter/leave local-busy, process incoming acks, and transition to WAIT_F
 * on explicit poll or retransmission timeout.
 */
2668 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2669 struct l2cap_ctrl *control,
2670 struct sk_buff_head *skbs, u8 event)
2672 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2676 case L2CAP_EV_DATA_REQUEST:
2677 if (chan->tx_send_head == NULL)
2678 chan->tx_send_head = skb_peek(skbs);
2680 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2681 l2cap_ertm_send(chan);
2683 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2684 BT_DBG("Enter LOCAL_BUSY");
2685 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2687 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2688 /* The SREJ_SENT state must be aborted if we are to
2689 * enter the LOCAL_BUSY state.
2691 l2cap_abort_rx_srej_sent(chan);
2694 l2cap_send_ack(chan);
2697 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2698 BT_DBG("Exit LOCAL_BUSY");
2699 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we had sent RNR, poll the peer with RR(P=1) and await the
 * F-bit in the WAIT_F state. */
2701 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2702 struct l2cap_ctrl local_control;
2704 memset(&local_control, 0, sizeof(local_control));
2705 local_control.sframe = 1;
2706 local_control.super = L2CAP_SUPER_RR;
2707 local_control.poll = 1;
2708 local_control.reqseq = chan->buffer_seq;
2709 l2cap_send_sframe(chan, &local_control);
2711 chan->retry_count = 1;
2712 __set_monitor_timer(chan);
2713 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2716 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2717 l2cap_process_reqseq(chan, control->reqseq);
2719 case L2CAP_EV_EXPLICIT_POLL:
2720 l2cap_send_rr_or_rnr(chan, 1);
2721 chan->retry_count = 1;
2722 __set_monitor_timer(chan);
2723 __clear_ack_timer(chan);
2724 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timeout: poll the peer and wait for the F-bit. */
2726 case L2CAP_EV_RETRANS_TO:
2727 l2cap_send_rr_or_rnr(chan, 1);
2728 chan->retry_count = 1;
2729 __set_monitor_timer(chan);
2730 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2732 case L2CAP_EV_RECV_FBIT:
2733 /* Nothing to process */
/* ERTM TX state machine handler for the WAIT_F state (waiting for an
 * S/I-frame with the F-bit after a poll): data is queued but not sent;
 * receiving the F-bit returns the machine to XMIT; monitor timeouts re-poll
 * up to max_tx times before the link is dropped.
 */
2740 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2741 struct l2cap_ctrl *control,
2742 struct sk_buff_head *skbs, u8 event)
2744 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2748 case L2CAP_EV_DATA_REQUEST:
2749 if (chan->tx_send_head == NULL)
2750 chan->tx_send_head = skb_peek(skbs);
2751 /* Queue data, but don't send. */
2752 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2754 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2755 BT_DBG("Enter LOCAL_BUSY");
2756 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2758 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2759 /* The SREJ_SENT state must be aborted if we are to
2760 * enter the LOCAL_BUSY state.
2762 l2cap_abort_rx_srej_sent(chan);
2765 l2cap_send_ack(chan);
2768 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2769 BT_DBG("Exit LOCAL_BUSY");
2770 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we had sent RNR, re-poll the peer with RR(P=1). */
2772 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2773 struct l2cap_ctrl local_control;
2774 memset(&local_control, 0, sizeof(local_control));
2775 local_control.sframe = 1;
2776 local_control.super = L2CAP_SUPER_RR;
2777 local_control.poll = 1;
2778 local_control.reqseq = chan->buffer_seq;
2779 l2cap_send_sframe(chan, &local_control);
2781 chan->retry_count = 1;
2782 __set_monitor_timer(chan);
2783 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2786 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2787 l2cap_process_reqseq(chan, control->reqseq);
2791 case L2CAP_EV_RECV_FBIT:
/* F-bit received: poll answered, go back to normal transmission. */
2792 if (control && control->final) {
2793 __clear_monitor_timer(chan);
2794 if (chan->unacked_frames > 0)
2795 __set_retrans_timer(chan);
2796 chan->retry_count = 0;
2797 chan->tx_state = L2CAP_TX_STATE_XMIT;
2798 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2801 case L2CAP_EV_EXPLICIT_POLL:
2804 case L2CAP_EV_MONITOR_TO:
/* Re-poll until max_tx is exhausted (0 = unlimited), then drop. */
2805 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2806 l2cap_send_rr_or_rnr(chan, 1);
2807 __set_monitor_timer(chan);
2808 chan->retry_count++;
2810 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current TX state (XMIT or WAIT_F).
 */
2818 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2819 struct sk_buff_head *skbs, u8 event)
2821 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2822 chan, control, skbs, event, chan->tx_state);
2824 switch (chan->tx_state) {
2825 case L2CAP_TX_STATE_XMIT:
2826 l2cap_tx_state_xmit(chan, control, skbs, event);
2828 case L2CAP_TX_STATE_WAIT_F:
2829 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received control field's reqseq/F-bit to the TX state machine. */
2837 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2838 struct l2cap_ctrl *control)
2840 BT_DBG("chan %p, control %p", chan, control);
2841 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the received F-bit to the TX state machine. */
2844 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2845 struct l2cap_ctrl *control)
2847 BT_DBG("chan %p, control %p", chan, control);
2848 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2851 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every RAW-type channel on @conn except the channel the
 * frame originated from, delivering each clone via the channel's ->recv op.
 */
2852 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2854 struct sk_buff *nskb;
2855 struct l2cap_chan *chan;
2857 BT_DBG("conn %p", conn);
2859 mutex_lock(&conn->chan_lock);
2861 list_for_each_entry(chan, &conn->chan_l, list) {
2862 if (chan->chan_type != L2CAP_CHAN_RAW)
2865 /* Don't send frame to the channel it came from */
2866 if (bt_cb(skb)->chan == chan)
2869 nskb = skb_clone(skb, GFP_KERNEL);
2872 if (chan->ops->recv(chan, nskb))
2876 mutex_unlock(&conn->chan_lock);
2879 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header on the appropriate
 * signalling CID (LE or BR/EDR), command header (@code/@ident/@dlen), then
 * @data split across the head skb and continuation fragments sized by the
 * connection MTU. Returns the skb or NULL on failure.
 */
2880 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2881 u8 ident, u16 dlen, void *data)
2883 struct sk_buff *skb, **frag;
2884 struct l2cap_cmd_hdr *cmd;
2885 struct l2cap_hdr *lh;
2888 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2889 conn, code, ident, dlen);
/* MTU must at least fit the L2CAP and command headers. */
2891 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2894 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2895 count = min_t(unsigned int, conn->mtu, len);
2897 skb = bt_skb_alloc(count, GFP_KERNEL);
2901 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2902 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2904 if (conn->hcon->type == LE_LINK)
2905 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2907 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2909 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2912 cmd->len = cpu_to_le16(dlen);
2915 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2916 memcpy(skb_put(skb, count), data, count);
2922 /* Continuation fragments (no L2CAP header) */
2923 frag = &skb_shinfo(skb)->frag_list;
2925 count = min_t(unsigned int, conn->mtu, len);
2927 *frag = bt_skb_alloc(count, GFP_KERNEL);
2931 memcpy(skb_put(*frag, count), data, count);
2936 frag = &(*frag)->next;
/* Decode one configuration option at *ptr. Outputs the option type,
 * its payload length (*olen) and its value: 1/2/4-byte values are read
 * inline (unaligned-safe), anything else is returned as a pointer to
 * the raw value bytes cast into *val. Returns the total bytes consumed
 * (header + payload) so the caller can subtract it from remaining len.
 * NOTE(review): no check of opt->len against the remaining buffer is
 * visible here — confirm the caller bounds the walk.
 */
2946 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2949 struct l2cap_conf_opt *opt = *ptr;
2952 len = L2CAP_CONF_OPT_SIZE + opt->len;
2960 *val = *((u8 *) opt->val);
2964 *val = get_unaligned_le16(opt->val);
2968 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
2972 *val = (unsigned long) opt->val;
2976 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are stored inline (unaligned-safe); larger values
 * are memcpy'd from the pointer passed in val.
 * NOTE(review): there is no remaining-space check against the caller's
 * output buffer here; every caller must guarantee room for
 * L2CAP_CONF_OPT_SIZE + len bytes — confirm against upstream hardening.
 */
2980 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2982 struct l2cap_conf_opt *opt = *ptr;
2984 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2991 *((u8 *) opt->val) = val;
2995 put_unaligned_le16(val, opt->val);
2999 put_unaligned_le32(val, opt->val);
3003 memcpy(opt->val, (void *) val, len);
3007 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it via l2cap_add_conf_opt. ERTM uses
 * the channel's local service type/id with default access latency and
 * flush timeout; streaming advertises best-effort.
 */
3010 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3012 struct l2cap_conf_efs efs;
3014 switch (chan->mode) {
3015 case L2CAP_MODE_ERTM:
3016 efs.id = chan->local_id;
3017 efs.stype = chan->local_stype;
3018 efs.msdu = cpu_to_le16(chan->local_msdu);
3019 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3020 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3021 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3024 case L2CAP_MODE_STREAMING:
3026 efs.stype = L2CAP_SERV_BESTEFFORT;
3027 efs.msdu = cpu_to_le16(chan->local_msdu);
3028 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3037 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3038 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if there are received
 * frames not yet acknowledged (buffer_seq ahead of last_acked_seq),
 * send an RR/RNR S-frame to ack them. Drops the reference taken when
 * the timer was armed.
 */
3041 static void l2cap_ack_timeout(struct work_struct *work)
3043 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3047 BT_DBG("chan %p", chan);
3049 l2cap_chan_lock(chan);
3051 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3052 chan->last_acked_seq);
3055 l2cap_send_rr_or_rnr(chan, 0);
3057 l2cap_chan_unlock(chan);
3058 l2cap_chan_put(chan);
/* Initialise ERTM/streaming state for a channel after configuration:
 * reset all sequence counters, AMP move state and the TX queue. For
 * ERTM proper, additionally set the RX/TX state machines, arm the
 * retransmit/monitor/ack work items and allocate the SREJ and
 * retransmit sequence lists (srej_list is freed again if the second
 * allocation fails). Returns 0 or a negative errno.
 */
3061 int l2cap_ertm_init(struct l2cap_chan *chan)
3065 chan->next_tx_seq = 0;
3066 chan->expected_tx_seq = 0;
3067 chan->expected_ack_seq = 0;
3068 chan->unacked_frames = 0;
3069 chan->buffer_seq = 0;
3070 chan->frames_sent = 0;
3071 chan->last_acked_seq = 0;
3073 chan->sdu_last_frag = NULL;
3076 skb_queue_head_init(&chan->tx_q);
/* Reset AMP channel-move bookkeeping to the stable BR/EDR state. */
3078 chan->local_amp_id = AMP_ID_BREDR;
3079 chan->move_id = AMP_ID_BREDR;
3080 chan->move_state = L2CAP_MOVE_STABLE;
3081 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3083 if (chan->mode != L2CAP_MODE_ERTM)
3086 chan->rx_state = L2CAP_RX_STATE_RECV;
3087 chan->tx_state = L2CAP_TX_STATE_XMIT;
3089 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3090 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3091 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3093 skb_queue_head_init(&chan->srej_q);
3095 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3099 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3101 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask advertises support for it; otherwise fall back to basic mode.
 */
3106 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3109 case L2CAP_MODE_STREAMING:
3110 case L2CAP_MODE_ERTM:
3111 if (l2cap_mode_supported(mode, remote_feat_mask))
3115 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) support
 * enabled and the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
3119 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3121 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP)
 * support enabled and the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
3124 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3126 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmit/monitor timeouts. On an AMP
 * link the timeouts are derived from the controller's best-effort
 * flush timeout (converted to ms, scaled, clamped to 16 bits);
 * otherwise the spec default constants are used.
 */
3129 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3130 struct l2cap_conf_rfc *rfc)
3132 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3133 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3135 /* Class 1 devices have must have ERTM timeouts
3136 * exceeding the Link Supervision Timeout. The
3137 * default Link Supervision Timeout for AMP
3138 * controllers is 10 seconds.
3140 * Class 1 devices use 0xffffffff for their
3141 * best-effort flush timeout, so the clamping logic
3142 * will result in a timeout that meets the above
3143 * requirement. ERTM timeouts are 16-bit values, so
3144 * the maximum timeout is 65.535 seconds.
3147 /* Convert timeout to milliseconds and round */
3148 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3150 /* This is the recommended formula for class 2 devices
3151 * that start ERTM timers when packets are sent to the
3154 ertm_to = 3 * ertm_to + 500;
3156 if (ertm_to > 0xffff)
3159 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3160 rfc->monitor_timeout = rfc->retrans_timeout;
/* Non-AMP (BR/EDR) path: use the spec default timeouts. */
3162 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3163 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: if the requested window exceeds the default
 * and extended window size is supported, switch to the extended
 * control field; otherwise clamp the window to the standard default.
 * ack_win mirrors the resulting tx_win.
 */
3167 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3169 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3170 __l2cap_ews_supported(chan->conn)) {
3171 /* use extended control field */
3172 set_bit(FLAG_EXT_CTRL, &chan->flags);
3173 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3175 chan->tx_win = min_t(u16, chan->tx_win,
3176 L2CAP_DEFAULT_TX_WINDOW);
3177 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3179 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request into 'data'. On the first
 * request, downgrade the channel mode if the remote lacks support.
 * Adds MTU (when non-default), the mode-specific RFC option, and —
 * where enabled — EFS, EWS and FCS options. Returns the total length
 * of the request written.
 */
3182 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3184 struct l2cap_conf_req *req = data;
3185 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3186 void *ptr = req->data;
3189 BT_DBG("chan %p", chan);
/* Mode (re)selection only happens before any config exchange. */
3191 if (chan->num_conf_req || chan->num_conf_rsp)
3194 switch (chan->mode) {
3195 case L2CAP_MODE_STREAMING:
3196 case L2CAP_MODE_ERTM:
3197 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3200 if (__l2cap_efs_supported(chan->conn))
3201 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3205 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3210 if (chan->imtu != L2CAP_DEFAULT_MTU)
3211 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3213 switch (chan->mode) {
3214 case L2CAP_MODE_BASIC:
/* Basic mode only needs an explicit RFC option if the remote
 * understands ERTM/streaming at all. */
3215 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3216 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3219 rfc.mode = L2CAP_MODE_BASIC;
3221 rfc.max_transmit = 0;
3222 rfc.retrans_timeout = 0;
3223 rfc.monitor_timeout = 0;
3224 rfc.max_pdu_size = 0;
3226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3227 (unsigned long) &rfc);
3230 case L2CAP_MODE_ERTM:
3231 rfc.mode = L2CAP_MODE_ERTM;
3232 rfc.max_transmit = chan->max_tx;
3234 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the connection MTU less ERTM overhead. */
3236 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3237 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3239 rfc.max_pdu_size = cpu_to_le16(size);
3241 l2cap_txwin_setup(chan);
3243 rfc.txwin_size = min_t(u16, chan->tx_win,
3244 L2CAP_DEFAULT_TX_WINDOW);
3246 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3247 (unsigned long) &rfc);
3249 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3250 l2cap_add_opt_efs(&ptr, chan);
3252 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer FCS "none" when we don't need a checksum. */
3256 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3257 if (chan->fcs == L2CAP_FCS_NONE ||
3258 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3259 chan->fcs = L2CAP_FCS_NONE;
3260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3265 case L2CAP_MODE_STREAMING:
3266 l2cap_txwin_setup(chan);
3267 rfc.mode = L2CAP_MODE_STREAMING;
3269 rfc.max_transmit = 0;
3270 rfc.retrans_timeout = 0;
3271 rfc.monitor_timeout = 0;
3273 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3274 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3276 rfc.max_pdu_size = cpu_to_le16(size);
3278 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3279 (unsigned long) &rfc);
3281 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3282 l2cap_add_opt_efs(&ptr, chan);
3284 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3285 if (chan->fcs == L2CAP_FCS_NONE ||
3286 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3287 chan->fcs = L2CAP_FCS_NONE;
3288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3294 req->dcid = cpu_to_le16(chan->dcid);
3295 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated remote Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into 'data'. Walks
 * each option, records MTU/flush/RFC/FCS/EFS/EWS values, negotiates
 * the channel mode, and emits accepted/adjusted options into the
 * response. Returns the response length, or -ECONNREFUSED when the
 * request is unacceptable (e.g. unsupported mode, bad EFS, EWS without
 * high-speed support).
 */
3300 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3302 struct l2cap_conf_rsp *rsp = data;
3303 void *ptr = rsp->data;
3304 void *req = chan->conf_req;
3305 int len = chan->conf_len;
3306 int type, hint, olen;
3308 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3309 struct l2cap_conf_efs efs;
3311 u16 mtu = L2CAP_DEFAULT_MTU;
3312 u16 result = L2CAP_CONF_SUCCESS;
3315 BT_DBG("chan %p", chan);
/* First pass: decode every option from the remote's request. */
3317 while (len >= L2CAP_CONF_OPT_SIZE) {
3318 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set means "ignore if unknown" rather than reject. */
3320 hint = type & L2CAP_CONF_HINT;
3321 type &= L2CAP_CONF_MASK;
3324 case L2CAP_CONF_MTU:
3328 case L2CAP_CONF_FLUSH_TO:
3329 chan->flush_to = val;
3332 case L2CAP_CONF_QOS:
3335 case L2CAP_CONF_RFC:
3336 if (olen == sizeof(rfc))
3337 memcpy(&rfc, (void *) val, olen);
3340 case L2CAP_CONF_FCS:
3341 if (val == L2CAP_FCS_NONE)
3342 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3345 case L2CAP_CONF_EFS:
3347 if (olen == sizeof(efs))
3348 memcpy(&efs, (void *) val, olen);
3351 case L2CAP_CONF_EWS:
/* Extended windows require high-speed support on our side. */
3352 if (!chan->conn->hs_enabled)
3353 return -ECONNREFUSED;
3355 set_bit(FLAG_EXT_CTRL, &chan->flags);
3356 set_bit(CONF_EWS_RECV, &chan->conf_state);
3357 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3358 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3365 result = L2CAP_CONF_UNKNOWN;
3366 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first request/response round. */
3371 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3374 switch (chan->mode) {
3375 case L2CAP_MODE_STREAMING:
3376 case L2CAP_MODE_ERTM:
3377 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3378 chan->mode = l2cap_select_mode(rfc.mode,
3379 chan->conn->feat_mask);
3384 if (__l2cap_efs_supported(chan->conn))
3385 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3387 return -ECONNREFUSED;
3390 if (chan->mode != rfc.mode)
3391 return -ECONNREFUSED;
/* Later rounds: a mode mismatch is unacceptable; give the remote
 * one chance to retry with our mode before refusing. */
3397 if (chan->mode != rfc.mode) {
3398 result = L2CAP_CONF_UNACCEPT;
3399 rfc.mode = chan->mode;
3401 if (chan->num_conf_rsp == 1)
3402 return -ECONNREFUSED;
3404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3405 (unsigned long) &rfc);
3408 if (result == L2CAP_CONF_SUCCESS) {
3409 /* Configure output options and let the other side know
3410 * which ones we don't like. */
3412 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3413 result = L2CAP_CONF_UNACCEPT;
3416 set_bit(CONF_MTU_DONE, &chan->conf_state);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local one. */
3421 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3422 efs.stype != L2CAP_SERV_NOTRAFIC &&
3423 efs.stype != chan->local_stype) {
3425 result = L2CAP_CONF_UNACCEPT;
3427 if (chan->num_conf_req >= 1)
3428 return -ECONNREFUSED;
3430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3432 (unsigned long) &efs);
3434 /* Send PENDING Conf Rsp */
3435 result = L2CAP_CONF_PENDING;
3436 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3441 case L2CAP_MODE_BASIC:
3442 chan->fcs = L2CAP_FCS_NONE;
3443 set_bit(CONF_MODE_DONE, &chan->conf_state);
3446 case L2CAP_MODE_ERTM:
3447 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3448 chan->remote_tx_win = rfc.txwin_size;
3450 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3452 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what our link MTU can carry. */
3454 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3455 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3456 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3457 rfc.max_pdu_size = cpu_to_le16(size);
3458 chan->remote_mps = size;
3460 __l2cap_set_ertm_timeouts(chan, &rfc);
3462 set_bit(CONF_MODE_DONE, &chan->conf_state);
3464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3465 sizeof(rfc), (unsigned long) &rfc);
3467 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3468 chan->remote_id = efs.id;
3469 chan->remote_stype = efs.stype;
3470 chan->remote_msdu = le16_to_cpu(efs.msdu);
3471 chan->remote_flush_to =
3472 le32_to_cpu(efs.flush_to);
3473 chan->remote_acc_lat =
3474 le32_to_cpu(efs.acc_lat);
3475 chan->remote_sdu_itime =
3476 le32_to_cpu(efs.sdu_itime);
3477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3479 (unsigned long) &efs);
3483 case L2CAP_MODE_STREAMING:
3484 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3485 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3486 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3487 rfc.max_pdu_size = cpu_to_le16(size);
3488 chan->remote_mps = size;
3490 set_bit(CONF_MODE_DONE, &chan->conf_state);
3492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3493 (unsigned long) &rfc);
3498 result = L2CAP_CONF_UNACCEPT;
3500 memset(&rfc, 0, sizeof(rfc));
3501 rfc.mode = chan->mode;
3504 if (result == L2CAP_CONF_SUCCESS)
3505 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3507 rsp->scid = cpu_to_le16(chan->dcid);
3508 rsp->result = cpu_to_le16(result);
3509 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a remote Configuration Response (rsp/len) and build a new
 * Configuration Request into 'data', echoing adjusted options back.
 * On SUCCESS/PENDING results, commit the negotiated ERTM/streaming
 * parameters (timeouts, MPS, ack window, EFS values) to the channel.
 * Returns the new request's length or -ECONNREFUSED on hard mismatch.
 * NOTE(review): options are appended into the caller-supplied req
 * buffer via l2cap_add_conf_opt with no visible bound on ptr — confirm
 * the buffer is sized for a maximal response (this area was hardened
 * upstream, cf. CVE-2017-1000251).
 */
3514 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3515 void *data, u16 *result)
3517 struct l2cap_conf_req *req = data;
3518 void *ptr = req->data;
3521 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3522 struct l2cap_conf_efs efs;
3524 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3526 while (len >= L2CAP_CONF_OPT_SIZE) {
3527 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3530 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: reject but
 * re-offer the minimum. */
3531 if (val < L2CAP_DEFAULT_MIN_MTU) {
3532 *result = L2CAP_CONF_UNACCEPT;
3533 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3539 case L2CAP_CONF_FLUSH_TO:
3540 chan->flush_to = val;
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3545 case L2CAP_CONF_RFC:
3546 if (olen == sizeof(rfc))
3547 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3549 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3550 rfc.mode != chan->mode)
3551 return -ECONNREFUSED;
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3556 sizeof(rfc), (unsigned long) &rfc);
3559 case L2CAP_CONF_EWS:
3560 chan->ack_win = min_t(u16, val, chan->ack_win);
3561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3565 case L2CAP_CONF_EFS:
3566 if (olen == sizeof(efs))
3567 memcpy(&efs, (void *)val, olen);
3569 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3570 efs.stype != L2CAP_SERV_NOTRAFIC &&
3571 efs.stype != chan->local_stype)
3572 return -ECONNREFUSED;
3574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3575 (unsigned long) &efs);
3578 case L2CAP_CONF_FCS:
3579 if (*result == L2CAP_CONF_PENDING)
3580 if (val == L2CAP_FCS_NONE)
3581 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here. */
3587 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3588 return -ECONNREFUSED;
3590 chan->mode = rfc.mode;
3592 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3594 case L2CAP_MODE_ERTM:
3595 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3596 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3597 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3598 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3599 chan->ack_win = min_t(u16, chan->ack_win,
3602 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3603 chan->local_msdu = le16_to_cpu(efs.msdu);
3604 chan->local_sdu_itime =
3605 le32_to_cpu(efs.sdu_itime);
3606 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3607 chan->local_flush_to =
3608 le32_to_cpu(efs.flush_to);
3612 case L2CAP_MODE_STREAMING:
3613 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3617 req->dcid = cpu_to_le16(chan->dcid);
3618 req->flags = __constant_cpu_to_le16(0);
/* Fill in a bare Configuration Response header (scid/result/flags)
 * into 'data'; used for empty/reject/pending responses with no
 * option payload. Returns the response length.
 */
3623 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3624 u16 result, u16 flags)
3626 struct l2cap_conf_rsp *rsp = data;
3627 void *ptr = rsp->data;
3629 BT_DBG("chan %p", chan);
3631 rsp->scid = cpu_to_le16(chan->dcid);
3632 rsp->result = cpu_to_le16(result);
3633 rsp->flags = cpu_to_le16(flags);
/* Send the deferred success response for a connection that was held
 * pending (e.g. DEFER_SETUP): emit a Connect/Create-Channel Response
 * and, if we have not already done so, the first Configuration
 * Request for the channel.
 */
3638 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3640 struct l2cap_conn_rsp rsp;
3641 struct l2cap_conn *conn = chan->conn;
3645 rsp.scid = cpu_to_le16(chan->dcid);
3646 rsp.dcid = cpu_to_le16(chan->scid);
3647 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3648 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with CREATE_CHAN_RSP instead. */
3651 rsp_code = L2CAP_CREATE_CHAN_RSP;
3653 rsp_code = L2CAP_CONN_RSP;
3655 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3657 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the initial config request once per channel. */
3659 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3662 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3663 l2cap_build_conf_req(chan, buf), buf);
3664 chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configuration
 * Response and commit them to the channel. Sane defaults pre-fill the
 * rfc struct so a remote that omitted the options still yields valid
 * ERTM/streaming settings. No-op for basic-mode channels.
 */
3667 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3671 /* Use sane default values in case a misbehaving remote device
3672 * did not send an RFC or extended window size option.
3674 u16 txwin_ext = chan->ack_win;
3675 struct l2cap_conf_rfc rfc = {
3677 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3678 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3679 .max_pdu_size = cpu_to_le16(chan->imtu),
3680 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3683 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3685 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3688 while (len >= L2CAP_CONF_OPT_SIZE) {
3689 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3692 case L2CAP_CONF_RFC:
3693 if (olen == sizeof(rfc))
3694 memcpy(&rfc, (void *)val, olen);
3696 case L2CAP_CONF_EWS:
3703 case L2CAP_MODE_ERTM:
3704 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3705 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3706 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack window source depends on whether extended control is in use. */
3707 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3708 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3710 chan->ack_win = min_t(u16, chan->ack_win,
3713 case L2CAP_MODE_STREAMING:
3714 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it matches our outstanding
 * Information Request (remote doesn't understand it), mark the
 * feature-mask exchange done and kick pending channel setups.
 */
3718 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3719 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3722 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3724 if (cmd_len < sizeof(*rej))
3727 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3730 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3731 cmd->ident == conn->info_ident) {
3732 cancel_delayed_work(&conn->info_timer);
3734 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3735 conn->info_ident = 0;
3737 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request). Finds a listening channel for the PSM, checks link
 * security, creates the new child channel, decides the response
 * result/status (success, pending auth/author, or rejection), sends
 * the response, optionally kicks off the feature-mask Information
 * exchange, and may immediately send the first Configuration Request.
 * Returns the new channel (or NULL on rejection paths).
 */
3743 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3744 struct l2cap_cmd_hdr *cmd,
3745 u8 *data, u8 rsp_code, u8 amp_id)
3747 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3748 struct l2cap_conn_rsp rsp;
3749 struct l2cap_chan *chan = NULL, *pchan;
3750 int result, status = L2CAP_CS_NO_INFO;
3752 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3753 __le16 psm = req->psm;
3755 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3757 /* Check if we have socket listening on psm */
3758 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3759 &conn->hcon->dst, ACL_LINK);
3761 result = L2CAP_CR_BAD_PSM;
3765 mutex_lock(&conn->chan_lock);
3766 l2cap_chan_lock(pchan);
3768 /* Check if the ACL is secure enough (if not SDP) */
3769 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3770 !hci_conn_check_link_mode(conn->hcon)) {
3771 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3772 result = L2CAP_CR_SEC_BLOCK;
3776 result = L2CAP_CR_NO_MEM;
3778 /* Check if we already have channel with that dcid */
3779 if (__l2cap_get_chan_by_dcid(conn, scid))
3782 chan = pchan->ops->new_connection(pchan)
3786 /* For certain devices (ex: HID mouse), support for authentication,
3787 * pairing and bonding is optional. For such devices, inorder to avoid
3788 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3789 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3791 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3793 bacpy(&chan->src, &conn->hcon->src);
3794 bacpy(&chan->dst, &conn->hcon->dst);
3795 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3796 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3799 chan->local_amp_id = amp_id;
3801 __l2cap_chan_add(conn, chan);
3805 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3807 chan->ident = cmd->ident;
/* Only proceed past CONNECT2 once the feature exchange is done. */
3809 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3810 if (l2cap_chan_check_security(chan)) {
3811 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3812 l2cap_state_change(chan, BT_CONNECT2);
3813 result = L2CAP_CR_PEND;
3814 status = L2CAP_CS_AUTHOR_PEND;
3815 chan->ops->defer(chan);
3817 /* Force pending result for AMP controllers.
3818 * The connection will succeed after the
3819 * physical link is up.
3821 if (amp_id == AMP_ID_BREDR) {
3822 l2cap_state_change(chan, BT_CONFIG);
3823 result = L2CAP_CR_SUCCESS;
3825 l2cap_state_change(chan, BT_CONNECT2);
3826 result = L2CAP_CR_PEND;
3828 status = L2CAP_CS_NO_INFO;
3831 l2cap_state_change(chan, BT_CONNECT2);
3832 result = L2CAP_CR_PEND;
3833 status = L2CAP_CS_AUTHEN_PEND;
3836 l2cap_state_change(chan, BT_CONNECT2);
3837 result = L2CAP_CR_PEND;
3838 status = L2CAP_CS_NO_INFO;
3842 l2cap_chan_unlock(pchan);
3843 mutex_unlock(&conn->chan_lock);
3846 rsp.scid = cpu_to_le16(scid);
3847 rsp.dcid = cpu_to_le16(dcid);
3848 rsp.result = cpu_to_le16(result);
3849 rsp.status = cpu_to_le16(status);
3850 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask exchange now. */
3852 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3853 struct l2cap_info_req info;
3854 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3856 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3857 conn->info_ident = l2cap_get_ident(conn);
3859 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3861 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3862 sizeof(info), &info);
3865 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3866 result == L2CAP_CR_SUCCESS) {
3868 set_bit(CONF_REQ_SENT, &chan->conf_state);
3869 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3870 l2cap_build_conf_req(chan, buf), buf);
3871 chan->num_conf_req++;
/* Signalling entry point for L2CAP_CONN_REQ: validate length, notify
 * mgmt of the device connection (once per ACL), then hand off to
 * l2cap_connect() with the plain BR/EDR response code.
 */
3877 static int l2cap_connect_req(struct l2cap_conn *conn,
3878 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3880 struct hci_dev *hdev = conn->hcon->hdev;
3881 struct hci_conn *hcon = conn->hcon;
3883 if (cmd_len < sizeof(struct l2cap_conn_req))
3887 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3888 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3889 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3890 hcon->dst_type, 0, NULL, 0,
3892 hci_dev_unlock(hdev);
3894 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connect/Create-Channel Response. Looks the channel up by
 * scid (or by the request ident while still unbound), then: SUCCESS →
 * move to BT_CONFIG and send the first Configuration Request; PEND →
 * mark connect pending; anything else → tear the channel down with
 * ECONNREFUSED.
 */
3898 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3899 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3902 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3903 u16 scid, dcid, result, status;
3904 struct l2cap_chan *chan;
3908 if (cmd_len < sizeof(*rsp))
3911 scid = __le16_to_cpu(rsp->scid);
3912 dcid = __le16_to_cpu(rsp->dcid);
3913 result = __le16_to_cpu(rsp->result);
3914 status = __le16_to_cpu(rsp->status);
3916 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3917 dcid, scid, result, status);
3919 mutex_lock(&conn->chan_lock);
3922 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending response): match by the command ident. */
3928 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3937 l2cap_chan_lock(chan);
3940 case L2CAP_CR_SUCCESS:
3941 l2cap_state_change(chan, BT_CONFIG);
3944 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3946 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3949 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3950 l2cap_build_conf_req(chan, req), req);
3951 chan->num_conf_req++;
3955 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3959 l2cap_chan_del(chan, ECONNREFUSED);
3963 l2cap_chan_unlock(chan);
3966 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting once configuration finishes: no FCS
 * outside ERTM/streaming; CRC16 otherwise, unless both sides agreed
 * to drop it (CONF_RECV_NO_FCS).
 */
3971 static inline void set_default_fcs(struct l2cap_chan *chan)
3973 /* FCS is enabled only in ERTM or streaming mode, if one or both
3976 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3977 chan->fcs = L2CAP_FCS_NONE;
3978 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3979 chan->fcs = L2CAP_FCS_CRC16;
/* Resolve a locally-pending EFS configuration: clear the local-pending
 * flag, mark our output done, and send a SUCCESS Configuration
 * Response with the given continuation flags.
 */
3982 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3983 u8 ident, u16 flags)
3985 struct l2cap_conn *conn = chan->conn;
3987 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3990 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3991 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3993 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3994 l2cap_build_conf_rsp(chan, data,
3995 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject (reason: invalid CID) quoting the offending
 * source/destination CIDs back to the remote.
 */
3998 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4001 struct l2cap_cmd_rej_cid rej;
4003 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4004 rej.scid = __cpu_to_le16(scid);
4005 rej.dcid = __cpu_to_le16(dcid);
4007 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request. Accumulates option data
 * in chan->conf_req across continuation fragments; once complete,
 * parses it, sends the response, and — when both directions are done —
 * finalises FCS/ERTM setup and marks the channel ready. Also sends our
 * own Configuration Request if we have not yet, and resolves pending
 * EFS negotiation when both sides had PENDING responses outstanding.
 */
4010 static inline int l2cap_config_req(struct l2cap_conn *conn,
4011 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4014 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4017 struct l2cap_chan *chan;
4020 if (cmd_len < sizeof(*req))
4023 dcid = __le16_to_cpu(req->dcid);
4024 flags = __le16_to_cpu(req->flags);
4026 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4028 chan = l2cap_get_chan_by_scid(conn, dcid);
4030 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config only makes sense while connecting/configuring. */
4034 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4035 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4040 /* Reject if config buffer is too small. */
4041 len = cmd_len - sizeof(*req);
4042 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4043 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4044 l2cap_build_conf_rsp(chan, rsp,
4045 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment's options to the accumulation buffer. */
4050 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4051 chan->conf_len += len;
4053 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4054 /* Incomplete config. Send empty response. */
4055 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4056 l2cap_build_conf_rsp(chan, rsp,
4057 L2CAP_CONF_SUCCESS, flags), rsp);
4061 /* Complete config. */
4062 len = l2cap_parse_conf_req(chan, rsp);
4064 l2cap_send_disconn_req(chan, ECONNRESET);
4068 chan->ident = cmd->ident;
4069 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4070 chan->num_conf_rsp++;
4072 /* Reset config buffer. */
4075 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise and bring the channel up. */
4078 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4079 set_default_fcs(chan);
4081 if (chan->mode == L2CAP_MODE_ERTM ||
4082 chan->mode == L2CAP_MODE_STREAMING)
4083 err = l2cap_ertm_init(chan);
4086 l2cap_send_disconn_req(chan, -err);
4088 l2cap_chan_ready(chan);
4093 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4095 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4096 l2cap_build_conf_req(chan, buf), buf);
4097 chan->num_conf_req++;
4100 /* Got Conf Rsp PENDING from remote side and asume we sent
4101 Conf Rsp PENDING in the code above */
4102 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4103 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4105 /* check compatibility */
4107 /* Send rsp for BR/EDR channel */
4109 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4111 chan->ident = cmd->ident;
4115 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response by result code:
 *   SUCCESS  — commit negotiated RFC parameters;
 *   PENDING  — possibly re-parse and answer EFS pending handshakes
 *              (AMP logical link creation when EFS is in play);
 *   UNACCEPT — re-negotiate with a fresh request, bounded by
 *              L2CAP_CONF_MAX_CONF_RSP attempts;
 *   other    — error out and disconnect.
 * When both directions are done, finalise FCS/ERTM and mark ready.
 */
4119 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4120 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4123 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4124 u16 scid, flags, result;
4125 struct l2cap_chan *chan;
4126 int len = cmd_len - sizeof(*rsp);
4129 if (cmd_len < sizeof(*rsp))
4132 scid = __le16_to_cpu(rsp->scid);
4133 flags = __le16_to_cpu(rsp->flags);
4134 result = __le16_to_cpu(rsp->result);
4136 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4139 chan = l2cap_get_chan_by_scid(conn, scid);
4144 case L2CAP_CONF_SUCCESS:
4145 l2cap_conf_rfc_get(chan, rsp->data, len);
4146 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4149 case L2CAP_CONF_PENDING:
4150 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4152 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4155 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4158 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers directly; AMP must bring up the logical link
 * first when EFS is involved. */
4162 if (!chan->hs_hcon) {
4163 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4166 if (l2cap_check_efs(chan)) {
4167 amp_create_logical_link(chan);
4168 chan->ident = cmd->ident;
4174 case L2CAP_CONF_UNACCEPT:
4175 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* The re-built request must fit the local buffer. */
4178 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4179 l2cap_send_disconn_req(chan, ECONNRESET);
4183 /* throw out any old stored conf requests */
4184 result = L2CAP_CONF_SUCCESS;
4185 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4188 l2cap_send_disconn_req(chan, ECONNRESET);
4192 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4193 L2CAP_CONF_REQ, len, req);
4194 chan->num_conf_req++;
4195 if (result != L2CAP_CONF_SUCCESS)
4201 l2cap_chan_set_err(chan, ECONNRESET);
4203 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4204 l2cap_send_disconn_req(chan, ECONNRESET);
4208 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4211 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4213 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4214 set_default_fcs(chan);
4216 if (chan->mode == L2CAP_MODE_ERTM ||
4217 chan->mode == L2CAP_MODE_STREAMING)
4218 err = l2cap_ertm_init(chan);
4221 l2cap_send_disconn_req(chan, -err);
4223 l2cap_chan_ready(chan);
4227 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: find the channel by our
 * CID (the request's dcid), echo a Disconnection Response, then shut
 * the channel down and release it. An extra hold keeps the chan alive
 * across the unlocked ops->close() call.
 */
4231 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4232 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4235 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4236 struct l2cap_disconn_rsp rsp;
4238 struct l2cap_chan *chan;
4240 if (cmd_len != sizeof(*req))
4243 scid = __le16_to_cpu(req->scid);
4244 dcid = __le16_to_cpu(req->dcid);
4246 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4248 mutex_lock(&conn->chan_lock);
4250 chan = __l2cap_get_chan_by_scid(conn, dcid);
4252 mutex_unlock(&conn->chan_lock);
4253 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4257 l2cap_chan_lock(chan);
4259 rsp.dcid = cpu_to_le16(chan->scid);
4260 rsp.scid = cpu_to_le16(chan->dcid);
4261 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4263 chan->ops->set_shutdown(chan);
4265 l2cap_chan_hold(chan);
4266 l2cap_chan_del(chan, ECONNRESET);
4268 l2cap_chan_unlock(chan);
4270 chan->ops->close(chan);
4271 l2cap_chan_put(chan);
4273 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: our earlier request was
 * acknowledged, so remove the channel (error 0 = clean close) and
 * release it, holding a reference across the unlocked close().
 */
4278 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4279 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4282 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4284 struct l2cap_chan *chan;
4286 if (cmd_len != sizeof(*rsp))
4289 scid = __le16_to_cpu(rsp->scid);
4290 dcid = __le16_to_cpu(rsp->dcid);
4292 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4294 mutex_lock(&conn->chan_lock);
4296 chan = __l2cap_get_chan_by_scid(conn, scid);
4298 mutex_unlock(&conn->chan_lock);
4302 l2cap_chan_lock(chan);
4304 l2cap_chan_hold(chan);
4305 l2cap_chan_del(chan, 0);
4307 l2cap_chan_unlock(chan);
4309 chan->ops->close(chan);
4310 l2cap_chan_put(chan);
4312 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request. FEAT_MASK → report our
 * feature mask (adding ERTM/streaming and, with high-speed enabled,
 * extended flow/window bits); FIXED_CHAN → report the fixed-channel
 * map (with the A2MP bit tracking hs_enabled); anything else →
 * NOTSUPP.
 */
4317 static inline int l2cap_information_req(struct l2cap_conn *conn,
4318 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4321 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4324 if (cmd_len != sizeof(*req))
4327 type = __le16_to_cpu(req->type);
4329 BT_DBG("type 0x%4.4x", type);
4331 if (type == L2CAP_IT_FEAT_MASK) {
4333 u32 feat_mask = l2cap_feat_mask;
4334 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4335 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4336 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4338 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4340 if (conn->hs_enabled)
4341 feat_mask |= L2CAP_FEAT_EXT_FLOW
4342 | L2CAP_FEAT_EXT_WINDOW;
4344 put_unaligned_le32(feat_mask, rsp->data);
4345 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4347 } else if (type == L2CAP_IT_FIXED_CHAN) {
4349 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high-speed is on. */
4351 if (conn->hs_enabled)
4352 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4354 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4356 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4357 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4358 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4359 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4362 struct l2cap_info_rsp rsp;
4363 rsp.type = cpu_to_le16(type);
4364 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4365 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.  A feature-mask answer
 * may trigger a follow-up fixed-channel request; once the exchange is
 * complete, pending channels are started via l2cap_conn_start().
 */
4372 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4373 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4376 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4379 if (cmd_len < sizeof(*rsp))
4382 type = __le16_to_cpu(rsp->type);
4383 result = __le16_to_cpu(rsp->result);
4385 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4387 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4388 if (cmd->ident != conn->info_ident ||
4389 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4392 cancel_delayed_work(&conn->info_timer);
/* Any failure result ends the info exchange immediately. */
4394 if (result != L2CAP_IR_SUCCESS) {
4395 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4396 conn->info_ident = 0;
4398 l2cap_conn_start(conn);
4404 case L2CAP_IT_FEAT_MASK:
4405 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask which ones before starting. */
4407 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4408 struct l2cap_info_req req;
4409 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4411 conn->info_ident = l2cap_get_ident(conn);
4413 l2cap_send_cmd(conn, conn->info_ident,
4414 L2CAP_INFO_REQ, sizeof(req), &req);
4416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4417 conn->info_ident = 0;
4419 l2cap_conn_start(conn);
4423 case L2CAP_IT_FIXED_CHAN:
/* Only the first octet of the fixed channel map is cached. */
4424 conn->fixed_chan_mask = rsp->data[0];
4425 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4426 conn->info_ident = 0;
4428 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).  amp_id 0 means
 * create on BR/EDR; otherwise the AMP controller id is validated and the
 * resulting channel is associated with the AMP link.  On any validation
 * failure a BAD_AMP response is sent.
 */
4435 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4436 struct l2cap_cmd_hdr *cmd,
4437 u16 cmd_len, void *data)
4439 struct l2cap_create_chan_req *req = data;
4440 struct l2cap_create_chan_rsp rsp;
4441 struct l2cap_chan *chan;
4442 struct hci_dev *hdev;
4445 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when high speed is enabled. */
4448 if (!conn->hs_enabled)
4451 psm = le16_to_cpu(req->psm);
4452 scid = le16_to_cpu(req->scid);
4454 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4456 /* For controller id 0 make BR/EDR connection */
4457 if (req->amp_id == AMP_ID_BREDR) {
4458 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4463 /* Validate AMP controller id */
4464 hdev = hci_dev_get(req->amp_id);
4468 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4473 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4476 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4477 struct hci_conn *hs_hcon;
4479 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link for this peer: reject with invalid CID. */
4483 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4488 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4490 mgr->bredr_chan = chan;
4491 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP channels. */
4492 chan->fcs = L2CAP_FCS_NONE;
4493 conn->mtu = hdev->block_mtu;
/* Error path: report the AMP controller as bad. */
4502 rsp.scid = cpu_to_le16(scid);
4503 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4504 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4506 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan towards @dest_amp_id and arm the
 * move timeout.  The allocated ident is stored so the response can be
 * matched to this channel.
 */
4512 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4514 struct l2cap_move_chan_req req;
4517 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4519 ident = l2cap_get_ident(chan->conn);
4520 chan->ident = ident;
4522 req.icid = cpu_to_le16(chan->scid);
4523 req.dest_amp_id = dest_amp_id;
4525 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4528 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, echoing the initiator's CID
 * (our dcid) and reusing the ident saved from the request.
 */
4531 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4533 struct l2cap_move_chan_rsp rsp;
4535 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4537 rsp.icid = cpu_to_le16(chan->dcid);
4538 rsp.result = cpu_to_le16(result);
4540 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result (confirmed/unconfirmed) under
 * a fresh ident, and arm the move timeout while waiting for the
 * confirm response.
 */
4544 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4546 struct l2cap_move_chan_cfm cfm;
4548 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4550 chan->ident = l2cap_get_ident(chan->conn);
4552 cfm.icid = cpu_to_le16(chan->scid);
4553 cfm.result = cpu_to_le16(result);
4555 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4558 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid — used when
 * no channel object could be located for the move exchange.
 */
4561 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4563 struct l2cap_move_chan_cfm cfm;
4565 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4567 cfm.icid = cpu_to_le16(icid);
4568 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4570 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid using the ident of the
 * confirm being answered.
 */
4574 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4577 struct l2cap_move_chan_cfm_rsp rsp;
4579 BT_DBG("icid 0x%4.4x", icid);
4581 rsp.icid = cpu_to_le16(icid);
4582 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link state.  The actual
 * link teardown is still a placeholder (see comment below).
 */
4585 static void __release_logical_link(struct l2cap_chan *chan)
4587 chan->hs_hchan = NULL;
4588 chan->hs_hcon = NULL;
4590 /* Placeholder - release the logical link */
/* Clean up after a failed logical link setup.  A not-yet-connected
 * channel is disconnected outright; a connected channel aborts any
 * in-progress AMP move according to its move role.
 */
4593 static void l2cap_logical_fail(struct l2cap_chan *chan)
4595 /* Logical link setup failed */
4596 if (chan->state != BT_CONNECTED) {
4597 /* Create channel failure, disconnect */
4598 l2cap_send_disconn_req(chan, ECONNRESET);
4602 switch (chan->move_role) {
4603 case L2CAP_MOVE_ROLE_RESPONDER:
4604 l2cap_move_done(chan);
4605 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4607 case L2CAP_MOVE_ROLE_INITIATOR:
4608 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4609 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4610 /* Remote has only sent pending or
4611 * success responses, clean up
4613 l2cap_move_done(chan);
4616 /* Other amp move states imply that the move
4617 * has already aborted
4619 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete an AMP channel creation once the logical link is up: attach
 * the hci_chan, answer the pending EFS config, and finish ERTM init if
 * the configuration exchange is already done.
 */
4624 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4625 struct hci_chan *hchan)
4627 struct l2cap_conf_rsp rsp;
4629 chan->hs_hchan = hchan;
4630 chan->hs_hcon->l2cap_data = chan->conn;
4632 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4634 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4637 set_default_fcs(chan);
4639 err = l2cap_ertm_init(chan);
/* ERTM init failure tears the channel down; success makes it ready. */
4641 l2cap_send_disconn_req(chan, -err);
4643 l2cap_chan_ready(chan);
/* Advance the AMP move state machine once the logical link completes.
 * The next step depends on whether we are initiator or responder and
 * whether the local side is busy; an unexpected state releases the link.
 */
4647 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4648 struct hci_chan *hchan)
4650 chan->hs_hcon = hchan->conn;
4651 chan->hs_hcon->l2cap_data = chan->conn;
4653 BT_DBG("move_state %d", chan->move_state);
4655 switch (chan->move_state) {
4656 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4657 /* Move confirm will be sent after a success
4658 * response is received
4660 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4662 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4663 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4664 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4665 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4666 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4667 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4668 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4669 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4670 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4674 /* Move was not in expected state, free the channel */
4675 __release_logical_link(chan);
4677 chan->move_state = L2CAP_MOVE_STABLE;
4681 /* Call with chan locked */
/* Logical link confirmation entry point: on failure, clean up and drop
 * the link; on success, dispatch to the create-completion path (channel
 * not yet connected) or the move-completion path.
 */
4682 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4685 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4688 l2cap_logical_fail(chan);
4689 __release_logical_link(chan);
4693 if (chan->state != BT_CONNECTED) {
4694 /* Ignore logical link if channel is on BR/EDR */
4695 if (chan->local_amp_id != AMP_ID_BREDR)
4696 l2cap_logical_finish_create(chan, hchan);
4698 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the other controller type.  From BR/EDR this
 * starts physical link setup (only if policy prefers AMP); from an AMP
 * it immediately requests a move back to BR/EDR (dest id 0).
 */
4702 void l2cap_move_start(struct l2cap_chan *chan)
4704 BT_DBG("chan %p", chan);
4706 if (chan->local_amp_id == AMP_ID_BREDR) {
4707 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4709 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4710 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4711 /* Placeholder - start physical link setup */
4713 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4714 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4716 l2cap_move_setup(chan);
4717 l2cap_send_move_chan_req(chan, 0);
/* Finish channel creation after physical link setup.  Outgoing channels
 * either proceed with a Create Channel request on the AMP or fall back
 * to a plain BR/EDR connect; incoming channels get a success/no-mem
 * Create Channel response, followed by the first config request.
 */
4721 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4722 u8 local_amp_id, u8 remote_amp_id)
4724 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4725 local_amp_id, remote_amp_id);
/* FCS is not used on AMP channels. */
4727 chan->fcs = L2CAP_FCS_NONE;
4729 /* Outgoing channel on AMP */
4730 if (chan->state == BT_CONNECT) {
4731 if (result == L2CAP_CR_SUCCESS) {
4732 chan->local_amp_id = local_amp_id;
4733 l2cap_send_create_chan_req(chan, remote_amp_id);
4735 /* Revert to BR/EDR connect */
4736 l2cap_send_conn_req(chan);
4742 /* Incoming channel on AMP */
4743 if (__l2cap_no_conn_pending(chan)) {
4744 struct l2cap_conn_rsp rsp;
4746 rsp.scid = cpu_to_le16(chan->dcid);
4747 rsp.dcid = cpu_to_le16(chan->scid);
4749 if (result == L2CAP_CR_SUCCESS) {
4750 /* Send successful response */
4751 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4752 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4754 /* Send negative response */
4755 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4756 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4759 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, kick off configuration immediately. */
4762 if (result == L2CAP_CR_SUCCESS) {
4763 l2cap_state_change(chan, BT_CONFIG);
4764 set_bit(CONF_REQ_SENT, &chan->conf_state);
4765 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4767 l2cap_build_conf_req(chan, buf), buf);
4768 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving and send the Move
 * Channel Request towards @remote_amp_id.
 */
4773 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4776 l2cap_move_setup(chan);
4777 chan->move_id = local_amp_id;
4778 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4780 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move based on logical link availability.
 * NOTE(review): hchan is initialised to NULL and the lookup is still a
 * placeholder, so the BT_CONNECTED branch depends on code not yet
 * present in this extract.
 */
4783 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4785 struct hci_chan *hchan = NULL;
4787 /* Placeholder - get hci_chan for logical link */
4790 if (hchan->state == BT_CONNECTED) {
4791 /* Logical link is ready to go */
4792 chan->hs_hcon = hchan->conn;
4793 chan->hs_hcon->l2cap_data = chan->conn;
4794 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4795 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4797 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4799 /* Wait for logical link to be ready */
4800 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4803 /* Logical link not available */
4804 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, send an error response first;
 * then return the channel to the stable state and resume ERTM
 * transmission.
 */
4808 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4810 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4812 if (result == -EINVAL)
4813 rsp_result = L2CAP_MR_BAD_ID;
4815 rsp_result = L2CAP_MR_NOT_ALLOWED;
4817 l2cap_send_move_chan_rsp(chan, rsp_result);
4820 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4821 chan->move_state = L2CAP_MOVE_STABLE;
4823 /* Restart data transmission */
4824 l2cap_ertm_send(chan);
4827 /* Invoke with locked chan */
/* Physical link confirmation: dispatch to create-completion for channels
 * not yet connected, cancel the move on failure, or continue per the
 * channel's move role.  Disconnecting/closed channels are ignored.
 */
4828 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4830 u8 local_amp_id = chan->local_amp_id;
4831 u8 remote_amp_id = chan->remote_amp_id;
4833 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4834 chan, result, local_amp_id, remote_amp_id);
4836 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4837 l2cap_chan_unlock(chan);
4841 if (chan->state != BT_CONNECTED) {
4842 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4843 } else if (result != L2CAP_MR_SUCCESS) {
4844 l2cap_do_move_cancel(chan, result);
4846 switch (chan->move_role) {
4847 case L2CAP_MOVE_ROLE_INITIATOR:
4848 l2cap_do_move_initiate(chan, local_amp_id,
4851 case L2CAP_MOVE_ROLE_RESPONDER:
4852 l2cap_do_move_respond(chan, result);
4855 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request: validate the channel, mode,
 * policy and destination controller; detect move collisions; then become
 * the move responder and answer with success/pending/error as
 * appropriate.
 */
4861 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4862 struct l2cap_cmd_hdr *cmd,
4863 u16 cmd_len, void *data)
4865 struct l2cap_move_chan_req *req = data;
4866 struct l2cap_move_chan_rsp rsp;
4867 struct l2cap_chan *chan;
4869 u16 result = L2CAP_MR_NOT_ALLOWED;
4871 if (cmd_len != sizeof(*req))
4874 icid = le16_to_cpu(req->icid);
4876 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4878 if (!conn->hs_enabled)
4881 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: answer NOT_ALLOWED without a chan object. */
4883 rsp.icid = cpu_to_le16(icid);
4884 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4885 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4890 chan->ident = cmd->ident;
/* Only dynamic-CID ERTM/streaming channels may move, and only if the
 * channel policy allows leaving BR/EDR.
 */
4892 if (chan->scid < L2CAP_CID_DYN_START ||
4893 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4894 (chan->mode != L2CAP_MODE_ERTM &&
4895 chan->mode != L2CAP_MODE_STREAMING)) {
4896 result = L2CAP_MR_NOT_ALLOWED;
4897 goto send_move_response;
4900 if (chan->local_amp_id == req->dest_amp_id) {
4901 result = L2CAP_MR_SAME_ID;
4902 goto send_move_response;
4905 if (req->dest_amp_id != AMP_ID_BREDR) {
4906 struct hci_dev *hdev;
4907 hdev = hci_dev_get(req->dest_amp_id);
4908 if (!hdev || hdev->dev_type != HCI_AMP ||
4909 !test_bit(HCI_UP, &hdev->flags)) {
4913 result = L2CAP_MR_BAD_ID;
4914 goto send_move_response;
4919 /* Detect a move collision. Only send a collision response
4920 * if this side has "lost", otherwise proceed with the move.
4921 * The winner has the larger bd_addr.
4923 if ((__chan_is_moving(chan) ||
4924 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4925 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4926 result = L2CAP_MR_COLLISION;
4927 goto send_move_response;
4930 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4931 l2cap_move_setup(chan);
4932 chan->move_id = req->dest_amp_id;
4935 if (req->dest_amp_id == AMP_ID_BREDR) {
4936 /* Moving to BR/EDR */
4937 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4938 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4939 result = L2CAP_MR_PEND;
4941 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4942 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: physical link acceptance is still stubbed out. */
4945 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4946 /* Placeholder - uncomment when amp functions are available */
4947 /*amp_accept_physical(chan, req->dest_amp_id);*/
4948 result = L2CAP_MR_PEND;
4952 l2cap_send_move_chan_rsp(chan, result);
4954 l2cap_chan_unlock(chan);
/* Initiator side: process a success/pending Move Channel Response and
 * advance the move state machine; any unexpected state aborts the move
 * with an UNCONFIRMED confirm.
 */
4959 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4961 struct l2cap_chan *chan;
4962 struct hci_chan *hchan = NULL;
4964 chan = l2cap_get_chan_by_scid(conn, icid);
/* Channel vanished: confirm unconfirmed by icid alone. */
4966 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A pending result restarts the (extended) move timer. */
4970 __clear_chan_timer(chan);
4971 if (result == L2CAP_MR_PEND)
4972 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4974 switch (chan->move_state) {
4975 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4976 /* Move confirm will be sent when logical link
4979 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4981 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4982 if (result == L2CAP_MR_PEND) {
4984 } else if (test_bit(CONN_LOCAL_BUSY,
4985 &chan->conn_state)) {
4986 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4988 /* Logical link is up or moving to BR/EDR,
4991 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4992 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4995 case L2CAP_MOVE_WAIT_RSP:
4997 if (result == L2CAP_MR_SUCCESS) {
4998 /* Remote is ready, send confirm immediately
4999 * after logical link is ready
5001 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5003 /* Both logical link and move success
5004 * are required to confirm
5006 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5009 /* Placeholder - get hci_chan for logical link */
/* NOTE(review): hchan stays NULL until the placeholder above is
 * implemented; the code below assumes it has been populated.
 */
5011 /* Logical link not available */
5012 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5016 /* If the logical link is not yet connected, do not
5017 * send confirmation.
5019 if (hchan->state != BT_CONNECTED)
5022 /* Logical link is already ready to go */
5024 chan->hs_hcon = hchan->conn;
5025 chan->hs_hcon->l2cap_data = chan->conn;
5027 if (result == L2CAP_MR_SUCCESS) {
5028 /* Can confirm now */
5029 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5031 /* Now only need move success
5034 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5037 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5040 /* Any other amp move state means the move failed. */
5041 chan->move_id = chan->local_amp_id;
5042 l2cap_move_done(chan);
5043 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5046 l2cap_chan_unlock(chan);
/* Handle an error Move Channel Response.  A collision makes us switch to
 * the responder role; any other error cancels the move.  In all cases an
 * UNCONFIRMED confirm is sent.
 */
5049 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5052 struct l2cap_chan *chan;
5054 chan = l2cap_get_chan_by_ident(conn, ident);
5056 /* Could not locate channel, icid is best guess */
5057 l2cap_send_move_chan_cfm_icid(conn, icid);
5061 __clear_chan_timer(chan);
5063 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5064 if (result == L2CAP_MR_COLLISION) {
5065 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5067 /* Cleanup - cancel move */
5068 chan->move_id = chan->local_amp_id;
5069 l2cap_move_done(chan);
5073 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5075 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: success/pending continues
 * the move, any other result routes to the failure path.
 */
5078 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5079 struct l2cap_cmd_hdr *cmd,
5080 u16 cmd_len, void *data)
5082 struct l2cap_move_chan_rsp *rsp = data;
5085 if (cmd_len != sizeof(*rsp))
5088 icid = le16_to_cpu(rsp->icid);
5089 result = le16_to_cpu(rsp->result);
5091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5093 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5094 l2cap_move_continue(conn, icid, result);
5096 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit or roll back the
 * controller switch and always answer with a Confirm Response (required
 * by the spec even if the icid is unknown).
 */
5101 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5102 struct l2cap_cmd_hdr *cmd,
5103 u16 cmd_len, void *data)
5105 struct l2cap_move_chan_cfm *cfm = data;
5106 struct l2cap_chan *chan;
5109 if (cmd_len != sizeof(*cfm))
5112 icid = le16_to_cpu(cfm->icid);
5113 result = le16_to_cpu(cfm->result);
5115 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5117 chan = l2cap_get_chan_by_dcid(conn, icid);
5119 /* Spec requires a response even if the icid was not found */
5120 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5124 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5125 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; dropping back to BR/EDR
 * releases the high-speed logical link.
 */
5126 chan->local_amp_id = chan->move_id;
5127 if (chan->local_amp_id == AMP_ID_BREDR)
5128 __release_logical_link(chan);
5130 chan->move_id = chan->local_amp_id;
5133 l2cap_move_done(chan);
5136 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5138 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: this ends the move
 * exchange on the confirming side, committing the new controller id.
 */
5143 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5144 struct l2cap_cmd_hdr *cmd,
5145 u16 cmd_len, void *data)
5147 struct l2cap_move_chan_cfm_rsp *rsp = data;
5148 struct l2cap_chan *chan;
5151 if (cmd_len != sizeof(*rsp))
5154 icid = le16_to_cpu(rsp->icid);
5156 BT_DBG("icid 0x%4.4x", icid);
5158 chan = l2cap_get_chan_by_scid(conn, icid);
5162 __clear_chan_timer(chan);
5164 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5165 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a logical link still attached: release it. */
5167 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5168 __release_logical_link(chan);
5170 l2cap_move_done(chan);
5173 l2cap_chan_unlock(chan);
/* Validate LE connection parameters: interval min/max bounds, supervision
 * timeout range, and that latency fits within the timeout-derived limit.
 * NOTE(review): the rejection/return lines are among those dropped from
 * this extract (gaps in the embedded line numbers).
 */
5178 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5183 if (min > max || min < 6 || max > 3200)
5186 if (to_multiplier < 10 || to_multiplier > 3200)
5189 if (max >= to_multiplier * 8)
/* Maximum allowable latency such that the link outlives the timeout. */
5192 max_latency = (to_multiplier * 8 / max) - 1;
5193 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the parameters, send accept/reject, and on accept apply them
 * to the link via hci_le_conn_update().
 */
5199 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5200 struct l2cap_cmd_hdr *cmd,
5201 u16 cmd_len, u8 *data)
5203 struct hci_conn *hcon = conn->hcon;
5204 struct l2cap_conn_param_update_req *req;
5205 struct l2cap_conn_param_update_rsp rsp;
5206 u16 min, max, latency, to_multiplier;
/* Only the master may act on this request. */
5209 if (!(hcon->link_mode & HCI_LM_MASTER))
5212 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5215 req = (struct l2cap_conn_param_update_req *) data;
5216 min = __le16_to_cpu(req->min);
5217 max = __le16_to_cpu(req->max);
5218 latency = __le16_to_cpu(req->latency);
5219 to_multiplier = __le16_to_cpu(req->to_multiplier);
5221 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5222 min, max, latency, to_multiplier);
5224 memset(&rsp, 0, sizeof(rsp));
5226 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5228 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5230 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5232 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted parameters are pushed down to the controller. */
5236 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE credit-based Connection Response: on success adopt the
 * remote parameters and mark the channel ready; otherwise delete it with
 * ECONNREFUSED.  The channel is located by the ident of our request.
 */
5241 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5242 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5245 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5246 u16 dcid, mtu, mps, credits, result;
5247 struct l2cap_chan *chan;
5250 if (cmd_len < sizeof(*rsp))
5253 dcid = __le16_to_cpu(rsp->dcid);
5254 mtu = __le16_to_cpu(rsp->mtu);
5255 mps = __le16_to_cpu(rsp->mps);
5256 credits = __le16_to_cpu(rsp->credits);
5257 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal MTU/MPS for LE credit-based channels. */
5259 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5262 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5263 dcid, mtu, mps, credits, result);
5265 mutex_lock(&conn->chan_lock);
5267 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5275 l2cap_chan_lock(chan);
5278 case L2CAP_CR_SUCCESS:
5282 chan->remote_mps = mps;
5283 l2cap_chan_ready(chan);
5287 l2cap_chan_del(chan, ECONNREFUSED);
5291 l2cap_chan_unlock(chan);
5294 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged as errors.
 */
5299 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5300 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5305 switch (cmd->code) {
5306 case L2CAP_COMMAND_REJ:
5307 l2cap_command_rej(conn, cmd, cmd_len, data);
5310 case L2CAP_CONN_REQ:
5311 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create-Channel responses share one handler. */
5314 case L2CAP_CONN_RSP:
5315 case L2CAP_CREATE_CHAN_RSP:
5316 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5319 case L2CAP_CONF_REQ:
5320 err = l2cap_config_req(conn, cmd, cmd_len, data);
5323 case L2CAP_CONF_RSP:
5324 l2cap_config_rsp(conn, cmd, cmd_len, data);
5327 case L2CAP_DISCONN_REQ:
5328 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5331 case L2CAP_DISCONN_RSP:
5332 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo request: reflect the payload back unchanged. */
5335 case L2CAP_ECHO_REQ:
5336 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5339 case L2CAP_ECHO_RSP:
5342 case L2CAP_INFO_REQ:
5343 err = l2cap_information_req(conn, cmd, cmd_len, data);
5346 case L2CAP_INFO_RSP:
5347 l2cap_information_rsp(conn, cmd, cmd_len, data);
5350 case L2CAP_CREATE_CHAN_REQ:
5351 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5354 case L2CAP_MOVE_CHAN_REQ:
5355 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5358 case L2CAP_MOVE_CHAN_RSP:
5359 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5362 case L2CAP_MOVE_CHAN_CFM:
5363 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5366 case L2CAP_MOVE_CHAN_CFM_RSP:
5367 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5371 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only parameter-update requests and
 * LE connect responses are processed here; unknown opcodes are logged.
 */
5379 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5380 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5383 switch (cmd->code) {
5384 case L2CAP_COMMAND_REJ:
5387 case L2CAP_CONN_PARAM_UPDATE_REQ:
5388 return l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5390 case L2CAP_CONN_PARAM_UPDATE_RSP:
5393 case L2CAP_LE_CONN_RSP:
5394 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5398 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: validate link
 * type, header and length, dispatch the single command, and send a
 * COMMAND_REJ if the handler reports an error.
 */
5403 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5404 struct sk_buff *skb)
5406 struct hci_conn *hcon = conn->hcon;
5407 struct l2cap_cmd_hdr *cmd;
5411 if (hcon->type != LE_LINK)
5414 if (skb->len < L2CAP_CMD_HDR_SIZE)
5417 cmd = (void *) skb->data;
5418 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5420 len = le16_to_cpu(cmd->len);
5422 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling PDUs carry exactly one command; ident 0 is invalid. */
5424 if (len != skb->len || !cmd->ident) {
5425 BT_DBG("corrupted command");
5429 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5431 struct l2cap_cmd_rej_unk rej;
5433 BT_ERR("Wrong link type (%d)", err);
5435 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5436 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE, a
 * single PDU may contain several concatenated commands, so loop over the
 * buffer, copying each header and dispatching its payload.
 */
5444 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5445 struct sk_buff *skb)
5447 struct hci_conn *hcon = conn->hcon;
5448 u8 *data = skb->data;
5450 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of every signaling packet. */
5453 l2cap_raw_recv(conn, skb);
5455 if (hcon->type != ACL_LINK)
5458 while (len >= L2CAP_CMD_HDR_SIZE) {
5460 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5461 data += L2CAP_CMD_HDR_SIZE;
5462 len -= L2CAP_CMD_HDR_SIZE;
5464 cmd_len = le16_to_cpu(cmd.len);
5466 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5469 if (cmd_len > len || !cmd.ident) {
5470 BT_DBG("corrupted command");
5474 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5476 struct l2cap_cmd_rej_unk rej;
5478 BT_ERR("Wrong link type (%d)", err);
5480 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5481 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify and strip the CRC16 FCS trailer of a received ERTM/streaming
 * frame.  The CRC covers the L2CAP header (which precedes skb->data by
 * hdr_size) plus the remaining payload.
 */
5493 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5495 u16 our_fcs, rcv_fcs;
5498 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5499 hdr_size = L2CAP_EXT_HDR_SIZE;
5501 hdr_size = L2CAP_ENH_HDR_SIZE;
5503 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim first; the FCS bytes then sit just past the new skb->len. */
5504 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5505 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5506 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5508 if (our_fcs != rcv_fcs)
/* Send the appropriate frame with the F-bit set after a poll: RNR when
 * locally busy, otherwise pending I-frames, falling back to an RR
 * S-frame if nothing else carried the F-bit.
 */
5514 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5516 struct l2cap_ctrl control;
5518 BT_DBG("chan %p", chan);
5520 memset(&control, 0, sizeof(control));
5523 control.reqseq = chan->buffer_seq;
5524 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5526 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5527 control.super = L2CAP_SUPER_RNR;
5528 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy state: restart retransmission timer
 * if frames are still unacknowledged.
 */
5531 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5532 chan->unacked_frames > 0)
5533 __set_retrans_timer(chan);
5535 /* Send pending iframes */
5536 l2cap_ertm_send(chan);
5538 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5539 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5540 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5543 control.super = L2CAP_SUPER_RR;
5544 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's fragment list (tracked through *last_frag)
 * and keep the aggregate len/data_len/truesize accounting consistent.
 */
5548 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5549 struct sk_buff **last_frag)
5551 /* skb->len reflects data in skb as well as all fragments
5552 * skb->data_len reflects only data in fragments
5554 if (!skb_has_frag_list(skb))
5555 skb_shinfo(skb)->frag_list = new_frag;
5557 new_frag->next = NULL;
5559 (*last_frag)->next = new_frag;
5560 *last_frag = new_frag;
5562 skb->len += new_frag->len;
5563 skb->data_len += new_frag->len;
5564 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * unsegmented frames go straight to ops->recv(); START frames begin a
 * new chan->sdu; CONTINUE/END frames are appended, and a completed SDU
 * is delivered and the reassembly state reset.  Oversized or mismatched
 * SDUs are discarded on the error path at the bottom.
 */
5567 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5568 struct l2cap_ctrl *control)
5572 switch (control->sar) {
5573 case L2CAP_SAR_UNSEGMENTED:
5577 err = chan->ops->recv(chan, skb);
5580 case L2CAP_SAR_START:
/* Total SDU length is carried in the first two payload bytes. */
5584 chan->sdu_len = get_unaligned_le16(skb->data);
5585 skb_pull(skb, L2CAP_SDULEN_SIZE);
5587 if (chan->sdu_len > chan->imtu) {
5592 if (skb->len >= chan->sdu_len)
5596 chan->sdu_last_frag = skb;
5602 case L2CAP_SAR_CONTINUE:
5606 append_skb_frag(chan->sdu, skb,
5607 &chan->sdu_last_frag);
/* A CONTINUE fragment must not already complete the SDU. */
5610 if (chan->sdu->len >= chan->sdu_len)
5620 append_skb_frag(chan->sdu, skb,
5621 &chan->sdu_last_frag);
/* END fragment must make the SDU exactly the announced length. */
5624 if (chan->sdu->len != chan->sdu_len)
5627 err = chan->ops->recv(chan, chan->sdu);
5630 /* Reassembly complete */
5632 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
5640 kfree_skb(chan->sdu);
5642 chan->sdu_last_frag = NULL;
/* Placeholder for re-segmenting queued data after a channel move; the
 * body is not present in this extract.
 */
5649 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM tx state machine; a no-op
 * for channels not in ERTM mode.
 */
5655 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5659 if (chan->mode != L2CAP_MODE_ERTM)
5662 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5663 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of in-sequence frames, delivering each to
 * l2cap_reassemble_sdu() and advancing buffer_seq, until a gap is hit,
 * local busy is set, or the queue empties (then ack and return to the
 * normal RECV state).
 */
5666 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5669 /* Pass sequential frames to l2cap_reassemble_sdu()
5670 * until a gap is encountered.
5673 BT_DBG("chan %p", chan);
5675 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5676 struct sk_buff *skb;
5677 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5678 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5680 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5685 skb_unlink(skb, &chan->srej_q);
5686 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5687 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5692 if (skb_queue_empty(&chan->srej_q)) {
5693 chan->rx_state = L2CAP_RX_STATE_RECV;
5694 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the requested I-frame,
 * enforcing the retry limit, and manage the F-bit / SREJ-actioned
 * bookkeeping for poll and final frames.  An impossible reqseq (equal to
 * next_tx_seq) disconnects the channel.
 */
5700 static void l2cap_handle_srej(struct l2cap_chan *chan,
5701 struct l2cap_ctrl *control)
5703 struct sk_buff *skb;
5705 BT_DBG("chan %p, control %p", chan, control);
5707 if (control->reqseq == chan->next_tx_seq) {
5708 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5709 l2cap_send_disconn_req(chan, ECONNRESET);
5713 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5716 BT_DBG("Seq %d not available for retransmission",
/* Too many retransmissions of this frame: give up on the link. */
5721 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5722 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5723 l2cap_send_disconn_req(chan, ECONNRESET);
5727 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5729 if (control->poll) {
5730 l2cap_pass_to_tx(chan, control);
5732 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5733 l2cap_retransmit(chan, control);
5734 l2cap_ertm_send(chan);
5736 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5737 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5738 chan->srej_save_reqseq = control->reqseq;
5741 l2cap_pass_to_tx_fbit(chan, control);
5743 if (control->final) {
/* Only skip the retransmit if this final answers the SREJ we
 * already acted on for the same sequence number.
 */
5744 if (chan->srej_save_reqseq != control->reqseq ||
5745 !test_and_clear_bit(CONN_SREJ_ACT,
5747 l2cap_retransmit(chan, control);
5749 l2cap_retransmit(chan, control);
5750 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5751 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5752 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all frames from reqseq
 * onward, enforcing the retry limit, with REJ-actioned bookkeeping for
 * final frames.  An impossible reqseq disconnects the channel.
 */
5758 static void l2cap_handle_rej(struct l2cap_chan *chan,
5759 struct l2cap_ctrl *control)
5761 struct sk_buff *skb;
5763 BT_DBG("chan %p, control %p", chan, control);
5765 if (control->reqseq == chan->next_tx_seq) {
5766 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5767 l2cap_send_disconn_req(chan, ECONNRESET);
5771 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5773 if (chan->max_tx && skb &&
5774 bt_cb(skb)->control.retries >= chan->max_tx) {
5775 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5776 l2cap_send_disconn_req(chan, ECONNRESET);
5780 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5782 l2cap_pass_to_tx(chan, control);
5784 if (control->final) {
/* Retransmit unless this final answers a REJ we already acted on. */
5785 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5786 l2cap_retransmit_all(chan, control);
5788 l2cap_retransmit_all(chan, control);
5789 l2cap_ertm_send(chan);
5790 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5791 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's TxSeq relative to the receive window.
 * Returns one of the L2CAP_TXSEQ_* codes that drive the ERTM RX state
 * machine: expected, duplicate, unexpected (gap -> SREJ), SREJ-related
 * variants, or invalid (optionally ignorable — see "double poll" note
 * below).
 */
5795 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5797 BT_DBG("chan %p, txseq %d", chan, txseq);
5799 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5800 chan->expected_tx_seq);
/* While SREJs are outstanding, sequence numbers are checked against
 * the SREJ bookkeeping first. */
5802 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5803 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5805 /* See notes below regarding "double poll" and
5808 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5809 BT_DBG("Invalid/Ignore - after SREJ");
5810 return L2CAP_TXSEQ_INVALID_IGNORE;
5812 BT_DBG("Invalid - in window after SREJ sent");
5813 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we asked for first. */
5817 if (chan->srej_list.head == txseq) {
5818 BT_DBG("Expected SREJ");
5819 return L2CAP_TXSEQ_EXPECTED_SREJ;
5822 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5823 BT_DBG("Duplicate SREJ - txseq already stored");
5824 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5827 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5828 BT_DBG("Unexpected SREJ - not requested");
5829 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5833 if (chan->expected_tx_seq == txseq) {
5834 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5836 BT_DBG("Invalid - txseq outside tx window");
5837 return L2CAP_TXSEQ_INVALID;
5840 return L2CAP_TXSEQ_EXPECTED;
/* Offset comparison handles sequence-number wraparound. */
5844 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5845 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5846 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5847 return L2CAP_TXSEQ_DUPLICATE;
5850 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5851 /* A source of invalid packets is a "double poll" condition,
5852 * where delays cause us to send multiple poll packets. If
5853 * the remote stack receives and processes both polls,
5854 * sequence numbers can wrap around in such a way that a
5855 * resent frame has a sequence number that looks like new data
5856 * with a sequence gap. This would trigger an erroneous SREJ
5859 * Fortunately, this is impossible with a tx window that's
5860 * less than half of the maximum sequence number, which allows
5861 * invalid frames to be safely ignored.
5863 * With tx window sizes greater than half of the tx window
5864 * maximum, the frame is invalid and cannot be ignored. This
5865 * causes a disconnect.
5868 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5869 BT_DBG("Invalid/Ignore - txseq outside tx window");
5870 return L2CAP_TXSEQ_INVALID_IGNORE;
5872 BT_DBG("Invalid - txseq outside tx window");
5873 return L2CAP_TXSEQ_INVALID;
5876 BT_DBG("Unexpected - txseq indicates missing frames");
5877 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine handler for the normal RECV state.
 * Dispatches on the receive event (I-frame, RR, RNR, REJ, SREJ) and
 * on the TxSeq classification for I-frames. Frames that are consumed
 * set skb_in_use; anything left unconsumed is freed at the end.
 * NOTE(review): condensed view — several break/return/brace lines and
 * some statements are elided here.
 */
5881 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5882 struct l2cap_ctrl *control,
5883 struct sk_buff *skb, u8 event)
5886 bool skb_in_use = false;
5888 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5892 case L2CAP_EV_RECV_IFRAME:
5893 switch (l2cap_classify_txseq(chan, control->txseq)) {
5894 case L2CAP_TXSEQ_EXPECTED:
5895 l2cap_pass_to_tx(chan, control);
/* In local-busy state the frame is dropped; the peer will
 * retransmit once we exit local busy. */
5897 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5898 BT_DBG("Busy, discarding expected seq %d",
5903 chan->expected_tx_seq = __next_seq(chan,
5906 chan->buffer_seq = chan->expected_tx_seq;
5909 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 acknowledges an earlier P-bit; clear REJ bookkeeping
 * and retransmit if the REJ wasn't already acted on. */
5913 if (control->final) {
5914 if (!test_and_clear_bit(CONN_REJ_ACT,
5915 &chan->conn_state)) {
5917 l2cap_retransmit_all(chan, control);
5918 l2cap_ertm_send(chan);
5922 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5923 l2cap_send_ack(chan);
5925 case L2CAP_TXSEQ_UNEXPECTED:
5926 l2cap_pass_to_tx(chan, control);
5928 /* Can't issue SREJ frames in the local busy state.
5929 * Drop this frame, it will be seen as missing
5930 * when local busy is exited.
5932 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5933 BT_DBG("Busy, discarding unexpected seq %d",
5938 /* There was a gap in the sequence, so an SREJ
5939 * must be sent for each missing frame. The
5940 * current frame is stored for later use.
5942 skb_queue_tail(&chan->srej_q, skb);
5944 BT_DBG("Queued %p (queue len %d)", skb,
5945 skb_queue_len(&chan->srej_q));
5947 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5948 l2cap_seq_list_clear(&chan->srej_list);
5949 l2cap_send_srej(chan, control->txseq);
5951 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5953 case L2CAP_TXSEQ_DUPLICATE:
5954 l2cap_pass_to_tx(chan, control);
5956 case L2CAP_TXSEQ_INVALID_IGNORE:
5958 case L2CAP_TXSEQ_INVALID:
5960 l2cap_send_disconn_req(chan, ECONNRESET);
5964 case L2CAP_EV_RECV_RR:
5965 l2cap_pass_to_tx(chan, control);
5966 if (control->final) {
5967 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit while an AMP channel move is in progress. */
5969 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5970 !__chan_is_moving(chan)) {
5972 l2cap_retransmit_all(chan, control);
5975 l2cap_ertm_send(chan);
5976 } else if (control->poll) {
5977 l2cap_send_i_or_rr_or_rnr(chan);
5979 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5980 &chan->conn_state) &&
5981 chan->unacked_frames)
5982 __set_retrans_timer(chan);
5984 l2cap_ertm_send(chan);
5987 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers. */
5988 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5989 l2cap_pass_to_tx(chan, control);
5990 if (control && control->poll) {
5991 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5992 l2cap_send_rr_or_rnr(chan, 0);
5994 __clear_retrans_timer(chan);
5995 l2cap_seq_list_clear(&chan->retrans_list);
5997 case L2CAP_EV_RECV_REJ:
5998 l2cap_handle_rej(chan, control);
6000 case L2CAP_EV_RECV_SREJ:
6001 l2cap_handle_srej(chan, control);
/* Any skb not queued or consumed above is freed here. */
6007 if (skb && !skb_in_use) {
6008 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine handler while one or more SREJs are outstanding.
 * Incoming I-frames are buffered in srej_q until the requested
 * retransmissions arrive and reassembly can resume in order.
 * NOTE(review): condensed view — break/brace lines and some statements
 * are elided here.
 */
6015 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6016 struct l2cap_ctrl *control,
6017 struct sk_buff *skb, u8 event)
6020 u16 txseq = control->txseq;
6021 bool skb_in_use = false;
6023 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6027 case L2CAP_EV_RECV_IFRAME:
6028 switch (l2cap_classify_txseq(chan, txseq)) {
6029 case L2CAP_TXSEQ_EXPECTED:
6030 /* Keep frame for reassembly later */
6031 l2cap_pass_to_tx(chan, control);
6032 skb_queue_tail(&chan->srej_q, skb);
6034 BT_DBG("Queued %p (queue len %d)", skb,
6035 skb_queue_len(&chan->srej_q));
6037 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A requested retransmission arrived: pop it off the SREJ
 * list and try to drain in-order frames from srej_q. */
6039 case L2CAP_TXSEQ_EXPECTED_SREJ:
6040 l2cap_seq_list_pop(&chan->srej_list);
6042 l2cap_pass_to_tx(chan, control);
6043 skb_queue_tail(&chan->srej_q, skb);
6045 BT_DBG("Queued %p (queue len %d)", skb,
6046 skb_queue_len(&chan->srej_q));
6048 err = l2cap_rx_queued_iframes(chan);
6053 case L2CAP_TXSEQ_UNEXPECTED:
6054 /* Got a frame that can't be reassembled yet.
6055 * Save it for later, and send SREJs to cover
6056 * the missing frames.
6058 skb_queue_tail(&chan->srej_q, skb);
6060 BT_DBG("Queued %p (queue len %d)", skb,
6061 skb_queue_len(&chan->srej_q));
6063 l2cap_pass_to_tx(chan, control);
6064 l2cap_send_srej(chan, control->txseq);
6066 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6067 /* This frame was requested with an SREJ, but
6068 * some expected retransmitted frames are
6069 * missing. Request retransmission of missing
6072 skb_queue_tail(&chan->srej_q, skb);
6074 BT_DBG("Queued %p (queue len %d)", skb,
6075 skb_queue_len(&chan->srej_q));
6077 l2cap_pass_to_tx(chan, control);
6078 l2cap_send_srej_list(chan, control->txseq);
6080 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6081 /* We've already queued this frame. Drop this copy. */
6082 l2cap_pass_to_tx(chan, control);
6084 case L2CAP_TXSEQ_DUPLICATE:
6085 /* Expecting a later sequence number, so this frame
6086 * was already received. Ignore it completely.
6089 case L2CAP_TXSEQ_INVALID_IGNORE:
6091 case L2CAP_TXSEQ_INVALID:
6093 l2cap_send_disconn_req(chan, ECONNRESET);
6097 case L2CAP_EV_RECV_RR:
6098 l2cap_pass_to_tx(chan, control);
6099 if (control->final) {
6100 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6102 if (!test_and_clear_bit(CONN_REJ_ACT,
6103 &chan->conn_state)) {
6105 l2cap_retransmit_all(chan, control);
6108 l2cap_ertm_send(chan);
6109 } else if (control->poll) {
6110 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6111 &chan->conn_state) &&
6112 chan->unacked_frames) {
6113 __set_retrans_timer(chan);
/* Answer the poll with the tail of the SREJ list, F-bit set. */
6116 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6117 l2cap_send_srej_tail(chan);
6119 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6120 &chan->conn_state) &&
6121 chan->unacked_frames)
6122 __set_retrans_timer(chan);
6124 l2cap_send_ack(chan);
6127 case L2CAP_EV_RECV_RNR:
6128 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6129 l2cap_pass_to_tx(chan, control);
6130 if (control->poll) {
6131 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR at buffer_seq. */
6133 struct l2cap_ctrl rr_control;
6134 memset(&rr_control, 0, sizeof(rr_control));
6135 rr_control.sframe = 1;
6136 rr_control.super = L2CAP_SUPER_RR;
6137 rr_control.reqseq = chan->buffer_seq;
6138 l2cap_send_sframe(chan, &rr_control);
6142 case L2CAP_EV_RECV_REJ:
6143 l2cap_handle_rej(chan, control);
6145 case L2CAP_EV_RECV_SREJ:
6146 l2cap_handle_srej(chan, control);
/* Free any skb that was not queued for reassembly above. */
6150 if (skb && !skb_in_use) {
6151 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the RX state machine to RECV,
 * pick the MTU of whichever controller (AMP block MTU or BR/EDR ACL MTU)
 * now carries the channel, and resegment pending outbound data.
 * NOTE(review): the condition selecting between the two mtu assignments
 * is elided in this condensed view — presumably it tests hs_hcon.
 */
6158 static int l2cap_finish_move(struct l2cap_chan *chan)
6160 BT_DBG("chan %p", chan);
6162 chan->rx_state = L2CAP_RX_STATE_RECV;
6165 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6167 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6169 return l2cap_resegment(chan);
/* RX state handler while waiting for a P-bit (poll) during a channel
 * move. On the poll: ack processed frames, rewind the TX side to the
 * peer's reqseq, finish the move, and reply with the F-bit set.
 * Other events fall through to the normal RECV handler without the skb.
 * NOTE(review): condensed view — the event check gating the poll path
 * is elided here.
 */
6172 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6173 struct l2cap_ctrl *control,
6174 struct sk_buff *skb, u8 event)
6178 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6184 l2cap_process_reqseq(chan, control->reqseq);
6186 if (!skb_queue_empty(&chan->tx_q))
6187 chan->tx_send_head = skb_peek(&chan->tx_q);
6189 chan->tx_send_head = NULL;
6191 /* Rewind next_tx_seq to the point expected
6194 chan->next_tx_seq = control->reqseq;
6195 chan->unacked_frames = 0;
6197 err = l2cap_finish_move(chan);
6201 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6202 l2cap_send_i_or_rr_or_rnr(chan);
6204 if (event == L2CAP_EV_RECV_IFRAME)
/* Re-dispatch S-frame events through the normal RECV state. */
6207 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state handler while waiting for an F-bit (final) during a channel
 * move. On the final frame: return to RECV, rewind the TX side to the
 * peer's reqseq, adopt the new controller's MTU, resegment, and then
 * process the frame through the normal RECV handler.
 * NOTE(review): condensed view — the non-final early-out and the
 * hs_hcon condition between the two mtu assignments are elided.
 */
6210 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6211 struct l2cap_ctrl *control,
6212 struct sk_buff *skb, u8 event)
6216 if (!control->final)
6219 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6221 chan->rx_state = L2CAP_RX_STATE_RECV;
6222 l2cap_process_reqseq(chan, control->reqseq);
6224 if (!skb_queue_empty(&chan->tx_q))
6225 chan->tx_send_head = skb_peek(&chan->tx_q);
6227 chan->tx_send_head = NULL;
6229 /* Rewind next_tx_seq to the point expected
6232 chan->next_tx_seq = control->reqseq;
6233 chan->unacked_frames = 0;
6236 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6238 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6240 err = l2cap_resegment(chan);
6243 err = l2cap_rx_state_recv(chan, control, skb, event);
6248 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6250 /* Make sure reqseq is for a packet that has been sent but not acked */
/* unacked = how far next_tx_seq is ahead of expected_ack_seq (modulo
 * the sequence space); reqseq must fall inside that span. */
6253 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6254 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route to the handler for the channel's current RX state. An invalid
 * reqseq (acknowledging a frame never sent) forces a disconnect.
 */
6257 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6258 struct sk_buff *skb, u8 event)
6262 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6263 control, skb, event, chan->rx_state);
6265 if (__valid_reqseq(chan, control->reqseq)) {
6266 switch (chan->rx_state) {
6267 case L2CAP_RX_STATE_RECV:
6268 err = l2cap_rx_state_recv(chan, control, skb, event);
6270 case L2CAP_RX_STATE_SREJ_SENT:
6271 err = l2cap_rx_state_srej_sent(chan, control, skb,
6274 case L2CAP_RX_STATE_WAIT_P:
6275 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6277 case L2CAP_RX_STATE_WAIT_F:
6278 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6285 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6286 control->reqseq, chan->next_tx_seq,
6287 chan->expected_ack_seq);
6288 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only in-sequence (EXPECTED) I-frames are
 * reassembled; anything else abandons any partial SDU and drops the
 * frame. Streaming mode has no retransmission, so gaps just advance
 * the expected sequence numbers.
 * NOTE(review): condensed view — the else branch and sdu reset lines
 * are partially elided.
 */
6294 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6295 struct sk_buff *skb)
6299 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6302 if (l2cap_classify_txseq(chan, control->txseq) ==
6303 L2CAP_TXSEQ_EXPECTED) {
6304 l2cap_pass_to_tx(chan, control);
6306 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6307 __next_seq(chan, chan->buffer_seq));
6309 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6311 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: discard any partially reassembled SDU. */
6314 kfree_skb(chan->sdu);
6317 chan->sdu_last_frag = NULL;
6321 BT_DBG("Freeing %p", skb);
/* Track the received txseq so classification stays in sync. */
6326 chan->last_acked_seq = control->txseq;
6327 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpack the
 * control field, verify FCS, sanity-check the payload length against
 * the negotiated MPS, then dispatch I-frames and S-frames to the
 * appropriate state machine. Protocol violations disconnect the channel.
 * NOTE(review): condensed view — some length setup, return statements
 * and braces are elided here.
 */
6332 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6334 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6338 __unpack_control(chan, skb);
6343 * We can just drop the corrupted I-frame here.
6344 * Receiver will miss it and start proper recovery
6345 * procedures and ask for retransmission.
6347 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU length field (start fragments) and the FCS from
 * the payload length compared against MPS. */
6350 if (!control->sframe && control->sar == L2CAP_SAR_START)
6351 len -= L2CAP_SDULEN_SIZE;
6353 if (chan->fcs == L2CAP_FCS_CRC16)
6354 len -= L2CAP_FCS_SIZE;
6356 if (len > chan->mps) {
6357 l2cap_send_disconn_req(chan, ECONNRESET);
6361 if (!control->sframe) {
6364 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6365 control->sar, control->reqseq, control->final,
6368 /* Validate F-bit - F=0 always valid, F=1 only
6369 * valid in TX WAIT_F
6371 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6374 if (chan->mode != L2CAP_MODE_STREAMING) {
6375 event = L2CAP_EV_RECV_IFRAME;
6376 err = l2cap_rx(chan, control, skb, event);
6378 err = l2cap_stream_rx(chan, control, skb);
6382 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame super field onto RX events. */
6384 const u8 rx_func_to_event[4] = {
6385 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6386 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6389 /* Only I-frames are expected in streaming mode */
6390 if (chan->mode == L2CAP_MODE_STREAMING)
6393 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6394 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a violation. */
6398 BT_ERR("Trailing bytes: %d in sframe", len);
6399 l2cap_send_disconn_req(chan, ECONNRESET);
6403 /* Validate F and P bits */
6404 if (control->final && (control->poll ||
6405 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6408 event = rx_func_to_event[control->super];
6409 if (l2cap_rx(chan, control, skb, event))
6410 l2cap_send_disconn_req(chan, ECONNRESET);
/* Route an incoming data frame to the channel identified by its CID.
 * Unknown CIDs are dropped (A2MP CIDs may lazily create a channel).
 * Basic-mode frames go straight to the socket layer; ERTM/streaming
 * frames go through l2cap_data_rcv().
 * NOTE(review): condensed view — drop/kfree_skb paths and some braces
 * are elided here.
 */
6420 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6421 struct sk_buff *skb)
6423 struct l2cap_chan *chan;
6425 chan = l2cap_get_chan_by_scid(conn, cid);
6427 if (cid == L2CAP_CID_A2MP) {
6428 chan = a2mp_channel_create(conn, skb);
6434 l2cap_chan_lock(chan);
6436 BT_DBG("unknown cid 0x%4.4x", cid);
6437 /* Drop packet and return */
6443 BT_DBG("chan %p, len %d", chan, skb->len);
6445 if (chan->state != BT_CONNECTED)
6448 switch (chan->mode) {
6449 case L2CAP_MODE_BASIC:
6450 /* If socket recv buffers overflows we drop data here
6451 * which is *bad* because L2CAP has to be reliable.
6452 * But we don't have any other choice. L2CAP doesn't
6453 * provide flow control mechanism. */
6455 if (chan->imtu < skb->len)
6458 if (!chan->ops->recv(chan, skb))
6462 case L2CAP_MODE_ERTM:
6463 case L2CAP_MODE_STREAMING:
6464 l2cap_data_rcv(chan, skb);
6468 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6476 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * the given PSM. Only ACL links carry connectionless data. The sender's
 * BD_ADDR and the PSM are stashed in the skb cb for recvmsg msg_name.
 * NOTE(review): condensed view — drop/put paths are elided here.
 */
6479 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6480 struct sk_buff *skb)
6482 struct hci_conn *hcon = conn->hcon;
6483 struct l2cap_chan *chan;
6485 if (hcon->type != ACL_LINK)
6488 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6493 BT_DBG("chan %p, len %d", chan, skb->len);
6495 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6498 if (chan->imtu < skb->len)
6501 /* Store remote BD_ADDR and PSM for msg_name */
6502 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6503 bt_cb(skb)->psm = psm;
6505 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (fixed CID) frame received over an LE link to the
 * connected ATT channel, dropping frames from blacklisted remotes or
 * frames larger than the channel's incoming MTU.
 * NOTE(review): condensed view — drop/put paths are elided here.
 */
6512 static void l2cap_att_channel(struct l2cap_conn *conn,
6513 struct sk_buff *skb)
6515 struct hci_conn *hcon = conn->hcon;
6516 struct l2cap_chan *chan;
6518 if (hcon->type != LE_LINK)
6521 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6522 &hcon->src, &hcon->dst);
6526 BT_DBG("chan %p, len %d", chan, skb->len);
6528 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6531 if (chan->imtu < skb->len)
6534 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, or a regular data channel. Takes ownership of
 * the skb. The length field is validated against the actual skb length.
 * NOTE(review): condensed view — the drop path for the length mismatch
 * and some case labels/breaks are elided here.
 */
6541 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6543 struct l2cap_hdr *lh = (void *) skb->data;
6547 skb_pull(skb, L2CAP_HDR_SIZE);
6548 cid = __le16_to_cpu(lh->cid);
6549 len = __le16_to_cpu(lh->len);
6551 if (len != skb->len) {
6556 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6559 case L2CAP_CID_SIGNALING:
6560 l2cap_sig_channel(conn, skb);
6563 case L2CAP_CID_CONN_LESS:
/* PSM may be unaligned in the packet; read it safely. */
6564 psm = get_unaligned((__le16 *) skb->data);
6565 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6566 l2cap_conless_channel(conn, psm, skb);
6570 l2cap_att_channel(conn, skb);
6573 case L2CAP_CID_LE_SIGNALING:
6574 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole link with EACCES. */
6578 if (smp_sig_channel(conn, skb))
6579 l2cap_conn_del(conn->hcon, EACCES);
6583 l2cap_data_channel(conn, cid, skb);
6588 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; lm1 accumulates link-mode flags from
 * channels bound to this adapter's own address (exact match), lm2 from
 * wildcard (BDADDR_ANY) listeners. Returns the exact match set when one
 * exists, otherwise the wildcard set.
 * NOTE(review): condensed view — the line setting `exact` on an address
 * match is elided here.
 */
6590 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6592 int exact = 0, lm1 = 0, lm2 = 0;
6593 struct l2cap_chan *c;
6595 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6597 /* Find listening sockets and check their link_mode */
6598 read_lock(&chan_list_lock);
6599 list_for_each_entry(c, &chan_list, global_l) {
6600 if (c->state != BT_LISTEN)
6603 if (!bacmp(&c->src, &hdev->bdaddr)) {
6604 lm1 |= HCI_LM_ACCEPT;
6605 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6606 lm1 |= HCI_LM_MASTER;
6608 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6609 lm2 |= HCI_LM_ACCEPT;
6610 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6611 lm2 |= HCI_LM_MASTER;
6614 read_unlock(&chan_list_lock);
6616 return exact ? lm1 : lm2;
/* HCI callback: connection establishment completed. On success, create
 * the L2CAP connection object and mark it ready; on failure, tear down
 * any L2CAP state for this hcon with the mapped errno.
 * NOTE(review): condensed view — the status check between the two
 * branches is elided here.
 */
6619 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6621 struct l2cap_conn *conn;
6623 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6626 conn = l2cap_conn_add(hcon);
6628 l2cap_conn_ready(conn);
6630 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when disconnecting.
 * Falls back to "remote user terminated" when no L2CAP connection
 * exists to supply a stored disc_reason.
 */
6634 int l2cap_disconn_ind(struct hci_conn *hcon)
6636 struct l2cap_conn *conn = hcon->l2cap_data;
6638 BT_DBG("hcon %p", hcon);
6641 return HCI_ERROR_REMOTE_USER_TERM;
6642 return conn->disc_reason;
/* HCI callback: the link is gone — tear down all L2CAP state for it,
 * translating the HCI reason into an errno for channel users.
 */
6645 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6647 BT_DBG("hcon %p reason %d", hcon, reason);
6649 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * when encryption drops, MEDIUM-security channels get a grace timer
 * and HIGH-security channels are closed immediately; when encryption
 * comes back, the MEDIUM-security grace timer is cancelled.
 */
6652 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6654 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6657 if (encrypt == 0x00) {
6658 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6659 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6660 } else if (chan->sec_level == BT_SECURITY_HIGH)
6661 l2cap_chan_close(chan, ECONNREFUSED);
6663 if (chan->sec_level == BT_SECURITY_MEDIUM)
6664 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed for a link. Walks
 * every channel on the connection and advances its state accordingly:
 * LE/ATT channels become ready, pending BR/EDR connects are started or
 * timed out, and BT_CONNECT2 channels get a connect response (and an
 * initial config request on success).
 * NOTE(review): condensed view — several braces, breaks/continues, the
 * `if (!status)` guards before the CONNECT/CONNECT2 branches, and the
 * conn null check near the top are elided here.
 */
6668 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6670 struct l2cap_conn *conn = hcon->l2cap_data;
6671 struct l2cap_chan *chan;
6676 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links: successful encryption kicks off SMP key distribution. */
6678 if (hcon->type == LE_LINK) {
6679 if (!status && encrypt)
6680 smp_distribute_keys(conn, 0);
6681 cancel_delayed_work(&conn->security_timer);
6684 mutex_lock(&conn->chan_lock);
6686 list_for_each_entry(chan, &conn->chan_l, list) {
6687 l2cap_chan_lock(chan);
6689 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6690 state_to_string(chan->state));
/* A2MP fixed channels are not affected by link security. */
6692 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6693 l2cap_chan_unlock(chan);
6697 if (chan->scid == L2CAP_CID_ATT) {
6698 if (!status && encrypt) {
6699 chan->sec_level = hcon->sec_level;
6700 l2cap_chan_ready(chan);
6703 l2cap_chan_unlock(chan);
/* Skip channels with a connect request still in flight. */
6707 if (!__l2cap_no_conn_pending(chan)) {
6708 l2cap_chan_unlock(chan);
6712 if (!status && (chan->state == BT_CONNECTED ||
6713 chan->state == BT_CONFIG)) {
6714 chan->ops->resume(chan);
6715 l2cap_check_encryption(chan, encrypt);
6716 l2cap_chan_unlock(chan);
6720 if (chan->state == BT_CONNECT) {
6722 l2cap_start_connection(chan);
6724 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6725 } else if (chan->state == BT_CONNECT2) {
6726 struct l2cap_conn_rsp rsp;
/* Incoming connection was waiting on security: either defer
 * to userspace (authorization pending) or accept into CONFIG. */
6730 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6731 res = L2CAP_CR_PEND;
6732 stat = L2CAP_CS_AUTHOR_PEND;
6733 chan->ops->defer(chan);
6735 l2cap_state_change(chan, BT_CONFIG);
6736 res = L2CAP_CR_SUCCESS;
6737 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject with "security block" and disconnect. */
6740 l2cap_state_change(chan, BT_DISCONN);
6741 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6742 res = L2CAP_CR_SEC_BLOCK;
6743 stat = L2CAP_CS_NO_INFO;
6746 rsp.scid = cpu_to_le16(chan->dcid);
6747 rsp.dcid = cpu_to_le16(chan->scid);
6748 rsp.result = cpu_to_le16(res);
6749 rsp.status = cpu_to_le16(stat);
6750 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration immediately after accepting. */
6753 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6754 res == L2CAP_CR_SUCCESS) {
6756 set_bit(CONF_REQ_SENT, &chan->conf_state);
6757 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6759 l2cap_build_conf_req(chan, buf),
6761 chan->num_conf_req++;
6765 l2cap_chan_unlock(chan);
6768 mutex_unlock(&conn->chan_lock);
/* HCI callback: an ACL data fragment arrived. Reassembles L2CAP frames
 * from ACL start/continuation fragments into conn->rx_skb and hands
 * complete frames to l2cap_recv_frame(). Malformed fragment sequences
 * mark the connection unreliable (ECOMM).
 * NOTE(review): condensed view — several braces, the continuation-case
 * label, drop paths and some guards (e.g. rx_skb non-null before the
 * "Unexpected start frame" error) are elided here.
 */
6773 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6775 struct l2cap_conn *conn = hcon->l2cap_data;
6776 struct l2cap_hdr *hdr;
6779 /* For AMP controller do not create l2cap conn */
6780 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6784 conn = l2cap_conn_add(hcon);
6789 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6793 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated — discard it. */
6796 BT_ERR("Unexpected start frame (len %d)", skb->len);
6797 kfree_skb(conn->rx_skb);
6798 conn->rx_skb = NULL;
6800 l2cap_conn_unreliable(conn, ECOMM);
6803 /* Start fragment always begin with Basic L2CAP header */
6804 if (skb->len < L2CAP_HDR_SIZE) {
6805 BT_ERR("Frame is too short (len %d)", skb->len);
6806 l2cap_conn_unreliable(conn, ECOMM);
6810 hdr = (struct l2cap_hdr *) skb->data;
6811 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6813 if (len == skb->len) {
6814 /* Complete frame received */
6815 l2cap_recv_frame(conn, skb);
6819 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6821 if (skb->len > len) {
6822 BT_ERR("Frame is too long (len %d, expected len %d)",
6824 l2cap_conn_unreliable(conn, ECOMM);
6828 /* Allocate skb for the complete frame (with header) */
6829 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6833 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6835 conn->rx_len = len - skb->len;
6839 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
6841 if (!conn->rx_len) {
6842 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6843 l2cap_conn_unreliable(conn, ECOMM);
6847 if (skb->len > conn->rx_len) {
6848 BT_ERR("Fragment is too long (len %d, expected %d)",
6849 skb->len, conn->rx_len);
6850 kfree_skb(conn->rx_skb);
6851 conn->rx_skb = NULL;
6853 l2cap_conn_unreliable(conn, ECOMM);
6857 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6859 conn->rx_len -= skb->len;
6861 if (!conn->rx_len) {
6862 /* Complete frame received. l2cap_recv_frame
6863 * takes ownership of the skb so set the global
6864 * rx_skb pointer to NULL first.
6866 struct sk_buff *rx_skb = conn->rx_skb;
6867 conn->rx_skb = NULL;
6868 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show callback: print one line per global L2CAP
 * channel (addresses, state, PSM, CIDs, MTUs, security level, mode)
 * under chan_list_lock.
 */
6878 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6880 struct l2cap_chan *c;
6882 read_lock(&chan_list_lock);
6884 list_for_each_entry(c, &chan_list, global_l) {
6885 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6887 c->state, __le16_to_cpu(c->psm),
6888 c->scid, c->dcid, c->imtu, c->omtu,
6889 c->sec_level, c->mode);
6892 read_unlock(&chan_list_lock);
/* debugfs open callback: single_open wires up l2cap_debugfs_show. */
6897 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6899 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based).
 * NOTE(review): the .read line is elided in this condensed view. */
6902 static const struct file_operations l2cap_debugfs_fops = {
6903 .open = l2cap_debugfs_open,
6905 .llseek = seq_lseek,
6906 .release = single_release,
/* Handle to the debugfs "l2cap" entry, removed in l2cap_exit(). */
6909 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, when the Bluetooth
 * debugfs root exists, create the read-only "l2cap" channel dump.
 * NOTE(review): error-return handling after l2cap_init_sockets() is
 * elided in this condensed view.
 */
6911 int __init l2cap_init(void)
6915 err = l2cap_init_sockets();
6919 if (IS_ERR_OR_NULL(bt_debugfs))
6922 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6923 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister the sockets. */
6928 void l2cap_exit(void)
6930 debugfs_remove(l2cap_debugfs)
6931 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to force basic mode only. */
6934 module_param(disable_ertm, bool, 0644);
6935 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");