2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
/* Map an HCI link type plus HCI address type to the BDADDR_* address type
 * exposed through the L2CAP socket API.
 * NOTE(review): view is truncated — the non-LE (BR/EDR) return path is not
 * visible here.
 */
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
/* Convenience wrapper: address type of the local (source) side of hcon. */
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
/* Convenience wrapper: address type of the remote (destination) side. */
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
/* Unlocked lookup of a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock (the "__" prefix marks the lock-free
 * variants throughout this section).
 */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked lookup by source CID; same locking contract as above. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116 * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 struct l2cap_chan *c;
122 mutex_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
/* NOTE(review): truncated view — the chan lock acquisition on a hit is not
 * visible between these lines. */
126 mutex_unlock(&conn->chan_lock);
131 /* Find channel with given DCID.
132 * Returns locked channel.
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 struct l2cap_chan *c;
139 mutex_lock(&conn->chan_lock);
140 c = __l2cap_get_chan_by_dcid(conn, cid);
143 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by outstanding signalling command identifier. */
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &conn->chan_l, list) {
154 if (c->ident == ident)
/* Locked lookup by command identifier; mirrors the scid/dcid variants. */
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 mutex_lock(&conn->chan_lock);
166 c = __l2cap_get_chan_by_ident(conn, ident);
169 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to (psm, src address).
 * Caller must hold chan_list_lock; used for PSM collision detection.
 */
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
176 struct l2cap_chan *c;
178 list_for_each_entry(c, &chan_list, global_l) {
179 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM on the given source address.
 * If psm is non-zero, fail when another channel already owns it; otherwise
 * scan the dynamic PSM range (BR/EDR or LE, chosen by src_type) for a free
 * value and assign it to both chan->psm and chan->sport.
 * NOTE(review): truncated view — error returns, the 'incr' initialisation
 * and the loop body's break are not visible.
 */
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
189 write_lock(&chan_list_lock);
191 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
201 u16 p, start, end, incr;
203 if (chan->src_type == BDADDR_BREDR) {
204 start = L2CAP_PSM_DYN_START;
205 end = L2CAP_PSM_AUTO_END;
/* else: LE dynamic PSM range */
208 start = L2CAP_PSM_LE_DYN_START;
209 end = L2CAP_PSM_LE_DYN_END;
214 for (p = start; p <= end; p += incr)
215 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 chan->psm = cpu_to_le16(p);
217 chan->sport = cpu_to_le16(p);
224 write_unlock(&chan_list_lock);
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register a fixed channel with the given source CID, overriding the
 * connection-oriented defaults (MTU, channel type).
 */
229 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
231 write_lock(&chan_list_lock);
233 /* Override the defaults (which are for conn-oriented) */
234 chan->omtu = L2CAP_DEFAULT_MTU;
235 chan->chan_type = L2CAP_CHAN_FIXED;
239 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection.
 * The dynamic range's upper bound differs between LE and BR/EDR links.
 * NOTE(review): truncated view — the return statements are not visible.
 */
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
248 if (conn->hcon->type == LE_LINK)
249 dyn_end = L2CAP_CID_LE_DYN_END;
251 dyn_end = L2CAP_CID_DYN_END;
253 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition a channel to a new state via the channel ops callback,
 * logging the old -> new transition. err == 0 means no error to report.
 */
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
263 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 state_to_string(state));
267 chan->ops->state_change(chan, state, 0);
/* State change that also delivers an error code to the socket layer. */
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
274 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing the channel state. */
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
279 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending (monitor supersedes retrans) or no timeout is configured.
 */
282 static void __set_retrans_timer(struct l2cap_chan *chan)
284 if (!delayed_work_pending(&chan->monitor_timer) &&
285 chan->retrans_timeout) {
286 l2cap_set_timer(chan, &chan->retrans_timer,
287 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; it replaces any pending retrans timer. */
291 static void __set_monitor_timer(struct l2cap_chan *chan)
293 __clear_retrans_timer(chan);
294 if (chan->monitor_timeout) {
295 l2cap_set_timer(chan, &chan->monitor_timer,
296 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM txseq == seq.
 * NOTE(review): truncated view — the return statements are not visible.
 */
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
305 skb_queue_walk(head, skb) {
306 if (bt_cb(skb)->l2cap.txseq == seq)
313 /* ---- L2CAP sequence number lists ---- */
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 * and removed from the head in constant time, without further memory
/* Allocate and clear a sequence list sized for 'size' entries; the backing
 * array is rounded up to a power of two so (seq & mask) indexes it.
 * NOTE(review): truncated view — the allocation-failure return is not
 * visible here.
 */
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
326 size_t alloc_size, i;
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
332 alloc_size = roundup_pow_of_two(size);
334 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
338 seq_list->mask = alloc_size - 1;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 for (i = 0; i < alloc_size; i++)
342 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is safe). */
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
349 kfree(seq_list->list)
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
355 /* Constant-time check for list membership */
356 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head of the list; resets head/tail to CLEAR when
 * the last element (marked L2CAP_SEQ_LIST_TAIL) is popped.
 */
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list; no-op when it is already empty. */
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
379 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
382 for (i = 0; i <= seq_list->mask; i++)
383 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq to the tail in O(1); duplicates are silently ignored
 * (membership check via the slot value).
 */
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
391 u16 mask = seq_list->mask;
393 /* All appends happen in constant time */
395 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
398 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 seq_list->head = seq;
401 seq_list->list[seq_list->tail & mask] = seq;
403 seq_list->tail = seq;
404 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its current state, then notifies the socket layer
 * and drops the timer's channel reference.
 * NOTE(review): truncated view — the default 'reason' assignment (for
 * states not matched below) is not visible.
 */
407 static void l2cap_chan_timeout(struct work_struct *work)
409 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
411 struct l2cap_conn *conn = chan->conn;
414 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
416 mutex_lock(&conn->chan_lock);
417 l2cap_chan_lock(chan);
419 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
420 reason = ECONNREFUSED;
421 else if (chan->state == BT_CONNECT &&
422 chan->sec_level != BT_SECURITY_SDP)
423 reason = ECONNREFUSED;
427 l2cap_chan_close(chan, reason);
429 l2cap_chan_unlock(chan);
431 chan->ops->close(chan);
432 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the delayed work was scheduled. */
434 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: lock, refcount, timer work item,
 * and registration on the global channel list. Returns the new channel.
 * NOTE(review): truncated view — the NULL return on allocation failure is
 * not visible.
 */
437 struct l2cap_chan *l2cap_chan_create(void)
439 struct l2cap_chan *chan;
441 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
445 mutex_init(&chan->lock);
447 /* Set default lock nesting level */
448 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
450 write_lock(&chan_list_lock);
451 list_add(&chan->global_l, &chan_list);
452 write_unlock(&chan_list_lock);
454 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
456 chan->state = BT_OPEN;
458 kref_init(&chan->kref);
460 /* This flag is cleared in l2cap_chan_ready() */
461 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
463 BT_DBG("chan %p", chan);
467 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink the channel from the global list and free
 * it. Only ever invoked via l2cap_chan_put() when the last ref drops.
 */
469 static void l2cap_chan_destroy(struct kref *kref)
471 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
473 BT_DBG("chan %p", chan);
475 write_lock(&chan_list_lock);
476 list_del(&chan->global_l);
477 write_unlock(&chan_list_lock);
/* Take a reference on the channel. */
482 void l2cap_chan_hold(struct l2cap_chan *c)
484 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
/* Drop a reference; frees the channel via l2cap_chan_destroy on zero. */
489 void l2cap_chan_put(struct l2cap_chan *c)
491 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
493 kref_put(&c->kref, l2cap_chan_destroy);
495 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel's negotiable parameters (FCS, ERTM windows/timeouts,
 * security level, flush timeout) to the protocol defaults and clear all
 * configuration state bits.
 */
497 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
499 chan->fcs = L2CAP_FCS_CRC16;
500 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
501 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
502 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
503 chan->remote_max_tx = chan->max_tx;
504 chan->remote_tx_win = chan->tx_win;
505 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
506 chan->sec_level = BT_SECURITY_LOW;
507 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
508 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
509 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
510 chan->conf_state = 0;
512 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
514 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control state: no TX credits until the
 * peer grants them, RX credits from the module default, and MPS capped by
 * the channel's incoming MTU.
 */
516 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
519 chan->sdu_last_frag = NULL;
521 chan->tx_credits = 0;
522 chan->rx_credits = le_max_credits;
523 chan->mps = min_t(u16, chan->imtu, le_default_mps);
525 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: assign CIDs/MTU according to the
 * channel type, set QoS defaults, take refs (channel always; hci_conn only
 * for non-fixed channels or fixed channels that asked for it) and link the
 * channel into conn->chan_l. Caller must hold conn->chan_lock.
 */
528 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
530 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
531 __le16_to_cpu(chan->psm), chan->dcid);
533 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
537 switch (chan->chan_type) {
538 case L2CAP_CHAN_CONN_ORIENTED:
539 /* Alloc CID for connection-oriented socket */
540 chan->scid = l2cap_alloc_cid(conn);
541 if (conn->hcon->type == ACL_LINK)
542 chan->omtu = L2CAP_DEFAULT_MTU;
545 case L2CAP_CHAN_CONN_LESS:
546 /* Connectionless socket */
547 chan->scid = L2CAP_CID_CONN_LESS;
548 chan->dcid = L2CAP_CID_CONN_LESS;
549 chan->omtu = L2CAP_DEFAULT_MTU;
552 case L2CAP_CHAN_FIXED:
553 /* Caller will set CID and CID specific MTU values */
/* default case (raw channel type): */
557 /* Raw socket can send/recv signalling messages only */
558 chan->scid = L2CAP_CID_SIGNALING;
559 chan->dcid = L2CAP_CID_SIGNALING;
560 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort QoS defaults for the local side. */
563 chan->local_id = L2CAP_BESTEFFORT_ID;
564 chan->local_stype = L2CAP_SERV_BESTEFFORT;
565 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
566 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
567 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
568 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
570 l2cap_chan_hold(chan);
572 /* Only keep a reference for fixed channels if they requested it */
573 if (chan->chan_type != L2CAP_CHAN_FIXED ||
574 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
575 hci_conn_hold(conn->hcon);
577 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
580 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
582 mutex_lock(&conn->chan_lock);
583 __l2cap_chan_add(conn, chan);
584 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear it down: stop the channel
 * timer, notify the socket layer, unlink from conn->chan_l, drop the refs
 * taken in __l2cap_chan_add(), disconnect any AMP logical link, and purge
 * mode-specific queues/timers (ERTM, streaming, LE flow control).
 * NOTE(review): truncated view — several closing braces, the early return
 * for conn == NULL and the switch statement's opening are not visible.
 */
587 void l2cap_chan_del(struct l2cap_chan *chan, int err)
589 struct l2cap_conn *conn = chan->conn;
591 __clear_chan_timer(chan);
593 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
594 state_to_string(chan->state));
596 chan->ops->teardown(chan, err);
599 struct amp_mgr *mgr = conn->hcon->amp_mgr;
600 /* Delete from channel list */
601 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add(). */
603 l2cap_chan_put(chan);
607 /* Reference was only held for non-fixed channels or
608 * fixed channels that explicitly requested it using the
609 * FLAG_HOLD_HCI_CONN flag.
611 if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
613 hci_conn_drop(conn->hcon);
615 if (mgr && mgr->bredr_chan == chan)
616 mgr->bredr_chan = NULL;
619 if (chan->hs_hchan) {
620 struct hci_chan *hs_hchan = chan->hs_hchan;
622 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
623 amp_disconnect_logical_link(hs_hchan);
626 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup: */
630 case L2CAP_MODE_BASIC:
633 case L2CAP_MODE_LE_FLOWCTL:
634 skb_queue_purge(&chan->tx_q);
637 case L2CAP_MODE_ERTM:
638 __clear_retrans_timer(chan);
639 __clear_monitor_timer(chan);
640 __clear_ack_timer(chan);
642 skb_queue_purge(&chan->srej_q);
644 l2cap_seq_list_free(&chan->srej_list);
645 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (tx queue purge) */
649 case L2CAP_MODE_STREAMING:
650 skb_queue_purge(&chan->tx_q);
656 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work handler run after an LE identity-address resolution: copy the
 * (possibly updated) destination address/type from the hci_conn into every
 * channel on the connection.
 */
658 static void l2cap_conn_update_id_addr(struct work_struct *work)
660 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
661 id_addr_update_work);
662 struct hci_conn *hcon = conn->hcon;
663 struct l2cap_chan *chan;
665 mutex_lock(&conn->chan_lock);
667 list_for_each_entry(chan, &conn->chan_l, list) {
668 l2cap_chan_lock(chan);
669 bacpy(&chan->dst, &hcon->dst);
670 chan->dst_type = bdaddr_dst_type(hcon);
671 l2cap_chan_unlock(chan);
674 mutex_unlock(&conn->chan_lock);
/* Send an LE credit-based connection response rejecting the pending
 * request: AUTHORIZATION if setup was deferred, BAD_PSM otherwise, and
 * move the channel to BT_DISCONN.
 */
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
679 struct l2cap_conn *conn = chan->conn;
680 struct l2cap_le_conn_rsp rsp;
683 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 result = L2CAP_CR_AUTHORIZATION;
686 result = L2CAP_CR_BAD_PSM;
688 l2cap_state_change(chan, BT_DISCONN);
690 rsp.dcid = cpu_to_le16(chan->scid);
691 rsp.mtu = cpu_to_le16(chan->imtu);
692 rsp.mps = cpu_to_le16(chan->mps);
693 rsp.credits = cpu_to_le16(chan->rx_credits);
694 rsp.result = cpu_to_le16(result);
696 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart of the LE reject above: send a connection response
 * with SEC_BLOCK (deferred setup) or BAD_PSM and enter BT_DISCONN.
 */
700 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
702 struct l2cap_conn *conn = chan->conn;
703 struct l2cap_conn_rsp rsp;
706 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 result = L2CAP_CR_SEC_BLOCK;
709 result = L2CAP_CR_BAD_PSM;
711 l2cap_state_change(chan, BT_DISCONN);
713 rsp.scid = cpu_to_le16(chan->dcid);
714 rsp.dcid = cpu_to_le16(chan->scid);
715 rsp.result = cpu_to_le16(result);
716 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
718 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: send a disconnect
 * request from connected/config states, reject a pending incoming request
 * in BT_CONNECT2, or simply delete/tear down otherwise.
 * NOTE(review): truncated view — the case labels of the state switch are
 * not visible; grouping below is inferred from the original indentation.
 */
721 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
723 struct l2cap_conn *conn = chan->conn;
725 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
727 switch (chan->state) {
729 chan->ops->teardown(chan, 0);
734 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
735 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
736 l2cap_send_disconn_req(chan, reason);
738 l2cap_chan_del(chan, reason);
742 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
743 if (conn->hcon->type == ACL_LINK)
744 l2cap_chan_connect_reject(chan);
745 else if (conn->hcon->type == LE_LINK)
746 l2cap_chan_le_connect_reject(chan);
749 l2cap_chan_del(chan, reason);
754 l2cap_chan_del(chan, reason);
758 chan->ops->teardown(chan, 0);
762 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type + security level into an HCI authentication
 * requirement. SDP (and 3DSP connectionless) channels are downgraded to
 * BT_SECURITY_SDP and never require bonding.
 */
764 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
766 switch (chan->chan_type) {
/* raw channel: dedicated bonding levels */
768 switch (chan->sec_level) {
769 case BT_SECURITY_HIGH:
770 case BT_SECURITY_FIPS:
771 return HCI_AT_DEDICATED_BONDING_MITM;
772 case BT_SECURITY_MEDIUM:
773 return HCI_AT_DEDICATED_BONDING;
775 return HCI_AT_NO_BONDING;
778 case L2CAP_CHAN_CONN_LESS:
779 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
780 if (chan->sec_level == BT_SECURITY_LOW)
781 chan->sec_level = BT_SECURITY_SDP;
783 if (chan->sec_level == BT_SECURITY_HIGH ||
784 chan->sec_level == BT_SECURITY_FIPS)
785 return HCI_AT_NO_BONDING_MITM;
787 return HCI_AT_NO_BONDING;
789 case L2CAP_CHAN_CONN_ORIENTED:
790 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
791 if (chan->sec_level == BT_SECURITY_LOW)
792 chan->sec_level = BT_SECURITY_SDP;
794 if (chan->sec_level == BT_SECURITY_HIGH ||
795 chan->sec_level == BT_SECURITY_FIPS)
796 return HCI_AT_NO_BONDING_MITM;
798 return HCI_AT_NO_BONDING;
/* default: general bonding levels */
802 switch (chan->sec_level) {
803 case BT_SECURITY_HIGH:
804 case BT_SECURITY_FIPS:
805 return HCI_AT_GENERAL_BONDING_MITM;
806 case BT_SECURITY_MEDIUM:
807 return HCI_AT_GENERAL_BONDING;
809 return HCI_AT_NO_BONDING;
815 /* Service level security */
/* Request the link security required by this channel: SMP for LE links,
 * HCI authentication/encryption for BR/EDR.
 */
816 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
818 struct l2cap_conn *conn = chan->conn;
821 if (conn->hcon->type == LE_LINK)
822 return smp_conn_security(conn->hcon, chan->sec_level);
824 auth_type = l2cap_get_auth_type(chan);
826 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier, wrapping within the
 * kernel-reserved 1..128 range (see comment below).
 * NOTE(review): truncated view — the wrap assignment and return are not
 * visible between the increment and the unlock.
 */
830 static u8 l2cap_get_ident(struct l2cap_conn *conn)
834 /* Get next available identificator.
835 * 1 - 128 are used by kernel.
836 * 129 - 199 are reserved.
837 * 200 - 254 are used by utilities like l2ping, etc.
840 mutex_lock(&conn->ident_lock);
842 if (++conn->tx_ident > 128)
847 mutex_unlock(&conn->ident_lock);
/* Build an L2CAP signalling command and push it over the connection's HCI
 * channel at maximum priority, using NO_FLUSH when the controller (or an
 * LE link) requires it.
 */
852 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
855 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
858 BT_DBG("code 0x%2.2x", code);
863 /* Use NO_FLUSH if supported or we have an LE link (which does
864 * not support auto-flushing packets) */
865 if (lmp_no_flush_capable(conn->hcon->hdev) ||
866 conn->hcon->type == LE_LINK)
867 flags = ACL_START_NO_FLUSH;
871 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
872 skb->priority = HCI_PRIO_MAX;
874 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE).
 */
877 static bool __chan_is_moving(struct l2cap_chan *chan)
879 return chan->move_state != L2CAP_MOVE_STABLE &&
880 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for this channel: route it over the AMP (high-speed)
 * HCI channel when one is attached and no move is in flight, otherwise over
 * the BR/EDR/LE ACL link with the appropriate flush flags.
 */
883 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
885 struct hci_conn *hcon = chan->conn->hcon;
888 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
891 if (chan->hs_hcon && !__chan_is_moving(chan)) {
893 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
900 /* Use NO_FLUSH for LE links (where this is the only option) or
901 * if the BR/EDR link supports it and flushing has not been
902 * explicitly requested (through FLAG_FLUSHABLE).
904 if (hcon->type == LE_LINK ||
905 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
906 lmp_no_flush_capable(hcon->hdev)))
907 flags = ACL_START_NO_FLUSH;
911 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
912 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into struct l2cap_ctrl:
 * S-frames carry poll/supervise bits, I-frames carry sar/txseq.
 */
915 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
917 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
920 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
923 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
931 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
939 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
941 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
944 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
947 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
955 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from an incoming frame, choosing the
 * extended or enhanced layout by the channel's FLAG_EXT_CTRL flag.
 */
963 static inline void __unpack_control(struct l2cap_chan *chan,
966 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
967 __unpack_extended_control(get_unaligned_le32(skb->data),
969 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
971 __unpack_enhanced_control(get_unaligned_le16(skb->data),
973 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode to a 32-bit field. */
977 static u32 __pack_extended_control(struct l2cap_ctrl *control)
981 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
984 if (control->sframe) {
985 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
989 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode to a 16-bit field. */
996 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1000 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1003 if (control->sframe) {
1004 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 packed |= L2CAP_CTRL_FRAME_TYPE;
1008 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header.
 */
1015 static inline void __pack_control(struct l2cap_chan *chan,
1016 struct l2cap_ctrl *control,
1017 struct sk_buff *skb)
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1020 put_unaligned_le32(__pack_extended_control(control),
1021 skb->data + L2CAP_HDR_SIZE);
1023 put_unaligned_le16(__pack_enhanced_control(control),
1024 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM frames: extended vs enhanced control layout. */
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1030 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 return L2CAP_EXT_HDR_SIZE;
1033 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header + (enhanced or extended) control
 * field + optional CRC16 FCS over the frame so far. Returns the skb or
 * ERR_PTR(-ENOMEM).
 */
1036 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1039 struct sk_buff *skb;
1040 struct l2cap_hdr *lh;
1041 int hlen = __ertm_hdr_size(chan);
1043 if (chan->fcs == L2CAP_FCS_CRC16)
1044 hlen += L2CAP_FCS_SIZE;
1046 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1049 return ERR_PTR(-ENOMEM);
1051 lh = skb_put(skb, L2CAP_HDR_SIZE);
1052 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1053 lh->cid = cpu_to_le16(chan->dcid);
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1058 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1060 if (chan->fcs == L2CAP_FCS_CRC16) {
1061 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1062 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1065 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame (RR/RNR/REJ/SREJ). Maintains RNR-sent state,
 * records the last acked sequence for non-SREJ frames, and skips sending
 * while an AMP channel move is in progress.
 */
1069 static void l2cap_send_sframe(struct l2cap_chan *chan,
1070 struct l2cap_ctrl *control)
1072 struct sk_buff *skb;
1075 BT_DBG("chan %p, control %p", chan, control);
1077 if (!control->sframe)
1080 if (__chan_is_moving(chan))
/* Piggy-back the F-bit when one is owed and allowed. */
1083 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1087 if (control->super == L2CAP_SUPER_RR)
1088 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1089 else if (control->super == L2CAP_SUPER_RNR)
1090 set_bit(CONN_RNR_SENT, &chan->conn_state);
1092 if (control->super != L2CAP_SUPER_SREJ) {
1093 chan->last_acked_seq = control->reqseq;
1094 __clear_ack_timer(chan);
1097 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1098 control->final, control->poll, control->super);
1100 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1101 control_field = __pack_extended_control(control);
1103 control_field = __pack_enhanced_control(control);
1105 skb = l2cap_create_sframe_pdu(chan, control_field);
/* NOTE(review): truncated view — the IS_ERR(skb) check is not visible. */
1107 l2cap_do_send(chan, skb);
/* Send an RR (or RNR when the local side is busy) S-frame acknowledging
 * up to buffer_seq, optionally with the poll bit set.
 */
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1112 struct l2cap_ctrl control;
1114 BT_DBG("chan %p, poll %d", chan, poll);
1116 memset(&control, 0, sizeof(control));
1118 control.poll = poll;
1120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 control.super = L2CAP_SUPER_RNR;
1123 control.super = L2CAP_SUPER_RR;
1125 control.reqseq = chan->buffer_seq;
1126 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding; non-connection-oriented
 * channels never have one pending.
 */
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1131 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1134 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: both sides must
 * advertise the A2MP fixed channel, at least one non-BR/EDR AMP controller
 * must be up, and the channel policy must prefer AMP.
 */
1137 static bool __amp_capable(struct l2cap_chan *chan)
1139 struct l2cap_conn *conn = chan->conn;
1140 struct hci_dev *hdev;
1141 bool amp_available = false;
1143 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1146 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1149 read_lock(&hci_dev_list_lock);
1150 list_for_each_entry(hdev, &hci_dev_list, list) {
1151 if (hdev->amp_type != AMP_TYPE_BREDR &&
1152 test_bit(HCI_UP, &hdev->flags)) {
1153 amp_available = true;
1157 read_unlock(&hci_dev_list_lock);
1159 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1160 return amp_available;
/* Validate the channel's Extended Flow Specification parameters.
 * NOTE(review): truncated view — the body is not visible here.
 */
1165 static bool l2cap_check_efs(struct l2cap_chan *chan)
1167 /* Check EFS parameters */
/* Send an L2CAP connection request for this channel (scid + psm),
 * allocating a fresh command ident and marking the connect as pending.
 */
1171 void l2cap_send_conn_req(struct l2cap_chan *chan)
1173 struct l2cap_conn *conn = chan->conn;
1174 struct l2cap_conn_req req;
1176 req.scid = cpu_to_le16(chan->scid);
1177 req.psm = chan->psm;
1179 chan->ident = l2cap_get_ident(conn);
1181 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1183 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant of the connect request: CREATE_CHAN_REQ additionally names
 * the AMP controller (amp_id) the channel should be created on.
 */
1186 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1188 struct l2cap_create_chan_req req;
1189 req.scid = cpu_to_le16(chan->scid);
1190 req.psm = chan->psm;
1191 req.amp_id = amp_id;
1193 chan->ident = l2cap_get_ident(chan->conn);
1195 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset retry
 * counters on queued frames, clear SREJ/retransmit bookkeeping, and park
 * the TX/RX state machines (remote treated as busy until the move ends).
 */
1199 static void l2cap_move_setup(struct l2cap_chan *chan)
1201 struct sk_buff *skb;
1203 BT_DBG("chan %p", chan);
1205 if (chan->mode != L2CAP_MODE_ERTM)
1208 __clear_retrans_timer(chan);
1209 __clear_monitor_timer(chan);
1210 __clear_ack_timer(chan);
1212 chan->retry_count = 0;
1213 skb_queue_walk(&chan->tx_q, skb) {
1214 if (bt_cb(skb)->l2cap.retries)
1215 bt_cb(skb)->l2cap.retries = 1;
1220 chan->expected_tx_seq = chan->buffer_seq;
1222 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 l2cap_seq_list_clear(&chan->retrans_list);
1225 l2cap_seq_list_clear(&chan->srej_list);
1226 skb_queue_purge(&chan->srej_q);
1228 chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 chan->rx_state = L2CAP_RX_STATE_MOVE;
1231 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to STABLE/no-role, then (for ERTM) resume
 * the receive state machine — the initiator polls the peer, the responder
 * waits for the poll.
 */
1234 static void l2cap_move_done(struct l2cap_chan *chan)
1236 u8 move_role = chan->move_role;
1237 BT_DBG("chan %p", chan);
1239 chan->move_state = L2CAP_MOVE_STABLE;
1240 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1242 if (chan->mode != L2CAP_MODE_ERTM)
1245 switch (move_role) {
1246 case L2CAP_MOVE_ROLE_INITIATOR:
1247 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1250 case L2CAP_MOVE_ROLE_RESPONDER:
1251 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark a channel fully connected: clear config state and the channel
 * timer, suspend LE channels that start without TX credits, then notify
 * the socket layer via ops->ready().
 */
1256 static void l2cap_chan_ready(struct l2cap_chan *chan)
1258 /* The channel may have already been flagged as connected in
1259 * case of receiving data before the L2CAP info req/rsp
1260 * procedure is complete.
1262 if (chan->state == BT_CONNECTED)
1265 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 chan->conf_state = 0;
1267 __clear_chan_timer(chan);
1269 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 chan->ops->suspend(chan);
1272 chan->state = BT_CONNECTED;
1274 chan->ops->ready(chan);
/* Send an LE credit-based connection request (psm, scid, mtu, mps, initial
 * credits); FLAG_LE_CONN_REQ_SENT guards against sending it twice.
 */
1277 static void l2cap_le_connect(struct l2cap_chan *chan)
1279 struct l2cap_conn *conn = chan->conn;
1280 struct l2cap_le_conn_req req;
1282 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1285 req.psm = chan->psm;
1286 req.scid = cpu_to_le16(chan->scid);
1287 req.mtu = cpu_to_le16(chan->imtu);
1288 req.mps = cpu_to_le16(chan->mps);
1289 req.credits = cpu_to_le16(chan->rx_credits);
1291 chan->ident = l2cap_get_ident(conn);
1293 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Drive LE channel setup once link security is satisfied: fixed/ATT-style
 * channels become ready immediately, connecting channels send the LE
 * connection request.
 * NOTE(review): truncated view — the condition guarding l2cap_chan_ready()
 * is not visible.
 */
1297 static void l2cap_le_start(struct l2cap_chan *chan)
1299 struct l2cap_conn *conn = chan->conn;
1301 if (!smp_conn_security(conn->hcon, chan->sec_level))
1305 l2cap_chan_ready(chan);
1309 if (chan->state == BT_CONNECT)
1310 l2cap_le_connect(chan);
/* Kick off channel establishment by transport: AMP discovery when AMP is
 * usable, LE start for LE links, plain connection request otherwise.
 */
1313 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 if (__amp_capable(chan)) {
1316 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 a2mp_discover_amp(chan);
1318 } else if (chan->conn->hcon->type == LE_LINK) {
1319 l2cap_le_start(chan);
1321 l2cap_send_conn_req(chan);
/* Send the initial information request (feature mask) for a connection,
 * once per connection, and arm the info response timeout.
 */
1325 static void l2cap_request_info(struct l2cap_conn *conn)
1327 struct l2cap_info_req req;
1329 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 conn->info_ident = l2cap_get_ident(conn);
1337 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1339 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Start channel setup: LE links go straight to LE start; BR/EDR links
 * first complete the feature-mask info exchange, then connect once
 * security is satisfied and no connect request is already pending.
 */
1343 static void l2cap_do_start(struct l2cap_chan *chan)
1345 struct l2cap_conn *conn = chan->conn;
1347 if (conn->hcon->type == LE_LINK) {
1348 l2cap_le_start(chan);
1352 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1353 l2cap_request_info(conn);
1357 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1360 if (l2cap_chan_check_security(chan, true) &&
1361 __l2cap_no_conn_pending(chan))
1362 l2cap_start_connection(chan);
/* Check whether ERTM/streaming mode is supported by both the local
 * feature mask (with ERTM/streaming force-enabled here) and the remote's
 * advertised feat_mask. Non-zero means supported.
 */
1365 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1367 u32 local_feat_mask = l2cap_feat_mask;
1369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1372 case L2CAP_MODE_ERTM:
1373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1374 case L2CAP_MODE_STREAMING:
1375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel (stopping ERTM timers first)
 * and transition it to BT_DISCONN with the given error. A2MP channels skip
 * the wire request — they have no dcid to disconnect.
 */
1381 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1383 struct l2cap_conn *conn = chan->conn;
1384 struct l2cap_disconn_req req;
1389 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1390 __clear_retrans_timer(chan);
1391 __clear_monitor_timer(chan);
1392 __clear_ack_timer(chan);
1395 if (chan->scid == L2CAP_CID_A2MP) {
1396 l2cap_state_change(chan, BT_DISCONN);
1400 req.dcid = cpu_to_le16(chan->dcid);
1401 req.scid = cpu_to_le16(chan->scid);
1402 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1405 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1408 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (under chan_lock) and advance its setup:
 * connectionless channels become ready immediately, BT_CONNECT channels
 * get a connect request once security/mode checks pass, and BT_CONNECT2
 * channels get a Connection Response (plus the first Configure Request
 * on success).  Called once the feature-mask exchange has concluded.
 */
1409 static void l2cap_conn_start(struct l2cap_conn *conn)
1411 struct l2cap_chan *chan, *tmp;
1413 BT_DBG("conn %p", conn);
1415 mutex_lock(&conn->chan_lock);
1417 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1418 l2cap_chan_lock(chan);
/* Non connection-oriented channels need no signalling handshake. */
1420 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1421 l2cap_chan_ready(chan);
1422 l2cap_chan_unlock(chan);
1426 if (chan->state == BT_CONNECT) {
/* Security still pending or a connect request already outstanding:
 * leave this channel for later.
 */
1427 if (!l2cap_chan_check_security(chan, true) ||
1428 !__l2cap_no_conn_pending(chan)) {
1429 l2cap_chan_unlock(chan);
/* Device-mandated mode the peer cannot do: abort the channel. */
1433 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1434 && test_bit(CONF_STATE2_DEVICE,
1435 &chan->conf_state)) {
1436 l2cap_chan_close(chan, ECONNRESET);
1437 l2cap_chan_unlock(chan);
1441 l2cap_start_connection(chan);
1443 } else if (chan->state == BT_CONNECT2) {
1444 struct l2cap_conn_rsp rsp;
/* In the response our dcid/scid swap roles relative to the request. */
1446 rsp.scid = cpu_to_le16(chan->dcid);
1447 rsp.dcid = cpu_to_le16(chan->scid);
1449 if (l2cap_chan_check_security(chan, false)) {
/* Deferred setup: report authorization pending and let the owner
 * accept/reject via the defer callback.
 */
1450 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1451 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1452 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1453 chan->ops->defer(chan);
1456 l2cap_state_change(chan, BT_CONFIG);
1457 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1458 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer "pending, authentication". */
1461 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1462 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1465 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful, not-yet-configured channel proceeds to send its
 * first Configure Request.
 */
1468 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1469 rsp.result != L2CAP_CR_SUCCESS) {
1470 l2cap_chan_unlock(chan);
1474 set_bit(CONF_REQ_SENT, &chan->conf_state);
1475 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1476 l2cap_build_conf_req(chan, buf), buf);
1477 chan->num_conf_req++;
1480 l2cap_chan_unlock(chan);
1483 mutex_unlock(&conn->chan_lock);
/* LE link just came up: raise security for any pending pairing and, as
 * slave, request a connection-parameter update if the current interval
 * is outside the configured min/max range.
 */
1486 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1488 struct hci_conn *hcon = conn->hcon;
1489 struct hci_dev *hdev = hcon->hdev;
1491 BT_DBG("%s conn %p", hdev->name, conn);
1493 /* For outgoing pairing which doesn't necessarily have an
1494 * associated socket (e.g. mgmt_pair_device).
1497 smp_conn_security(hcon, hcon->pending_sec_level);
1499 /* For LE slave connections, make sure the connection interval
1500 * is in the range of the minimum and maximum interval that has
1501 * been configured for this connection. If not, then trigger
1502 * the connection update procedure.
1504 if (hcon->role == HCI_ROLE_SLAVE &&
1505 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1506 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1507 struct l2cap_conn_param_update_req req;
1509 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1510 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1511 req.latency = cpu_to_le16(hcon->le_conn_latency);
1512 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1514 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1515 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Link-layer connection is up: start the info exchange on ACL, walk
 * all channels to ready/start them, run the LE-specific setup, then
 * kick the deferred RX worker.
 */
1519 static void l2cap_conn_ready(struct l2cap_conn *conn)
1521 struct l2cap_chan *chan;
1522 struct hci_conn *hcon = conn->hcon;
1524 BT_DBG("conn %p", conn);
1526 if (hcon->type == ACL_LINK)
1527 l2cap_request_info(conn);
1529 mutex_lock(&conn->chan_lock);
1531 list_for_each_entry(chan, &conn->chan_l, list) {
1533 l2cap_chan_lock(chan);
/* A2MP fixed channel needs none of the handling below. */
1535 if (chan->scid == L2CAP_CID_A2MP) {
1536 l2cap_chan_unlock(chan);
1540 if (hcon->type == LE_LINK) {
1541 l2cap_le_start(chan);
/* Connectionless channels are ready as soon as the feature-mask
 * exchange has completed.
 */
1542 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1543 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1544 l2cap_chan_ready(chan);
1545 } else if (chan->state == BT_CONNECT) {
1546 l2cap_do_start(chan);
1549 l2cap_chan_unlock(chan);
1552 mutex_unlock(&conn->chan_lock);
1554 if (hcon->type == LE_LINK)
1555 l2cap_le_conn_ready(conn);
/* Process any frames that were queued before the conn became ready. */
1557 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1560 /* Notify sockets that we cannot guarantee reliability anymore */
/* Reliability can no longer be guaranteed on @conn: propagate @err to
 * every channel that explicitly asked for reliable delivery
 * (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
1561 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1563 struct l2cap_chan *chan;
1565 BT_DBG("conn %p", conn);
1567 mutex_lock(&conn->chan_lock);
1569 list_for_each_entry(chan, &conn->chan_l, list) {
1570 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1571 l2cap_chan_set_err(chan, err);
1574 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without an INFO_RSP: treat the feature-
 * mask exchange as done (with whatever defaults apply) and resume
 * channel establishment via l2cap_conn_start().
 */
1577 static void l2cap_info_timeout(struct work_struct *work)
1579 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1582 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
/* No outstanding info request any more. */
1583 conn->info_ident = 0;
1585 l2cap_conn_start(conn);
1590 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1591 * callback is called during registration. The ->remove callback is called
1592 * during unregistration.
1593 * An l2cap_user object can either be explicitly unregistered or when the
1594 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1595 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1596 * External modules must own a reference to the l2cap_conn object if they intend
1597 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1598 * any time if they don't.
/* Register an external l2cap_user on @conn.  Fails if the user is
 * already registered (non-empty list node) or, per the comment below,
 * if the conn has already been unregistered (conn->hchan == NULL in an
 * elided check).  The user's ->probe callback runs during registration.
 */
1601 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1603 struct hci_dev *hdev = conn->hcon->hdev;
1606 /* We need to check whether l2cap_conn is registered. If it is not, we
1607 * must not register the l2cap_user. l2cap_conn_del() unregisters
1608 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1609 * relies on the parent hci_conn object to be locked. This itself relies
1610 * on the hci_dev object to be locked. So we must lock the hci device
/* A non-empty list node means this user is already registered. */
1615 if (!list_empty(&user->list)) {
1620 /* conn->hchan is NULL after l2cap_conn_del() was called */
1626 ret = user->probe(conn, user);
1630 list_add(&user->list, &conn->users);
1634 hci_dev_unlock(hdev);
1637 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user under the hci_dev lock.  A user
 * whose list node is empty was never (or is no longer) registered; the
 * ->remove callback fires exactly once, after the list removal.
 */
1639 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1641 struct hci_dev *hdev = conn->hcon->hdev;
1645 if (list_empty(&user->list))
/* list_del_init keeps the node reusable and makes double-unregister
 * harmless (caught by the check above).
 */
1648 list_del_init(&user->list);
1649 user->remove(conn, user);
1652 hci_dev_unlock(hdev);
1654 EXPORT_SYMBOL(l2cap_unregister_user);
/* Unregister every remaining l2cap_user on @conn, invoking each
 * ->remove callback.  Used when the underlying connection goes away.
 */
1656 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1658 struct l2cap_user *user;
/* Pop from the head each iteration: safe even if ->remove unregisters
 * other users.
 */
1660 while (!list_empty(&conn->users)) {
1661 user = list_first_entry(&conn->users, struct l2cap_user, list);
1662 list_del_init(&user->list);
1663 user->remove(conn, user);
/* Destroy the L2CAP layer of @hcon: flush pending RX, stop workers,
 * detach all users, delete every channel with @err, then drop the
 * hci_chan and the conn reference.  Relies on the caller holding the
 * hci_dev lock (see the comment in l2cap_register_user()).
 */
1667 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1669 struct l2cap_conn *conn = hcon->l2cap_data;
1670 struct l2cap_chan *chan, *l;
1675 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially-reassembled frame. */
1677 kfree_skb(conn->rx_skb);
1679 skb_queue_purge(&conn->pending_rx);
1681 /* We can not call flush_work(&conn->pending_rx_work) here since we
1682 * might block if we are running on a worker from the same workqueue
1683 * pending_rx_work is waiting on.
1685 if (work_pending(&conn->pending_rx_work))
1686 cancel_work_sync(&conn->pending_rx_work);
1688 if (work_pending(&conn->id_addr_update_work))
1689 cancel_work_sync(&conn->id_addr_update_work);
1691 l2cap_unregister_all_users(conn);
1693 /* Force the connection to be immediately dropped */
1694 hcon->disc_timeout = 0;
1696 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so the ops->close callback runs
 * on a still-valid object; the hold is dropped right after.
 */
1699 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1700 l2cap_chan_hold(chan);
1701 l2cap_chan_lock(chan);
1703 l2cap_chan_del(chan, err);
1705 l2cap_chan_unlock(chan);
1707 chan->ops->close(chan);
1708 l2cap_chan_put(chan);
1711 mutex_unlock(&conn->chan_lock);
1713 hci_chan_del(conn->hchan);
1715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1716 cancel_delayed_work_sync(&conn->info_timer);
/* Detach from the hci_conn before dropping our reference. */
1718 hcon->l2cap_data = NULL;
1720 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the conn
 * (the kfree of the conn itself is on an elided line).
 */
1723 static void l2cap_conn_free(struct kref *ref)
1725 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1727 hci_conn_put(conn->hcon);
/* Take a reference on @conn (returns @conn on an elided line). */
1731 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1733 kref_get(&conn->ref);
1736 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; l2cap_conn_free() runs on the last put. */
1738 void l2cap_conn_put(struct l2cap_conn *conn)
1740 kref_put(&conn->ref, l2cap_conn_free);
1742 EXPORT_SYMBOL(l2cap_conn_put);
1744 /* ---- Socket interface ---- */
1746 /* Find socket with psm and source / destination bdaddr.
1747 * Returns closest match.
/* Find a listening/global channel by PSM and src/dst addresses under
 * chan_list_lock.  An exact src+dst match wins immediately; otherwise
 * the best wildcard (BDADDR_ANY) match collected in c1 is returned.
 * The returned channel is held; the caller must put it.
 */
1749 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1754 struct l2cap_chan *c, *c1 = NULL;
1756 read_lock(&chan_list_lock);
1758 list_for_each_entry(c, &chan_list, global_l) {
1759 if (state && c->state != state)
/* Filter by transport: BR/EDR sockets only on ACL, non-BR/EDR on LE. */
1762 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1765 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1768 if (c->psm == psm) {
1769 int src_match, dst_match;
1770 int src_any, dst_any;
1773 src_match = !bacmp(&c->src, src);
1774 dst_match = !bacmp(&c->dst, dst);
/* Perfect match: return right away (hold is on an elided line —
 * confirm against full source).
 */
1775 if (src_match && dst_match) {
1777 read_unlock(&chan_list_lock);
1782 src_any = !bacmp(&c->src, BDADDR_ANY);
1783 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1784 if ((src_match && dst_any) || (src_any && dst_match) ||
1785 (src_any && dst_any))
/* Hold the closest wildcard match before releasing the list lock. */
1791 l2cap_chan_hold(c1);
1793 read_unlock(&chan_list_lock);
/* ERTM monitor timer: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine.  The early unlock/put pair suggests an elided guard (e.g.
 * channel already closing) before the event is delivered — confirm.
 */
1798 static void l2cap_monitor_timeout(struct work_struct *work)
1800 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1801 monitor_timer.work);
1803 BT_DBG("chan %p", chan);
1805 l2cap_chan_lock(chan);
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
1813 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1815 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed. */
1816 l2cap_chan_put(chan);
/* ERTM retransmission timer: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine.  Mirrors l2cap_monitor_timeout(), including the
 * (elided) early-exit guard before delivering the event.
 */
1819 static void l2cap_retrans_timeout(struct work_struct *work)
1821 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1822 retrans_timer.work);
1824 BT_DBG("chan %p", chan);
1826 l2cap_chan_lock(chan);
1829 l2cap_chan_unlock(chan);
1830 l2cap_chan_put(chan);
1834 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1835 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed. */
1836 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately (no acknowledgements, no retransmission).
 * Each frame gets a sequence number and, if negotiated, a trailing FCS.
 */
1839 static void l2cap_streaming_send(struct l2cap_chan *chan,
1840 struct sk_buff_head *skbs)
1842 struct sk_buff *skb;
1843 struct l2cap_ctrl *control;
1845 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1847 if (__chan_is_moving(chan))
1850 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1852 while (!skb_queue_empty(&chan->tx_q)) {
1854 skb = skb_dequeue(&chan->tx_q);
1856 bt_cb(skb)->l2cap.retries = 1;
1857 control = &bt_cb(skb)->l2cap;
/* Streaming never acknowledges, so reqseq is always 0. */
1859 control->reqseq = 0;
1860 control->txseq = chan->next_tx_seq;
1862 __pack_control(chan, control, skb);
1864 if (chan->fcs == L2CAP_FCS_CRC16) {
1865 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1866 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1869 l2cap_do_send(chan, skb);
1871 BT_DBG("Sent txseq %u", control->txseq);
1873 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1874 chan->frames_sent++;
/* ERTM transmit: send I-frames from tx_send_head while the remote TX
 * window has room and the state machine is in XMIT.  Frames stay on
 * tx_q for possible retransmission; a clone is what actually goes out.
 * Returns the number of frames sent (return statements are elided).
 */
1878 static int l2cap_ertm_send(struct l2cap_chan *chan)
1880 struct sk_buff *skb, *tx_skb;
1881 struct l2cap_ctrl *control;
1884 BT_DBG("chan %p", chan);
1886 if (chan->state != BT_CONNECTED)
/* Remote receiver is busy (RNR received): hold transmission. */
1889 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1892 if (__chan_is_moving(chan))
1895 while (chan->tx_send_head &&
1896 chan->unacked_frames < chan->remote_tx_win &&
1897 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1899 skb = chan->tx_send_head;
1901 bt_cb(skb)->l2cap.retries = 1;
1902 control = &bt_cb(skb)->l2cap;
/* Piggyback a pending F-bit if one is owed to the peer. */
1904 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges everything up to buffer_seq. */
1907 control->reqseq = chan->buffer_seq;
1908 chan->last_acked_seq = chan->buffer_seq;
1909 control->txseq = chan->next_tx_seq;
1911 __pack_control(chan, control, skb);
1913 if (chan->fcs == L2CAP_FCS_CRC16) {
1914 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1915 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1918 /* Clone after data has been modified. Data is assumed to be
1919 read-only (for locking purposes) on cloned sk_buffs.
1921 tx_skb = skb_clone(skb, GFP_KERNEL);
1926 __set_retrans_timer(chan);
1928 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 chan->unacked_frames++;
1930 chan->frames_sent++;
/* Advance tx_send_head; NULL means nothing left to send. */
1933 if (skb_queue_is_last(&chan->tx_q, skb))
1934 chan->tx_send_head = NULL;
1936 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1938 l2cap_do_send(chan, tx_skb);
1939 BT_DBG("Sent txseq %u", control->txseq);
1942 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1943 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list.  Each frame
 * is re-sent with an updated control field (current reqseq, possible
 * F-bit) and recomputed FCS; the retry counter enforces max_tx.
 */
1950 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1952 struct l2cap_ctrl control;
1951 struct sk_buff *skb;
1952 struct sk_buff *tx_skb;
1955 BT_DBG("chan %p", chan);
1957 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1960 if (__chan_is_moving(chan))
1963 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1964 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* Frame may already have been acked and freed from tx_q. */
1966 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1968 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1973 bt_cb(skb)->l2cap.retries++;
1974 control = bt_cb(skb)->l2cap;
/* Too many retries: give up and disconnect the channel. */
1976 if (chan->max_tx != 0 &&
1977 bt_cb(skb)->l2cap.retries > chan->max_tx) {
1978 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1979 l2cap_send_disconn_req(chan, ECONNRESET);
1980 l2cap_seq_list_clear(&chan->retrans_list);
1984 control.reqseq = chan->buffer_seq;
1985 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1990 if (skb_cloned(skb)) {
1991 /* Cloned sk_buffs are read-only, so we need a
1994 tx_skb = skb_copy(skb, GFP_KERNEL);
1996 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failed: abandon this resend pass. */
2000 l2cap_seq_list_clear(&chan->retrans_list);
2004 /* Update skb contents */
2005 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2006 put_unaligned_le32(__pack_extended_control(&control),
2007 tx_skb->data + L2CAP_HDR_SIZE);
2009 put_unaligned_le16(__pack_enhanced_control(&control),
2010 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the rewritten control field. */
2014 if (chan->fcs == L2CAP_FCS_CRC16) {
2015 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2016 tx_skb->len - L2CAP_FCS_SIZE);
2017 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2021 l2cap_do_send(chan, tx_skb);
2023 BT_DBG("Resent txseq %d", control.txseq);
2025 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by @control->reqseq. */
2029 static void l2cap_retransmit(struct l2cap_chan *chan,
2030 struct l2cap_ctrl *control)
2032 BT_DBG("chan %p, control %p", chan, control);
2034 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2035 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at @control->reqseq: locate
 * that frame in tx_q, queue it and all following unacked frames (up to
 * tx_send_head) on retrans_list, then resend.
 */
2038 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2039 struct l2cap_ctrl *control)
2041 struct sk_buff *skb;
2043 BT_DBG("chan %p, control %p", chan, control);
/* Presumably guarded by control->poll on an elided line — confirm. */
2046 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2048 l2cap_seq_list_clear(&chan->retrans_list);
2050 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2053 if (chan->unacked_frames) {
/* First walk: find the frame with txseq == reqseq (or stop early at
 * tx_send_head, past which nothing has been transmitted yet).
 */
2054 skb_queue_walk(&chan->tx_q, skb) {
2055 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2056 skb == chan->tx_send_head)
/* Second walk: queue everything from there up to tx_send_head. */
2060 skb_queue_walk_from(&chan->tx_q, skb) {
2061 if (skb == chan->tx_send_head)
2064 l2cap_seq_list_append(&chan->retrans_list,
2065 bt_cb(skb)->l2cap.txseq);
2068 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when locally busy,
 * prefers piggybacking the ack on outgoing I-frames, sends an explicit
 * RR once ~3/4 of the ack window is outstanding, and otherwise re-arms
 * the ack timer to batch future acks.
 */
2072 static void l2cap_send_ack(struct l2cap_chan *chan)
2074 struct l2cap_ctrl control;
2075 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2076 chan->last_acked_seq);
2079 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2080 chan, chan->last_acked_seq, chan->buffer_seq);
2082 memset(&control, 0, sizeof(control));
/* Locally busy in plain RECV state: tell the peer to stop (RNR). */
2085 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2086 chan->rx_state == L2CAP_RX_STATE_RECV) {
2087 __clear_ack_timer(chan);
2088 control.super = L2CAP_SUPER_RNR;
2089 control.reqseq = chan->buffer_seq;
2090 l2cap_send_sframe(chan, &control);
2092 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
/* Try to piggyback the ack on pending I-frames first. */
2093 l2cap_ertm_send(chan);
2094 /* If any i-frames were sent, they included an ack */
2095 if (chan->buffer_seq == chan->last_acked_seq)
2099 /* Ack now if the window is 3/4ths full.
2100 * Calculate without mul or div
2102 threshold = chan->ack_win;
/* threshold*3 via shift-add; the /4 shift is on an elided line. */
2103 threshold += threshold << 1;
2106 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2109 if (frames_to_ack >= threshold) {
2110 __clear_ack_timer(chan);
2111 control.super = L2CAP_SUPER_RR;
2112 control.reqseq = chan->buffer_seq;
2113 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack until the timer fires. */
2118 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area, the remainder into a frag_list of
 * continuation buffers each at most conn->mtu bytes.
 */
2122 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2123 struct msghdr *msg, int len,
2124 int count, struct sk_buff *skb)
2126 struct l2cap_conn *conn = chan->conn;
2127 struct sk_buff **frag;
2130 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2136 /* Continuation fragments (no L2CAP header) */
2137 frag = &skb_shinfo(skb)->frag_list;
2139 struct sk_buff *tmp;
2141 count = min_t(unsigned int, conn->mtu, len);
2143 tmp = chan->ops->alloc_skb(chan, 0, count,
2144 msg->msg_flags & MSG_DONTWAIT);
2146 return PTR_ERR(tmp);
2150 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Keep the parent skb's accounting in sync with its fragments. */
2157 skb->len += (*frag)->len;
2158 skb->data_len += (*frag)->len;
2160 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload copied from @msg.  Returns
 * the skb or an ERR_PTR on copy failure.
 */
2166 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2167 struct msghdr *msg, size_t len)
2169 struct l2cap_conn *conn = chan->conn;
2170 struct sk_buff *skb;
2171 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2172 struct l2cap_hdr *lh;
2174 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2175 __le16_to_cpu(chan->psm), len);
/* Linear part is capped by the HCI MTU; the rest goes to frag_list. */
2177 count = min_t(unsigned int, (conn->mtu - hlen), len);
2179 skb = chan->ops->alloc_skb(chan, hlen, count,
2180 msg->msg_flags & MSG_DONTWAIT);
2184 /* Create L2CAP header */
2185 lh = skb_put(skb, L2CAP_HDR_SIZE);
2186 lh->cid = cpu_to_le16(chan->dcid);
/* Header length covers payload + PSM field. */
2187 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2188 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2190 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2191 if (unlikely(err < 0)) {
2193 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg.  Returns the skb or an ERR_PTR.
 */
2198 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2199 struct msghdr *msg, size_t len)
2201 struct l2cap_conn *conn = chan->conn;
2202 struct sk_buff *skb;
2204 struct l2cap_hdr *lh;
2206 BT_DBG("chan %p len %zu", chan, len);
2208 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2210 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2211 msg->msg_flags & MSG_DONTWAIT);
2215 /* Create L2CAP header */
2216 lh = skb_put(skb, L2CAP_HDR_SIZE);
2217 lh->cid = cpu_to_le16(chan->dcid);
2218 lh->len = cpu_to_le16(len);
2220 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 if (unlikely(err < 0)) {
2223 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  Header space accounts for the
 * (enhanced or extended) control field, an optional SDU-length field
 * for segmented SDUs, and an optional FCS trailer.  The control field
 * is zeroed here and filled in at transmit time.
 */
2228 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2229 struct msghdr *msg, size_t len,
2232 struct l2cap_conn *conn = chan->conn;
2233 struct sk_buff *skb;
2234 int err, count, hlen;
2235 struct l2cap_hdr *lh;
2237 BT_DBG("chan %p len %zu", chan, len);
2240 return ERR_PTR(-ENOTCONN);
2242 hlen = __ertm_hdr_size(chan);
/* SDU-length field only appears on the first segment (sdulen != 0). */
2245 hlen += L2CAP_SDULEN_SIZE;
2247 if (chan->fcs == L2CAP_FCS_CRC16)
2248 hlen += L2CAP_FCS_SIZE;
2250 count = min_t(unsigned int, (conn->mtu - hlen), len);
2252 skb = chan->ops->alloc_skb(chan, hlen, count,
2253 msg->msg_flags & MSG_DONTWAIT);
2257 /* Create L2CAP header */
2258 lh = skb_put(skb, L2CAP_HDR_SIZE);
2259 lh->cid = cpu_to_le16(chan->dcid);
2260 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2262 /* Control header is populated later */
2263 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2264 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2266 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2269 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2271 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 if (unlikely(err < 0)) {
2274 return ERR_PTR(err);
/* Stash per-frame metadata used by the ERTM send/resend paths. */
2277 bt_cb(skb)->l2cap.fcs = chan->fcs;
2278 bt_cb(skb)->l2cap.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming PDUs queued on
 * @seg_queue.  PDU size is bounded by the HCI MTU (so each PDU fits in
 * one HCI fragment), the BR/EDR maximum and the remote MPS.  SAR marks
 * run UNSEGMENTED or START / CONTINUE / END.
 */
2282 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2283 struct sk_buff_head *seg_queue,
2284 struct msghdr *msg, size_t len)
2286 struct sk_buff *skb;
2291 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2293 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2294 * so fragmented skbs are not used. The HCI layer's handling
2295 * of fragmented skbs is not compatible with ERTM's queueing.
2298 /* PDU size is derived from the HCI MTU */
2299 pdu_len = chan->conn->mtu;
2301 /* Constrain PDU size for BR/EDR connections */
2303 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2305 /* Adjust for largest possible L2CAP overhead. */
2307 pdu_len -= L2CAP_FCS_SIZE;
2309 pdu_len -= __ertm_hdr_size(chan);
2311 /* Remote device may have requested smaller PDUs */
2312 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* Whole SDU fits in one PDU: no SDU-length field needed. */
2314 if (len <= pdu_len) {
2315 sar = L2CAP_SAR_UNSEGMENTED;
2319 sar = L2CAP_SAR_START;
/* Loop body (elided bounds) builds one PDU per iteration. */
2324 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2327 __skb_queue_purge(seg_queue);
2328 return PTR_ERR(skb);
2331 bt_cb(skb)->l2cap.sar = sar;
2332 __skb_queue_tail(seg_queue, skb);
/* Remaining data decides whether the next segment is END or CONTINUE. */
2338 if (len <= pdu_len) {
2339 sar = L2CAP_SAR_END;
2342 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow-control PDU: L2CAP header plus an
 * optional SDU-length field (first segment only), then the payload.
 */
2349 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2351 size_t len, u16 sdulen)
2353 struct l2cap_conn *conn = chan->conn;
2354 struct sk_buff *skb;
2355 int err, count, hlen;
2356 struct l2cap_hdr *lh;
2358 BT_DBG("chan %p len %zu", chan, len);
2361 return ERR_PTR(-ENOTCONN);
2363 hlen = L2CAP_HDR_SIZE;
/* SDU-length field only on the first PDU of a segmented SDU. */
2366 hlen += L2CAP_SDULEN_SIZE;
2368 count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 skb = chan->ops->alloc_skb(chan, hlen, count,
2371 msg->msg_flags & MSG_DONTWAIT);
2375 /* Create L2CAP header */
2376 lh = skb_put(skb, L2CAP_HDR_SIZE);
2377 lh->cid = cpu_to_le16(chan->dcid);
2378 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2381 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2383 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 if (unlikely(err < 0)) {
2386 return ERR_PTR(err);
/* Segment an SDU for LE credit-based flow control.  The first PDU
 * carries the SDU length and is therefore L2CAP_SDULEN_SIZE smaller;
 * subsequent PDUs use the full remote MPS.
 */
2392 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2393 struct sk_buff_head *seg_queue,
2394 struct msghdr *msg, size_t len)
2396 struct sk_buff *skb;
2400 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2403 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2409 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2411 __skb_queue_purge(seg_queue);
2412 return PTR_ERR(skb);
2415 __skb_queue_tail(seg_queue, skb);
/* After the first PDU, reclaim the SDU-length field's space. */
2421 pdu_len += L2CAP_SDULEN_SIZE;
/* Send queued LE PDUs while TX credits remain (credit decrement is on
 * an elided line inside the loop).
 */
2428 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2432 BT_DBG("chan %p", chan);
2434 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2435 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2440 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2441 skb_queue_len(&chan->tx_q));
/* Entry point for sending user data on a channel.  Dispatches on the
 * channel type/mode: connectionless, LE credit-based, basic, or
 * ERTM/streaming.  Each path rechecks BT_CONNECTED after PDU creation
 * because skb allocation drops and reacquires the channel lock.
 */
2444 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2446 struct sk_buff *skb;
2448 struct sk_buff_head seg_queue;
2453 /* Connectionless channel */
2454 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2455 skb = l2cap_create_connless_pdu(chan, msg, len);
2457 return PTR_ERR(skb);
2459 /* Channel lock is released before requesting new skb and then
2460 * reacquired thus we need to recheck channel state.
2462 if (chan->state != BT_CONNECTED) {
2467 l2cap_do_send(chan, skb);
2471 switch (chan->mode) {
2472 case L2CAP_MODE_LE_FLOWCTL:
2473 /* Check outgoing MTU */
2474 if (len > chan->omtu)
2477 __skb_queue_head_init(&seg_queue);
2479 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2481 if (chan->state != BT_CONNECTED) {
2482 __skb_queue_purge(&seg_queue);
2489 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2491 l2cap_le_flowctl_send(chan);
/* Out of credits: ask the owner to stop feeding us data. */
2493 if (!chan->tx_credits)
2494 chan->ops->suspend(chan);
2500 case L2CAP_MODE_BASIC:
2501 /* Check outgoing MTU */
2502 if (len > chan->omtu)
2505 /* Create a basic PDU */
2506 skb = l2cap_create_basic_pdu(chan, msg, len);
2508 return PTR_ERR(skb);
2510 /* Channel lock is released before requesting new skb and then
2511 * reacquired thus we need to recheck channel state.
2513 if (chan->state != BT_CONNECTED) {
2518 l2cap_do_send(chan, skb);
2522 case L2CAP_MODE_ERTM:
2523 case L2CAP_MODE_STREAMING:
2524 /* Check outgoing MTU */
2525 if (len > chan->omtu) {
2530 __skb_queue_head_init(&seg_queue);
2532 /* Do segmentation before calling in to the state machine,
2533 * since it's possible to block while waiting for memory
2536 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2538 /* The channel could have been closed while segmenting,
2539 * check that it is still connected.
2541 if (chan->state != BT_CONNECTED) {
2542 __skb_queue_purge(&seg_queue);
/* ERTM routes through the TX state machine; streaming sends direct. */
2549 if (chan->mode == L2CAP_MODE_ERTM)
2550 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2552 l2cap_streaming_send(chan, &seg_queue);
2556 /* If the skbs were not queued for sending, they'll still be in
2557 * seg_queue and need to be purged.
2559 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode. */
2563 BT_DBG("bad state %1.1x", chan->mode);
2569 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq, remembering each on srej_list; sequence
 * numbers already buffered in srej_q are skipped.
 */
2571 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2573 struct l2cap_ctrl control;
2576 BT_DBG("chan %p, txseq %u", chan, txseq);
2578 memset(&control, 0, sizeof(control));
2580 control.super = L2CAP_SUPER_SREJ;
2582 for (seq = chan->expected_tx_seq; seq != txseq;
2583 seq = __next_seq(chan, seq)) {
2584 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2585 control.reqseq = seq;
2586 l2cap_send_sframe(chan, &control);
2587 l2cap_seq_list_append(&chan->srej_list, seq);
/* The frame with @txseq itself arrived; expect the one after it. */
2591 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number, if the SREJ list is non-empty.
 */
2594 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2596 struct l2cap_ctrl control;
2598 BT_DBG("chan %p", chan);
2600 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2603 memset(&control, 0, sizeof(control));
2605 control.super = L2CAP_SUPER_SREJ;
2606 control.reqseq = chan->srej_list.tail;
2607 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all outstanding sequence numbers except @txseq.
 * Each entry is popped and re-appended, so the list rotates; the saved
 * initial head bounds the walk to a single pass.
 */
2610 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2612 struct l2cap_ctrl control;
2616 BT_DBG("chan %p, txseq %u", chan, txseq);
2618 memset(&control, 0, sizeof(control));
2620 control.super = L2CAP_SUPER_SREJ;
2622 /* Capture initial list head to allow only one pass through the list. */
2623 initial_head = chan->srej_list.head;
2626 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq has been satisfied — drop it; CLEAR means the list emptied. */
2627 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2630 control.reqseq = seq;
2631 l2cap_send_sframe(chan, &control);
2632 l2cap_seq_list_append(&chan->srej_list, seq);
2633 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queue frame with
 * a sequence number before @reqseq, update expected_ack_seq, and stop
 * the retransmission timer once nothing is left unacked.
 */
2636 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2638 struct sk_buff *acked_skb;
2641 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack adds no new information. */
2643 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2646 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2647 chan->expected_ack_seq, chan->unacked_frames);
2649 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2650 ackseq = __next_seq(chan, ackseq)) {
2652 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2654 skb_unlink(acked_skb, &chan->tx_q);
2655 kfree_skb(acked_skb);
2656 chan->unacked_frames--;
2660 chan->expected_ack_seq = reqseq;
2662 if (chan->unacked_frames == 0)
2663 __clear_retrans_timer(chan);
2665 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT RX state: reset the expected sequence to the
 * last delivered frame, drop all SREJ bookkeeping and buffered
 * out-of-order frames, and fall back to plain RECV.
 */
2668 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2670 BT_DBG("chan %p", chan);
2672 chan->expected_tx_seq = chan->buffer_seq;
2673 l2cap_seq_list_clear(&chan->srej_list);
2674 skb_queue_purge(&chan->srej_q);
2675 chan->rx_state = L2CAP_RX_STATE_RECV;
/* TX state machine, XMIT state.  Handles data requests, local-busy
 * transitions, acknowledgements, explicit polls and retransmission
 * timeouts; polls and timeouts move the machine to WAIT_F.
 */
2678 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2679 struct l2cap_ctrl *control,
2680 struct sk_buff_head *skbs, u8 event)
2682 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2686 case L2CAP_EV_DATA_REQUEST:
/* If nothing is pending, the new batch's first frame is next to go. */
2687 if (chan->tx_send_head == NULL)
2688 chan->tx_send_head = skb_peek(skbs);
2690 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2691 l2cap_ertm_send(chan);
2693 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2694 BT_DBG("Enter LOCAL_BUSY");
2695 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2697 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2698 /* The SREJ_SENT state must be aborted if we are to
2699 * enter the LOCAL_BUSY state.
2701 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while locally busy. */
2704 l2cap_send_ack(chan);
2707 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2708 BT_DBG("Exit LOCAL_BUSY");
2709 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* An RNR went out: poll the peer with RR(P=1) and await the F-bit. */
2711 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2712 struct l2cap_ctrl local_control;
2714 memset(&local_control, 0, sizeof(local_control));
2715 local_control.sframe = 1;
2716 local_control.super = L2CAP_SUPER_RR;
2717 local_control.poll = 1;
2718 local_control.reqseq = chan->buffer_seq;
2719 l2cap_send_sframe(chan, &local_control);
2721 chan->retry_count = 1;
2722 __set_monitor_timer(chan);
2723 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2726 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2727 l2cap_process_reqseq(chan, control->reqseq);
2729 case L2CAP_EV_EXPLICIT_POLL:
2730 l2cap_send_rr_or_rnr(chan, 1);
2731 chan->retry_count = 1;
2732 __set_monitor_timer(chan);
2733 __clear_ack_timer(chan);
2734 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retrans timeout: same poll procedure as an explicit poll. */
2736 case L2CAP_EV_RETRANS_TO:
2737 l2cap_send_rr_or_rnr(chan, 1);
2738 chan->retry_count = 1;
2739 __set_monitor_timer(chan);
2740 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2742 case L2CAP_EV_RECV_FBIT:
2743 /* Nothing to process */
/* TX state machine, WAIT_F state: a poll (P=1) is outstanding and we
 * are waiting for a frame with the F-bit set.  Data is queued but not
 * sent; a received F-bit returns the machine to XMIT; monitor timeouts
 * re-poll up to max_tx times, then disconnect.
 */
2750 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2751 struct l2cap_ctrl *control,
2752 struct sk_buff_head *skbs, u8 event)
2754 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2758 case L2CAP_EV_DATA_REQUEST:
2759 if (chan->tx_send_head == NULL)
2760 chan->tx_send_head = skb_peek(skbs);
2761 /* Queue data, but don't send. */
2762 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2764 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2765 BT_DBG("Enter LOCAL_BUSY");
2766 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2768 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2769 /* The SREJ_SENT state must be aborted if we are to
2770 * enter the LOCAL_BUSY state.
2772 l2cap_abort_rx_srej_sent(chan);
2775 l2cap_send_ack(chan);
2778 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2779 BT_DBG("Exit LOCAL_BUSY");
2780 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2782 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2783 struct l2cap_ctrl local_control;
2784 memset(&local_control, 0, sizeof(local_control));
2785 local_control.sframe = 1;
2786 local_control.super = L2CAP_SUPER_RR;
2787 local_control.poll = 1;
2788 local_control.reqseq = chan->buffer_seq;
2789 l2cap_send_sframe(chan, &local_control);
2791 chan->retry_count = 1;
2792 __set_monitor_timer(chan);
2793 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2796 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2797 l2cap_process_reqseq(chan, control->reqseq);
/* The awaited F-bit arrived: stop the monitor timer, resume the
 * retrans timer if frames remain unacked, and return to XMIT.
 */
2801 case L2CAP_EV_RECV_FBIT:
2802 if (control && control->final) {
2803 __clear_monitor_timer(chan);
2804 if (chan->unacked_frames > 0)
2805 __set_retrans_timer(chan);
2806 chan->retry_count = 0;
2807 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format specifier "0x2.2%x" looks malformed — likely
 * meant "0x%2.2x"; confirm against the full source before fixing.
 */
2808 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2811 case L2CAP_EV_EXPLICIT_POLL:
2814 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever. */
2815 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2816 l2cap_send_rr_or_rnr(chan, 1);
2817 __set_monitor_timer(chan);
2818 chan->retry_count++;
2820 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine dispatcher: route @event to the handler for
 * the channel's current tx_state (XMIT or WAIT_F).
 */
2828 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2829 struct sk_buff_head *skbs, u8 event)
2831 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2832 chan, control, skbs, event, chan->tx_state);
2834 switch (chan->tx_state) {
2835 case L2CAP_TX_STATE_XMIT:
2836 l2cap_tx_state_xmit(chan, control, skbs, event);
2838 case L2CAP_TX_STATE_WAIT_F:
2839 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Deliver a received reqseq (+ possible F-bit) to the TX machine. */
2847 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2848 struct l2cap_ctrl *control)
2850 BT_DBG("chan %p, control %p", chan, control);
2851 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Deliver only an F-bit indication to the TX state machine. */
2854 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2855 struct l2cap_ctrl *control)
2857 BT_DBG("chan %p, control %p", chan, control);
2858 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2861 /* Copy frame to all raw sockets on that connection */
2862 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2864 struct sk_buff *nskb;
2865 struct l2cap_chan *chan;
2867 BT_DBG("conn %p", conn);
2869 mutex_lock(&conn->chan_lock);
2871 list_for_each_entry(chan, &conn->chan_l, list) {
2872 if (chan->chan_type != L2CAP_CHAN_RAW)
2875 /* Don't send frame to the channel it came from */
2876 if (bt_cb(skb)->l2cap.chan == chan)
2879 nskb = skb_clone(skb, GFP_KERNEL);
2882 if (chan->ops->recv(chan, nskb))
2886 mutex_unlock(&conn->chan_lock);
2889 /* ---- L2CAP signalling commands ---- */
2890 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2891 u8 ident, u16 dlen, void *data)
2893 struct sk_buff *skb, **frag;
2894 struct l2cap_cmd_hdr *cmd;
2895 struct l2cap_hdr *lh;
2898 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2899 conn, code, ident, dlen);
2901 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2904 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2905 count = min_t(unsigned int, conn->mtu, len);
2907 skb = bt_skb_alloc(count, GFP_KERNEL);
2911 lh = skb_put(skb, L2CAP_HDR_SIZE);
2912 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2914 if (conn->hcon->type == LE_LINK)
2915 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2917 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2919 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2922 cmd->len = cpu_to_le16(dlen);
2925 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2926 skb_put_data(skb, data, count);
2932 /* Continuation fragments (no L2CAP header) */
2933 frag = &skb_shinfo(skb)->frag_list;
2935 count = min_t(unsigned int, conn->mtu, len);
2937 *frag = bt_skb_alloc(count, GFP_KERNEL);
2941 skb_put_data(*frag, data, count);
2946 frag = &(*frag)->next;
2956 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2959 struct l2cap_conf_opt *opt = *ptr;
2962 len = L2CAP_CONF_OPT_SIZE + opt->len;
2970 *val = *((u8 *) opt->val);
2974 *val = get_unaligned_le16(opt->val);
2978 *val = get_unaligned_le32(opt->val);
2982 *val = (unsigned long) opt->val;
2986 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2990 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2992 struct l2cap_conf_opt *opt = *ptr;
2994 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3001 *((u8 *) opt->val) = val;
3005 put_unaligned_le16(val, opt->val);
3009 put_unaligned_le32(val, opt->val);
3013 memcpy(opt->val, (void *) val, len);
3017 *ptr += L2CAP_CONF_OPT_SIZE + len;
3020 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3022 struct l2cap_conf_efs efs;
3024 switch (chan->mode) {
3025 case L2CAP_MODE_ERTM:
3026 efs.id = chan->local_id;
3027 efs.stype = chan->local_stype;
3028 efs.msdu = cpu_to_le16(chan->local_msdu);
3029 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3030 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3031 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3034 case L2CAP_MODE_STREAMING:
3036 efs.stype = L2CAP_SERV_BESTEFFORT;
3037 efs.msdu = cpu_to_le16(chan->local_msdu);
3038 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3047 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3048 (unsigned long) &efs);
3051 static void l2cap_ack_timeout(struct work_struct *work)
3053 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3057 BT_DBG("chan %p", chan);
3059 l2cap_chan_lock(chan);
3061 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3062 chan->last_acked_seq);
3065 l2cap_send_rr_or_rnr(chan, 0);
3067 l2cap_chan_unlock(chan);
3068 l2cap_chan_put(chan);
3071 int l2cap_ertm_init(struct l2cap_chan *chan)
3075 chan->next_tx_seq = 0;
3076 chan->expected_tx_seq = 0;
3077 chan->expected_ack_seq = 0;
3078 chan->unacked_frames = 0;
3079 chan->buffer_seq = 0;
3080 chan->frames_sent = 0;
3081 chan->last_acked_seq = 0;
3083 chan->sdu_last_frag = NULL;
3086 skb_queue_head_init(&chan->tx_q);
3088 chan->local_amp_id = AMP_ID_BREDR;
3089 chan->move_id = AMP_ID_BREDR;
3090 chan->move_state = L2CAP_MOVE_STABLE;
3091 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3093 if (chan->mode != L2CAP_MODE_ERTM)
3096 chan->rx_state = L2CAP_RX_STATE_RECV;
3097 chan->tx_state = L2CAP_TX_STATE_XMIT;
3099 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3100 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3101 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3103 skb_queue_head_init(&chan->srej_q);
3105 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3109 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3111 l2cap_seq_list_free(&chan->srej_list);
3116 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3119 case L2CAP_MODE_STREAMING:
3120 case L2CAP_MODE_ERTM:
3121 if (l2cap_mode_supported(mode, remote_feat_mask))
3125 return L2CAP_MODE_BASIC;
3129 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3131 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3132 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3135 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3137 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3138 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3141 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3142 struct l2cap_conf_rfc *rfc)
3144 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3145 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3147 /* Class 1 devices have must have ERTM timeouts
3148 * exceeding the Link Supervision Timeout. The
3149 * default Link Supervision Timeout for AMP
3150 * controllers is 10 seconds.
3152 * Class 1 devices use 0xffffffff for their
3153 * best-effort flush timeout, so the clamping logic
3154 * will result in a timeout that meets the above
3155 * requirement. ERTM timeouts are 16-bit values, so
3156 * the maximum timeout is 65.535 seconds.
3159 /* Convert timeout to milliseconds and round */
3160 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3162 /* This is the recommended formula for class 2 devices
3163 * that start ERTM timers when packets are sent to the
3166 ertm_to = 3 * ertm_to + 500;
3168 if (ertm_to > 0xffff)
3171 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3172 rfc->monitor_timeout = rfc->retrans_timeout;
3174 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3175 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3179 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3181 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3182 __l2cap_ews_supported(chan->conn)) {
3183 /* use extended control field */
3184 set_bit(FLAG_EXT_CTRL, &chan->flags);
3185 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3187 chan->tx_win = min_t(u16, chan->tx_win,
3188 L2CAP_DEFAULT_TX_WINDOW);
3189 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3191 chan->ack_win = chan->tx_win;
3194 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3196 struct l2cap_conf_req *req = data;
3197 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3198 void *ptr = req->data;
3201 BT_DBG("chan %p", chan);
3203 if (chan->num_conf_req || chan->num_conf_rsp)
3206 switch (chan->mode) {
3207 case L2CAP_MODE_STREAMING:
3208 case L2CAP_MODE_ERTM:
3209 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3212 if (__l2cap_efs_supported(chan->conn))
3213 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3217 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3222 if (chan->imtu != L2CAP_DEFAULT_MTU)
3223 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3225 switch (chan->mode) {
3226 case L2CAP_MODE_BASIC:
3230 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3231 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3234 rfc.mode = L2CAP_MODE_BASIC;
3236 rfc.max_transmit = 0;
3237 rfc.retrans_timeout = 0;
3238 rfc.monitor_timeout = 0;
3239 rfc.max_pdu_size = 0;
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3242 (unsigned long) &rfc);
3245 case L2CAP_MODE_ERTM:
3246 rfc.mode = L2CAP_MODE_ERTM;
3247 rfc.max_transmit = chan->max_tx;
3249 __l2cap_set_ertm_timeouts(chan, &rfc);
3251 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3252 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3254 rfc.max_pdu_size = cpu_to_le16(size);
3256 l2cap_txwin_setup(chan);
3258 rfc.txwin_size = min_t(u16, chan->tx_win,
3259 L2CAP_DEFAULT_TX_WINDOW);
3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3262 (unsigned long) &rfc);
3264 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3265 l2cap_add_opt_efs(&ptr, chan);
3267 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3268 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3271 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3272 if (chan->fcs == L2CAP_FCS_NONE ||
3273 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3274 chan->fcs = L2CAP_FCS_NONE;
3275 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,