1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
44
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46
47 bool disable_ertm;
48
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59                                        u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61                            void *data);
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66                      struct sk_buff_head *skbs, u8 event);
67
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 {
70         if (hcon->type == LE_LINK) {
71                 if (type == ADDR_LE_DEV_PUBLIC)
72                         return BDADDR_LE_PUBLIC;
73                 else
74                         return BDADDR_LE_RANDOM;
75         }
76
77         return BDADDR_BREDR;
78 }
79
80 /* ---- L2CAP channels ---- */
81
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
83                                                    u16 cid)
84 {
85         struct l2cap_chan *c;
86
87         list_for_each_entry(c, &conn->chan_l, list) {
88                 if (c->dcid == cid)
89                         return c;
90         }
91         return NULL;
92 }
93
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95                                                    u16 cid)
96 {
97         struct l2cap_chan *c;
98
99         list_for_each_entry(c, &conn->chan_l, list) {
100                 if (c->scid == cid)
101                         return c;
102         }
103         return NULL;
104 }
105
106 /* Find channel with given SCID.
107  * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109                                                  u16 cid)
110 {
111         struct l2cap_chan *c;
112
113         mutex_lock(&conn->chan_lock);
114         c = __l2cap_get_chan_by_scid(conn, cid);
115         if (c)
116                 l2cap_chan_lock(c);
117         mutex_unlock(&conn->chan_lock);
118
119         return c;
120 }
121
122 /* Find channel with given DCID.
123  * Returns locked channel.
124  */
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126                                                  u16 cid)
127 {
128         struct l2cap_chan *c;
129
130         mutex_lock(&conn->chan_lock);
131         c = __l2cap_get_chan_by_dcid(conn, cid);
132         if (c)
133                 l2cap_chan_lock(c);
134         mutex_unlock(&conn->chan_lock);
135
136         return c;
137 }
138
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140                                                     u8 ident)
141 {
142         struct l2cap_chan *c;
143
144         list_for_each_entry(c, &conn->chan_l, list) {
145                 if (c->ident == ident)
146                         return c;
147         }
148         return NULL;
149 }
150
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152                                                   u8 ident)
153 {
154         struct l2cap_chan *c;
155
156         mutex_lock(&conn->chan_lock);
157         c = __l2cap_get_chan_by_ident(conn, ident);
158         if (c)
159                 l2cap_chan_lock(c);
160         mutex_unlock(&conn->chan_lock);
161
162         return c;
163 }
164
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 {
167         struct l2cap_chan *c;
168
169         list_for_each_entry(c, &chan_list, global_l) {
170                 if (c->sport == psm && !bacmp(&c->src, src))
171                         return c;
172         }
173         return NULL;
174 }
175
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
177 {
178         int err;
179
180         write_lock(&chan_list_lock);
181
182         if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183                 err = -EADDRINUSE;
184                 goto done;
185         }
186
187         if (psm) {
188                 chan->psm = psm;
189                 chan->sport = psm;
190                 err = 0;
191         } else {
192                 u16 p;
193
194                 err = -EINVAL;
195                 for (p = 0x1001; p < 0x1100; p += 2)
196                         if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197                                 chan->psm   = cpu_to_le16(p);
198                                 chan->sport = cpu_to_le16(p);
199                                 err = 0;
200                                 break;
201                         }
202         }
203
204 done:
205         write_unlock(&chan_list_lock);
206         return err;
207 }
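
/* Usage sketch (illustrative only): a zero PSM asks l2cap_add_psm() to
 * auto-allocate an odd PSM from the dynamic range 0x1001-0x10FF, while a
 * non-zero PSM is taken as-is when no other channel is already bound to it
 * with the same source address:
 *
 *   err = l2cap_add_psm(chan, &chan->src, cpu_to_le16(0x1001));  // explicit
 *   err = l2cap_add_psm(chan, &chan->src, 0);                    // auto-allocate
 */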
208
209 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
210 {
211         write_lock(&chan_list_lock);
212
213         chan->scid = scid;
214
215         write_unlock(&chan_list_lock);
216
217         return 0;
218 }
219
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 {
222         u16 cid, dyn_end;
223
224         if (conn->hcon->type == LE_LINK)
225                 dyn_end = L2CAP_CID_LE_DYN_END;
226         else
227                 dyn_end = L2CAP_CID_DYN_END;
228
229         for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230                 if (!__l2cap_get_chan_by_scid(conn, cid))
231                         return cid;
232         }
233
234         return 0;
235 }
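
/* Note: the allocator above returns the first dynamic CID not yet used as a
 * source CID on this connection, searching from L2CAP_CID_DYN_START up to
 * the (smaller) LE dynamic end for LE links. A return value of 0 means the
 * dynamic range is exhausted; the caller below does not treat that as an
 * error by itself.
 */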
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239         BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240                state_to_string(state));
241
242         chan->state = state;
243         chan->ops->state_change(chan, state, 0);
244 }
245
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247                                                 int state, int err)
248 {
249         chan->state = state;
250         chan->ops->state_change(chan, chan->state, err);
251 }
252
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
254 {
255         chan->ops->state_change(chan, chan->state, err);
256 }
257
258 static void __set_retrans_timer(struct l2cap_chan *chan)
259 {
260         if (!delayed_work_pending(&chan->monitor_timer) &&
261             chan->retrans_timeout) {
262                 l2cap_set_timer(chan, &chan->retrans_timer,
263                                 msecs_to_jiffies(chan->retrans_timeout));
264         }
265 }
266
267 static void __set_monitor_timer(struct l2cap_chan *chan)
268 {
269         __clear_retrans_timer(chan);
270         if (chan->monitor_timeout) {
271                 l2cap_set_timer(chan, &chan->monitor_timer,
272                                 msecs_to_jiffies(chan->monitor_timeout));
273         }
274 }
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277                                                u16 seq)
278 {
279         struct sk_buff *skb;
280
281         skb_queue_walk(head, skb) {
282                 if (bt_cb(skb)->control.txseq == seq)
283                         return skb;
284         }
285
286         return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292  * SREJ requests that are received and for frames that are to be
293  * retransmitted. These seq_list functions implement a singly-linked
294  * list in an array, where membership in the list can also be checked
295  * in constant time. Items can also be added to the tail of the list
296  * and removed from the head in constant time, without further memory
297  * allocs or frees.
298  */
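
/* Usage sketch (illustrative only), built from the helpers defined below:
 *
 *   struct l2cap_seq_list list;
 *
 *   if (l2cap_seq_list_init(&list, chan->tx_win))
 *           return -ENOMEM;
 *
 *   l2cap_seq_list_append(&list, txseq);        // queue a seq for later
 *   if (l2cap_seq_list_contains(&list, txseq))  // O(1) membership check
 *           seq = l2cap_seq_list_pop(&list);    // dequeue from the head
 *
 *   l2cap_seq_list_free(&list);
 */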
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302         size_t alloc_size, i;
303
304         /* Allocated size is a power of 2 to map sequence numbers
305          * (which may be up to 14 bits) into a smaller array that is
306          * sized for the negotiated ERTM transmit windows.
307          */
308         alloc_size = roundup_pow_of_two(size);
309
310         seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311         if (!seq_list->list)
312                 return -ENOMEM;
313
314         seq_list->mask = alloc_size - 1;
315         seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316         seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317         for (i = 0; i < alloc_size; i++)
318                 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320         return 0;
321 }
322
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
324 {
325         kfree(seq_list->list);
326 }
327
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329                                            u16 seq)
330 {
331         /* Constant-time check for list membership */
332         return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
333 }
334
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
336 {
337         u16 seq = seq_list->head;
338         u16 mask = seq_list->mask;
339
340         seq_list->head = seq_list->list[seq & mask];
341         seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
342
343         if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344                 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345                 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346         }
347
348         return seq;
349 }
350
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352 {
353         u16 i;
354
355         if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356                 return;
357
358         for (i = 0; i <= seq_list->mask; i++)
359                 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361         seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362         seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 }
364
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
366 {
367         u16 mask = seq_list->mask;
368
369         /* All appends happen in constant time */
370
371         if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
372                 return;
373
374         if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375                 seq_list->head = seq;
376         else
377                 seq_list->list[seq_list->tail & mask] = seq;
378
379         seq_list->tail = seq;
380         seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
381 }
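
/* Worked example of the encoding used above, assuming a window rounded up
 * to 16 entries (mask = 0x000f): appending 5, 2 and then 9 leaves
 *
 *   head = 5, tail = 9
 *   list[5] = 2, list[2] = 9, list[9] = L2CAP_SEQ_LIST_TAIL
 *
 * i.e. each slot stores the next sequence number in the list, the tail slot
 * is marked with L2CAP_SEQ_LIST_TAIL and unused slots stay
 * L2CAP_SEQ_LIST_CLEAR, which is what makes the membership check constant
 * time.
 */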
382
383 static void l2cap_chan_timeout(struct work_struct *work)
384 {
385         struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
386                                                chan_timer.work);
387         struct l2cap_conn *conn = chan->conn;
388         int reason;
389
390         BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
391
392         mutex_lock(&conn->chan_lock);
393         l2cap_chan_lock(chan);
394
395         if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396                 reason = ECONNREFUSED;
397         else if (chan->state == BT_CONNECT &&
398                  chan->sec_level != BT_SECURITY_SDP)
399                 reason = ECONNREFUSED;
400         else
401                 reason = ETIMEDOUT;
402
403         l2cap_chan_close(chan, reason);
404
405         l2cap_chan_unlock(chan);
406
407         chan->ops->close(chan);
408         mutex_unlock(&conn->chan_lock);
409
410         l2cap_chan_put(chan);
411 }
412
413 struct l2cap_chan *l2cap_chan_create(void)
414 {
415         struct l2cap_chan *chan;
416
417         chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418         if (!chan)
419                 return NULL;
420
421         mutex_init(&chan->lock);
422
423         write_lock(&chan_list_lock);
424         list_add(&chan->global_l, &chan_list);
425         write_unlock(&chan_list_lock);
426
427         INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429         chan->state = BT_OPEN;
430
431         kref_init(&chan->kref);
432
433         /* This flag is cleared in l2cap_chan_ready() */
434         set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436         BT_DBG("chan %p", chan);
437
438         return chan;
439 }
440
441 static void l2cap_chan_destroy(struct kref *kref)
442 {
443         struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
444
445         BT_DBG("chan %p", chan);
446
447         write_lock(&chan_list_lock);
448         list_del(&chan->global_l);
449         write_unlock(&chan_list_lock);
450
451         kfree(chan);
452 }
453
454 void l2cap_chan_hold(struct l2cap_chan *c)
455 {
456         BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
457
458         kref_get(&c->kref);
459 }
460
461 void l2cap_chan_put(struct l2cap_chan *c)
462 {
463         BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
464
465         kref_put(&c->kref, l2cap_chan_destroy);
466 }
467
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469 {
470         chan->fcs  = L2CAP_FCS_CRC16;
471         chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472         chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473         chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474         chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475         chan->sec_level = BT_SECURITY_LOW;
476
477         set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
478 }
479
480 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
481 {
482         chan->sdu = NULL;
483         chan->sdu_last_frag = NULL;
484         chan->sdu_len = 0;
485         chan->tx_credits = 0;
486         chan->rx_credits = le_max_credits;
487         chan->mps = min_t(u16, chan->imtu, le_default_mps);
488
489         skb_queue_head_init(&chan->tx_q);
490 }
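
/* Note on the LE credit-based flow control defaults set above: rx_credits
 * starts from the le_max_credits default and is what gets advertised to the
 * remote side in the LE connection request/response, tx_credits starts at
 * zero because nothing may be sent until the peer grants credits, and the
 * MPS is capped at the channel's incoming MTU or le_default_mps, whichever
 * is smaller.
 */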
491
492 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
493 {
494         BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
495                __le16_to_cpu(chan->psm), chan->dcid);
496
497         conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
498
499         chan->conn = conn;
500
501         switch (chan->chan_type) {
502         case L2CAP_CHAN_CONN_ORIENTED:
503                 /* Alloc CID for connection-oriented socket */
504                 chan->scid = l2cap_alloc_cid(conn);
505                 if (conn->hcon->type == ACL_LINK)
506                         chan->omtu = L2CAP_DEFAULT_MTU;
507                 break;
508
509         case L2CAP_CHAN_CONN_LESS:
510                 /* Connectionless socket */
511                 chan->scid = L2CAP_CID_CONN_LESS;
512                 chan->dcid = L2CAP_CID_CONN_LESS;
513                 chan->omtu = L2CAP_DEFAULT_MTU;
514                 break;
515
516         case L2CAP_CHAN_FIXED:
517                 /* Caller will set CID and CID specific MTU values */
518                 break;
519
520         default:
521                 /* Raw socket can send/recv signalling messages only */
522                 chan->scid = L2CAP_CID_SIGNALING;
523                 chan->dcid = L2CAP_CID_SIGNALING;
524                 chan->omtu = L2CAP_DEFAULT_MTU;
525         }
526
527         chan->local_id          = L2CAP_BESTEFFORT_ID;
528         chan->local_stype       = L2CAP_SERV_BESTEFFORT;
529         chan->local_msdu        = L2CAP_DEFAULT_MAX_SDU_SIZE;
530         chan->local_sdu_itime   = L2CAP_DEFAULT_SDU_ITIME;
531         chan->local_acc_lat     = L2CAP_DEFAULT_ACC_LAT;
532         chan->local_flush_to    = L2CAP_EFS_DEFAULT_FLUSH_TO;
533
534         l2cap_chan_hold(chan);
535
536         hci_conn_hold(conn->hcon);
537
538         list_add(&chan->list, &conn->chan_l);
539 }
540
541 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
542 {
543         mutex_lock(&conn->chan_lock);
544         __l2cap_chan_add(conn, chan);
545         mutex_unlock(&conn->chan_lock);
546 }
547
548 void l2cap_chan_del(struct l2cap_chan *chan, int err)
549 {
550         struct l2cap_conn *conn = chan->conn;
551
552         __clear_chan_timer(chan);
553
554         BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
555
556         if (conn) {
557                 struct amp_mgr *mgr = conn->hcon->amp_mgr;
558                 /* Delete from channel list */
559                 list_del(&chan->list);
560
561                 l2cap_chan_put(chan);
562
563                 chan->conn = NULL;
564
565                 if (chan->scid != L2CAP_CID_A2MP)
566                         hci_conn_drop(conn->hcon);
567
568                 if (mgr && mgr->bredr_chan == chan)
569                         mgr->bredr_chan = NULL;
570         }
571
572         if (chan->hs_hchan) {
573                 struct hci_chan *hs_hchan = chan->hs_hchan;
574
575                 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
576                 amp_disconnect_logical_link(hs_hchan);
577         }
578
579         chan->ops->teardown(chan, err);
580
581         if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
582                 return;
583
584         switch (chan->mode) {
585         case L2CAP_MODE_BASIC:
586                 break;
587
588         case L2CAP_MODE_LE_FLOWCTL:
589                 skb_queue_purge(&chan->tx_q);
590                 break;
591
592         case L2CAP_MODE_ERTM:
593                 __clear_retrans_timer(chan);
594                 __clear_monitor_timer(chan);
595                 __clear_ack_timer(chan);
596
597                 skb_queue_purge(&chan->srej_q);
598
599                 l2cap_seq_list_free(&chan->srej_list);
600                 l2cap_seq_list_free(&chan->retrans_list);
601
602                 /* fall through */
603
604         case L2CAP_MODE_STREAMING:
605                 skb_queue_purge(&chan->tx_q);
606                 break;
607         }
608
609         return;
610 }
611
612 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
613 {
614         struct l2cap_conn *conn = hcon->l2cap_data;
615         struct l2cap_chan *chan;
616
617         mutex_lock(&conn->chan_lock);
618
619         list_for_each_entry(chan, &conn->chan_l, list) {
620                 l2cap_chan_lock(chan);
621                 bacpy(&chan->dst, &hcon->dst);
622                 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
623                 l2cap_chan_unlock(chan);
624         }
625
626         mutex_unlock(&conn->chan_lock);
627 }
628
629 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
630 {
631         struct l2cap_conn *conn = chan->conn;
632         struct l2cap_le_conn_rsp rsp;
633         u16 result;
634
635         if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
636                 result = L2CAP_CR_AUTHORIZATION;
637         else
638                 result = L2CAP_CR_BAD_PSM;
639
640         l2cap_state_change(chan, BT_DISCONN);
641
642         rsp.dcid    = cpu_to_le16(chan->scid);
643         rsp.mtu     = cpu_to_le16(chan->imtu);
644         rsp.mps     = cpu_to_le16(chan->mps);
645         rsp.credits = cpu_to_le16(chan->rx_credits);
646         rsp.result  = cpu_to_le16(result);
647
648         l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
649                        &rsp);
650 }
651
652 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
653 {
654         struct l2cap_conn *conn = chan->conn;
655         struct l2cap_conn_rsp rsp;
656         u16 result;
657
658         if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
659                 result = L2CAP_CR_SEC_BLOCK;
660         else
661                 result = L2CAP_CR_BAD_PSM;
662
663         l2cap_state_change(chan, BT_DISCONN);
664
665         rsp.scid   = cpu_to_le16(chan->dcid);
666         rsp.dcid   = cpu_to_le16(chan->scid);
667         rsp.result = cpu_to_le16(result);
668         rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
669
670         l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
671 }
672
673 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
674 {
675         struct l2cap_conn *conn = chan->conn;
676
677         BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
678
679         switch (chan->state) {
680         case BT_LISTEN:
681                 chan->ops->teardown(chan, 0);
682                 break;
683
684         case BT_CONNECTED:
685         case BT_CONFIG:
686                 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
687                         __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
688                         l2cap_send_disconn_req(chan, reason);
689                 } else
690                         l2cap_chan_del(chan, reason);
691                 break;
692
693         case BT_CONNECT2:
694                 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
695                         if (conn->hcon->type == ACL_LINK)
696                                 l2cap_chan_connect_reject(chan);
697                         else if (conn->hcon->type == LE_LINK)
698                                 l2cap_chan_le_connect_reject(chan);
699                 }
700
701                 l2cap_chan_del(chan, reason);
702                 break;
703
704         case BT_CONNECT:
705         case BT_DISCONN:
706                 l2cap_chan_del(chan, reason);
707                 break;
708
709         default:
710                 chan->ops->teardown(chan, 0);
711                 break;
712         }
713 }
714
715 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
716 {
717         switch (chan->chan_type) {
718         case L2CAP_CHAN_RAW:
719                 switch (chan->sec_level) {
720                 case BT_SECURITY_HIGH:
721                 case BT_SECURITY_FIPS:
722                         return HCI_AT_DEDICATED_BONDING_MITM;
723                 case BT_SECURITY_MEDIUM:
724                         return HCI_AT_DEDICATED_BONDING;
725                 default:
726                         return HCI_AT_NO_BONDING;
727                 }
728                 break;
729         case L2CAP_CHAN_CONN_LESS:
730                 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
731                         if (chan->sec_level == BT_SECURITY_LOW)
732                                 chan->sec_level = BT_SECURITY_SDP;
733                 }
734                 if (chan->sec_level == BT_SECURITY_HIGH ||
735                     chan->sec_level == BT_SECURITY_FIPS)
736                         return HCI_AT_NO_BONDING_MITM;
737                 else
738                         return HCI_AT_NO_BONDING;
739                 break;
740         case L2CAP_CHAN_CONN_ORIENTED:
741                 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
742                         if (chan->sec_level == BT_SECURITY_LOW)
743                                 chan->sec_level = BT_SECURITY_SDP;
744
745                         if (chan->sec_level == BT_SECURITY_HIGH ||
746                             chan->sec_level == BT_SECURITY_FIPS)
747                                 return HCI_AT_NO_BONDING_MITM;
748                         else
749                                 return HCI_AT_NO_BONDING;
750                 }
751                 /* fall through */
752         default:
753                 switch (chan->sec_level) {
754                 case BT_SECURITY_HIGH:
755                 case BT_SECURITY_FIPS:
756                         return HCI_AT_GENERAL_BONDING_MITM;
757                 case BT_SECURITY_MEDIUM:
758                         return HCI_AT_GENERAL_BONDING;
759                 default:
760                         return HCI_AT_NO_BONDING;
761                 }
762                 break;
763         }
764 }
765
766 /* Service level security */
767 int l2cap_chan_check_security(struct l2cap_chan *chan)
768 {
769         struct l2cap_conn *conn = chan->conn;
770         __u8 auth_type;
771
772         if (conn->hcon->type == LE_LINK)
773                 return smp_conn_security(conn->hcon, chan->sec_level);
774
775         auth_type = l2cap_get_auth_type(chan);
776
777         return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
778 }
779
780 static u8 l2cap_get_ident(struct l2cap_conn *conn)
781 {
782         u8 id;
783
784         /* Get next available identifier.
785          *    1 - 128 are used by kernel.
786          *  129 - 199 are reserved.
787          *  200 - 254 are used by utilities like l2ping, etc.
788          */
789
790         spin_lock(&conn->lock);
791
792         if (++conn->tx_ident > 128)
793                 conn->tx_ident = 1;
794
795         id = conn->tx_ident;
796
797         spin_unlock(&conn->lock);
798
799         return id;
800 }
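
/* Example: identifiers are handed out as 1, 2, ..., 128 and then wrap back
 * to 1, so identifier 0 is never used for kernel-generated signalling
 * commands.
 */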
801
802 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
803                            void *data)
804 {
805         struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
806         u8 flags;
807
808         BT_DBG("code 0x%2.2x", code);
809
810         if (!skb)
811                 return;
812
813         if (lmp_no_flush_capable(conn->hcon->hdev))
814                 flags = ACL_START_NO_FLUSH;
815         else
816                 flags = ACL_START;
817
818         bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
819         skb->priority = HCI_PRIO_MAX;
820
821         hci_send_acl(conn->hchan, skb, flags);
822 }
823
824 static bool __chan_is_moving(struct l2cap_chan *chan)
825 {
826         return chan->move_state != L2CAP_MOVE_STABLE &&
827                chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
828 }
829
830 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
831 {
832         struct hci_conn *hcon = chan->conn->hcon;
833         u16 flags;
834
835         BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
836                skb->priority);
837
838         if (chan->hs_hcon && !__chan_is_moving(chan)) {
839                 if (chan->hs_hchan)
840                         hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
841                 else
842                         kfree_skb(skb);
843
844                 return;
845         }
846
847         if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
848             lmp_no_flush_capable(hcon->hdev))
849                 flags = ACL_START_NO_FLUSH;
850         else
851                 flags = ACL_START;
852
853         bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
854         hci_send_acl(chan->conn->hchan, skb, flags);
855 }
856
857 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
858 {
859         control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
860         control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
861
862         if (enh & L2CAP_CTRL_FRAME_TYPE) {
863                 /* S-Frame */
864                 control->sframe = 1;
865                 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
866                 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
867
868                 control->sar = 0;
869                 control->txseq = 0;
870         } else {
871                 /* I-Frame */
872                 control->sframe = 0;
873                 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
874                 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
875
876                 control->poll = 0;
877                 control->super = 0;
878         }
879 }
880
881 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
882 {
883         control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
884         control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
885
886         if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
887                 /* S-Frame */
888                 control->sframe = 1;
889                 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
890                 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
891
892                 control->sar = 0;
893                 control->txseq = 0;
894         } else {
895                 /* I-Frame */
896                 control->sframe = 0;
897                 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
898                 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
899
900                 control->poll = 0;
901                 control->super = 0;
902         }
903 }
904
905 static inline void __unpack_control(struct l2cap_chan *chan,
906                                     struct sk_buff *skb)
907 {
908         if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
909                 __unpack_extended_control(get_unaligned_le32(skb->data),
910                                           &bt_cb(skb)->control);
911                 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
912         } else {
913                 __unpack_enhanced_control(get_unaligned_le16(skb->data),
914                                           &bt_cb(skb)->control);
915                 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
916         }
917 }
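
/* Sketch (illustrative only) of how the pack/unpack helpers pair up:
 *
 *   struct l2cap_ctrl c;
 *   u16 wire = get_unaligned_le16(skb->data);
 *
 *   __unpack_enhanced_control(wire, &c);
 *   // ... inspect or adjust c.reqseq, c.final, c.sframe, c.super, ...
 *   wire = __pack_enhanced_control(&c);   // rebuilds the same wire value
 *
 * The 32-bit extended variants work the same way; __unpack_control() above
 * and __pack_control() below pick between them based on FLAG_EXT_CTRL.
 */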
918
919 static u32 __pack_extended_control(struct l2cap_ctrl *control)
920 {
921         u32 packed;
922
923         packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
924         packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
925
926         if (control->sframe) {
927                 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
928                 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
929                 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
930         } else {
931                 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
932                 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
933         }
934
935         return packed;
936 }
937
938 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
939 {
940         u16 packed;
941
942         packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
943         packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
944
945         if (control->sframe) {
946                 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
947                 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
948                 packed |= L2CAP_CTRL_FRAME_TYPE;
949         } else {
950                 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
951                 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
952         }
953
954         return packed;
955 }
956
957 static inline void __pack_control(struct l2cap_chan *chan,
958                                   struct l2cap_ctrl *control,
959                                   struct sk_buff *skb)
960 {
961         if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
962                 put_unaligned_le32(__pack_extended_control(control),
963                                    skb->data + L2CAP_HDR_SIZE);
964         } else {
965                 put_unaligned_le16(__pack_enhanced_control(control),
966                                    skb->data + L2CAP_HDR_SIZE);
967         }
968 }
969
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
971 {
972         if (test_bit(FLAG_EXT_CTRL, &chan->flags))
973                 return L2CAP_EXT_HDR_SIZE;
974         else
975                 return L2CAP_ENH_HDR_SIZE;
976 }
977
978 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
979                                                u32 control)
980 {
981         struct sk_buff *skb;
982         struct l2cap_hdr *lh;
983         int hlen = __ertm_hdr_size(chan);
984
985         if (chan->fcs == L2CAP_FCS_CRC16)
986                 hlen += L2CAP_FCS_SIZE;
987
988         skb = bt_skb_alloc(hlen, GFP_KERNEL);
989
990         if (!skb)
991                 return ERR_PTR(-ENOMEM);
992
993         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
994         lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
995         lh->cid = cpu_to_le16(chan->dcid);
996
997         if (test_bit(FLAG_EXT_CTRL, &chan->flags))
998                 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
999         else
1000                 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1001
1002         if (chan->fcs == L2CAP_FCS_CRC16) {
1003                 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1004                 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1005         }
1006
1007         skb->priority = HCI_PRIO_MAX;
1008         return skb;
1009 }
1010
1011 static void l2cap_send_sframe(struct l2cap_chan *chan,
1012                               struct l2cap_ctrl *control)
1013 {
1014         struct sk_buff *skb;
1015         u32 control_field;
1016
1017         BT_DBG("chan %p, control %p", chan, control);
1018
1019         if (!control->sframe)
1020                 return;
1021
1022         if (__chan_is_moving(chan))
1023                 return;
1024
1025         if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1026             !control->poll)
1027                 control->final = 1;
1028
1029         if (control->super == L2CAP_SUPER_RR)
1030                 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1031         else if (control->super == L2CAP_SUPER_RNR)
1032                 set_bit(CONN_RNR_SENT, &chan->conn_state);
1033
1034         if (control->super != L2CAP_SUPER_SREJ) {
1035                 chan->last_acked_seq = control->reqseq;
1036                 __clear_ack_timer(chan);
1037         }
1038
1039         BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1040                control->final, control->poll, control->super);
1041
1042         if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1043                 control_field = __pack_extended_control(control);
1044         else
1045                 control_field = __pack_enhanced_control(control);
1046
1047         skb = l2cap_create_sframe_pdu(chan, control_field);
1048         if (!IS_ERR(skb))
1049                 l2cap_do_send(chan, skb);
1050 }
1051
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1053 {
1054         struct l2cap_ctrl control;
1055
1056         BT_DBG("chan %p, poll %d", chan, poll);
1057
1058         memset(&control, 0, sizeof(control));
1059         control.sframe = 1;
1060         control.poll = poll;
1061
1062         if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1063                 control.super = L2CAP_SUPER_RNR;
1064         else
1065                 control.super = L2CAP_SUPER_RR;
1066
1067         control.reqseq = chan->buffer_seq;
1068         l2cap_send_sframe(chan, &control);
1069 }
1070
1071 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1072 {
1073         return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1074 }
1075
1076 static bool __amp_capable(struct l2cap_chan *chan)
1077 {
1078         struct l2cap_conn *conn = chan->conn;
1079         struct hci_dev *hdev;
1080         bool amp_available = false;
1081
1082         if (!conn->hs_enabled)
1083                 return false;
1084
1085         if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1086                 return false;
1087
1088         read_lock(&hci_dev_list_lock);
1089         list_for_each_entry(hdev, &hci_dev_list, list) {
1090                 if (hdev->amp_type != AMP_TYPE_BREDR &&
1091                     test_bit(HCI_UP, &hdev->flags)) {
1092                         amp_available = true;
1093                         break;
1094                 }
1095         }
1096         read_unlock(&hci_dev_list_lock);
1097
1098         if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1099                 return amp_available;
1100
1101         return false;
1102 }
1103
1104 static bool l2cap_check_efs(struct l2cap_chan *chan)
1105 {
1106         /* Check EFS parameters */
1107         return true;
1108 }
1109
1110 void l2cap_send_conn_req(struct l2cap_chan *chan)
1111 {
1112         struct l2cap_conn *conn = chan->conn;
1113         struct l2cap_conn_req req;
1114
1115         req.scid = cpu_to_le16(chan->scid);
1116         req.psm  = chan->psm;
1117
1118         chan->ident = l2cap_get_ident(conn);
1119
1120         set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1121
1122         l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1123 }
1124
1125 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1126 {
1127         struct l2cap_create_chan_req req;
1128         req.scid = cpu_to_le16(chan->scid);
1129         req.psm  = chan->psm;
1130         req.amp_id = amp_id;
1131
1132         chan->ident = l2cap_get_ident(chan->conn);
1133
1134         l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1135                        sizeof(req), &req);
1136 }
1137
1138 static void l2cap_move_setup(struct l2cap_chan *chan)
1139 {
1140         struct sk_buff *skb;
1141
1142         BT_DBG("chan %p", chan);
1143
1144         if (chan->mode != L2CAP_MODE_ERTM)
1145                 return;
1146
1147         __clear_retrans_timer(chan);
1148         __clear_monitor_timer(chan);
1149         __clear_ack_timer(chan);
1150
1151         chan->retry_count = 0;
1152         skb_queue_walk(&chan->tx_q, skb) {
1153                 if (bt_cb(skb)->control.retries)
1154                         bt_cb(skb)->control.retries = 1;
1155                 else
1156                         break;
1157         }
1158
1159         chan->expected_tx_seq = chan->buffer_seq;
1160
1161         clear_bit(CONN_REJ_ACT, &chan->conn_state);
1162         clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1163         l2cap_seq_list_clear(&chan->retrans_list);
1164         l2cap_seq_list_clear(&chan->srej_list);
1165         skb_queue_purge(&chan->srej_q);
1166
1167         chan->tx_state = L2CAP_TX_STATE_XMIT;
1168         chan->rx_state = L2CAP_RX_STATE_MOVE;
1169
1170         set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1171 }
1172
1173 static void l2cap_move_done(struct l2cap_chan *chan)
1174 {
1175         u8 move_role = chan->move_role;
1176         BT_DBG("chan %p", chan);
1177
1178         chan->move_state = L2CAP_MOVE_STABLE;
1179         chan->move_role = L2CAP_MOVE_ROLE_NONE;
1180
1181         if (chan->mode != L2CAP_MODE_ERTM)
1182                 return;
1183
1184         switch (move_role) {
1185         case L2CAP_MOVE_ROLE_INITIATOR:
1186                 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1187                 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1188                 break;
1189         case L2CAP_MOVE_ROLE_RESPONDER:
1190                 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1191                 break;
1192         }
1193 }
1194
1195 static void l2cap_chan_ready(struct l2cap_chan *chan)
1196 {
1197         /* This clears all conf flags, including CONF_NOT_COMPLETE */
1198         chan->conf_state = 0;
1199         __clear_chan_timer(chan);
1200
1201         if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1202                 chan->ops->suspend(chan);
1203
1204         chan->state = BT_CONNECTED;
1205
1206         chan->ops->ready(chan);
1207 }
1208
1209 static void l2cap_le_connect(struct l2cap_chan *chan)
1210 {
1211         struct l2cap_conn *conn = chan->conn;
1212         struct l2cap_le_conn_req req;
1213
1214         if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1215                 return;
1216
1217         req.psm     = chan->psm;
1218         req.scid    = cpu_to_le16(chan->scid);
1219         req.mtu     = cpu_to_le16(chan->imtu);
1220         req.mps     = cpu_to_le16(chan->mps);
1221         req.credits = cpu_to_le16(chan->rx_credits);
1222
1223         chan->ident = l2cap_get_ident(conn);
1224
1225         l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1226                        sizeof(req), &req);
1227 }
1228
1229 static void l2cap_le_start(struct l2cap_chan *chan)
1230 {
1231         struct l2cap_conn *conn = chan->conn;
1232
1233         if (!smp_conn_security(conn->hcon, chan->sec_level))
1234                 return;
1235
1236         if (!chan->psm) {
1237                 l2cap_chan_ready(chan);
1238                 return;
1239         }
1240
1241         if (chan->state == BT_CONNECT)
1242                 l2cap_le_connect(chan);
1243 }
1244
1245 static void l2cap_start_connection(struct l2cap_chan *chan)
1246 {
1247         if (__amp_capable(chan)) {
1248                 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1249                 a2mp_discover_amp(chan);
1250         } else if (chan->conn->hcon->type == LE_LINK) {
1251                 l2cap_le_start(chan);
1252         } else {
1253                 l2cap_send_conn_req(chan);
1254         }
1255 }
1256
1257 static void l2cap_do_start(struct l2cap_chan *chan)
1258 {
1259         struct l2cap_conn *conn = chan->conn;
1260
1261         if (conn->hcon->type == LE_LINK) {
1262                 l2cap_le_start(chan);
1263                 return;
1264         }
1265
1266         if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1267                 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1268                         return;
1269
1270                 if (l2cap_chan_check_security(chan) &&
1271                     __l2cap_no_conn_pending(chan)) {
1272                         l2cap_start_connection(chan);
1273                 }
1274         } else {
1275                 struct l2cap_info_req req;
1276                 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1277
1278                 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1279                 conn->info_ident = l2cap_get_ident(conn);
1280
1281                 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1282
1283                 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1284                                sizeof(req), &req);
1285         }
1286 }
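
/* Rough flow for an outgoing BR/EDR channel (sketch): the first channel to
 * start on a link sends an L2CAP_INFO_REQ for the feature mask, and the
 * actual connection request is deferred until the information exchange
 * completes (or l2cap_info_timeout() below gives up), at which point
 * l2cap_conn_start() walks the channel list and calls
 * l2cap_start_connection() for every channel still in BT_CONNECT.
 */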
1287
1288 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1289 {
1290         u32 local_feat_mask = l2cap_feat_mask;
1291         if (!disable_ertm)
1292                 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1293
1294         switch (mode) {
1295         case L2CAP_MODE_ERTM:
1296                 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1297         case L2CAP_MODE_STREAMING:
1298                 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1299         default:
1300                 return 0x00;
1301         }
1302 }
1303
1304 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1305 {
1306         struct l2cap_conn *conn = chan->conn;
1307         struct l2cap_disconn_req req;
1308
1309         if (!conn)
1310                 return;
1311
1312         if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1313                 __clear_retrans_timer(chan);
1314                 __clear_monitor_timer(chan);
1315                 __clear_ack_timer(chan);
1316         }
1317
1318         if (chan->scid == L2CAP_CID_A2MP) {
1319                 l2cap_state_change(chan, BT_DISCONN);
1320                 return;
1321         }
1322
1323         req.dcid = cpu_to_le16(chan->dcid);
1324         req.scid = cpu_to_le16(chan->scid);
1325         l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1326                        sizeof(req), &req);
1327
1328         l2cap_state_change_and_error(chan, BT_DISCONN, err);
1329 }
1330
1331 /* ---- L2CAP connections ---- */
1332 static void l2cap_conn_start(struct l2cap_conn *conn)
1333 {
1334         struct l2cap_chan *chan, *tmp;
1335
1336         BT_DBG("conn %p", conn);
1337
1338         mutex_lock(&conn->chan_lock);
1339
1340         list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1341                 l2cap_chan_lock(chan);
1342
1343                 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1344                         l2cap_chan_unlock(chan);
1345                         continue;
1346                 }
1347
1348                 if (chan->state == BT_CONNECT) {
1349                         if (!l2cap_chan_check_security(chan) ||
1350                             !__l2cap_no_conn_pending(chan)) {
1351                                 l2cap_chan_unlock(chan);
1352                                 continue;
1353                         }
1354
1355                         if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1356                             && test_bit(CONF_STATE2_DEVICE,
1357                                         &chan->conf_state)) {
1358                                 l2cap_chan_close(chan, ECONNRESET);
1359                                 l2cap_chan_unlock(chan);
1360                                 continue;
1361                         }
1362
1363                         l2cap_start_connection(chan);
1364
1365                 } else if (chan->state == BT_CONNECT2) {
1366                         struct l2cap_conn_rsp rsp;
1367                         char buf[128];
1368                         rsp.scid = cpu_to_le16(chan->dcid);
1369                         rsp.dcid = cpu_to_le16(chan->scid);
1370
1371                         if (l2cap_chan_check_security(chan)) {
1372                                 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1373                                         rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1374                                         rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1375                                         chan->ops->defer(chan);
1376
1377                                 } else {
1378                                         l2cap_state_change(chan, BT_CONFIG);
1379                                         rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1380                                         rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1381                                 }
1382                         } else {
1383                                 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1384                                 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1385                         }
1386
1387                         l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1388                                        sizeof(rsp), &rsp);
1389
1390                         if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1391                             rsp.result != L2CAP_CR_SUCCESS) {
1392                                 l2cap_chan_unlock(chan);
1393                                 continue;
1394                         }
1395
1396                         set_bit(CONF_REQ_SENT, &chan->conf_state);
1397                         l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1398                                        l2cap_build_conf_req(chan, buf), buf);
1399                         chan->num_conf_req++;
1400                 }
1401
1402                 l2cap_chan_unlock(chan);
1403         }
1404
1405         mutex_unlock(&conn->chan_lock);
1406 }
1407
1408 /* Find socket with cid and source/destination bdaddr.
1409  * Returns closest match.
1410  */
1411 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1412                                                     bdaddr_t *src,
1413                                                     bdaddr_t *dst)
1414 {
1415         struct l2cap_chan *c, *c1 = NULL;
1416
1417         read_lock(&chan_list_lock);
1418
1419         list_for_each_entry(c, &chan_list, global_l) {
1420                 if (state && c->state != state)
1421                         continue;
1422
1423                 if (c->scid == cid) {
1424                         int src_match, dst_match;
1425                         int src_any, dst_any;
1426
1427                         /* Exact match. */
1428                         src_match = !bacmp(&c->src, src);
1429                         dst_match = !bacmp(&c->dst, dst);
1430                         if (src_match && dst_match) {
1431                                 read_unlock(&chan_list_lock);
1432                                 return c;
1433                         }
1434
1435                         /* Closest match */
1436                         src_any = !bacmp(&c->src, BDADDR_ANY);
1437                         dst_any = !bacmp(&c->dst, BDADDR_ANY);
1438                         if ((src_match && dst_any) || (src_any && dst_match) ||
1439                             (src_any && dst_any))
1440                                 c1 = c;
1441                 }
1442         }
1443
1444         read_unlock(&chan_list_lock);
1445
1446         return c1;
1447 }
1448
1449 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1450 {
1451         struct hci_conn *hcon = conn->hcon;
1452         struct l2cap_chan *chan, *pchan;
1453         u8 dst_type;
1454
1455         BT_DBG("");
1456
1457         bt_6lowpan_add_conn(conn);
1458
1459         /* Check if we have a socket listening on this CID */
1460         pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1461                                           &hcon->src, &hcon->dst);
1462         if (!pchan)
1463                 return;
1464
1465         /* Client ATT sockets should override the server one */
1466         if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1467                 return;
1468
1469         dst_type = bdaddr_type(hcon, hcon->dst_type);
1470
1471         /* If device is blocked, do not create a channel for it */
1472         if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1473                 return;
1474
1475         l2cap_chan_lock(pchan);
1476
1477         chan = pchan->ops->new_connection(pchan);
1478         if (!chan)
1479                 goto clean;
1480
1481         bacpy(&chan->src, &hcon->src);
1482         bacpy(&chan->dst, &hcon->dst);
1483         chan->src_type = bdaddr_type(hcon, hcon->src_type);
1484         chan->dst_type = dst_type;
1485
1486         __l2cap_chan_add(conn, chan);
1487
1488 clean:
1489         l2cap_chan_unlock(pchan);
1490 }
1491
1492 static void l2cap_conn_ready(struct l2cap_conn *conn)
1493 {
1494         struct l2cap_chan *chan;
1495         struct hci_conn *hcon = conn->hcon;
1496
1497         BT_DBG("conn %p", conn);
1498
1499         /* For outgoing pairing, which doesn't necessarily have an
1500          * associated socket (e.g. mgmt_pair_device).
1501          */
1502         if (hcon->out && hcon->type == LE_LINK)
1503                 smp_conn_security(hcon, hcon->pending_sec_level);
1504
1505         mutex_lock(&conn->chan_lock);
1506
1507         if (hcon->type == LE_LINK)
1508                 l2cap_le_conn_ready(conn);
1509
1510         list_for_each_entry(chan, &conn->chan_l, list) {
1511
1512                 l2cap_chan_lock(chan);
1513
1514                 if (chan->scid == L2CAP_CID_A2MP) {
1515                         l2cap_chan_unlock(chan);
1516                         continue;
1517                 }
1518
1519                 if (hcon->type == LE_LINK) {
1520                         l2cap_le_start(chan);
1521                 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1522                         l2cap_chan_ready(chan);
1523
1524                 } else if (chan->state == BT_CONNECT) {
1525                         l2cap_do_start(chan);
1526                 }
1527
1528                 l2cap_chan_unlock(chan);
1529         }
1530
1531         mutex_unlock(&conn->chan_lock);
1532
1533         queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1534 }
1535
1536 /* Notify sockets that we cannot guarantee reliability anymore */
1537 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1538 {
1539         struct l2cap_chan *chan;
1540
1541         BT_DBG("conn %p", conn);
1542
1543         mutex_lock(&conn->chan_lock);
1544
1545         list_for_each_entry(chan, &conn->chan_l, list) {
1546                 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1547                         l2cap_chan_set_err(chan, err);
1548         }
1549
1550         mutex_unlock(&conn->chan_lock);
1551 }
1552
1553 static void l2cap_info_timeout(struct work_struct *work)
1554 {
1555         struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1556                                                info_timer.work);
1557
1558         conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1559         conn->info_ident = 0;
1560
1561         l2cap_conn_start(conn);
1562 }
1563
1564 /*
1565  * l2cap_user
1566  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1567  * callback is called during registration. The ->remove callback is called
1568  * during unregistration.
1569  * An l2cap_user object is unregistered either explicitly or when the
1570  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1571  * l2cap->hchan, ... are valid as long as the remove callback hasn't been called.
1572  * External modules must own a reference to the l2cap_conn object if they intend
1573  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1574  * any time if they don't.
1575  */
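
/* Minimal usage sketch; my_probe/my_remove are hypothetical callbacks, not
 * part of this file:
 *
 *   static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *   {
 *           return 0;   // acquire whatever state the module needs
 *   }
 *
 *   static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *   {
 *           // conn->hcon and conn->hchan are still valid here
 *   }
 *
 *   static struct l2cap_user my_user = {
 *           .probe  = my_probe,
 *           .remove = my_remove,
 *   };
 *
 *   err = l2cap_register_user(conn, &my_user);
 *   ...
 *   l2cap_unregister_user(conn, &my_user);
 */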
1576
1577 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1578 {
1579         struct hci_dev *hdev = conn->hcon->hdev;
1580         int ret;
1581
1582         /* We need to check whether l2cap_conn is registered. If it is not, we
1583          * must not register the l2cap_user. l2cap_conn_del() unregisters
1584          * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1585          * relies on the parent hci_conn object to be locked. This itself relies
1586          * on the hci_dev object to be locked. So we must lock the hci device
1587          * here, too. */
1588
1589         hci_dev_lock(hdev);
1590
1591         if (user->list.next || user->list.prev) {
1592                 ret = -EINVAL;
1593                 goto out_unlock;
1594         }
1595
1596         /* conn->hchan is NULL after l2cap_conn_del() was called */
1597         if (!conn->hchan) {
1598                 ret = -ENODEV;
1599                 goto out_unlock;
1600         }
1601
1602         ret = user->probe(conn, user);
1603         if (ret)
1604                 goto out_unlock;
1605
1606         list_add(&user->list, &conn->users);
1607         ret = 0;
1608
1609 out_unlock:
1610         hci_dev_unlock(hdev);
1611         return ret;
1612 }
1613 EXPORT_SYMBOL(l2cap_register_user);
1614
1615 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1616 {
1617         struct hci_dev *hdev = conn->hcon->hdev;
1618
1619         hci_dev_lock(hdev);
1620
1621         if (!user->list.next || !user->list.prev)
1622                 goto out_unlock;
1623
1624         list_del(&user->list);
1625         user->list.next = NULL;
1626         user->list.prev = NULL;
1627         user->remove(conn, user);
1628
1629 out_unlock:
1630         hci_dev_unlock(hdev);
1631 }
1632 EXPORT_SYMBOL(l2cap_unregister_user);
1633
1634 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1635 {
1636         struct l2cap_user *user;
1637
1638         while (!list_empty(&conn->users)) {
1639                 user = list_first_entry(&conn->users, struct l2cap_user, list);
1640                 list_del(&user->list);
1641                 user->list.next = NULL;
1642                 user->list.prev = NULL;
1643                 user->remove(conn, user);
1644         }
1645 }
1646
1647 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1648 {
1649         struct l2cap_conn *conn = hcon->l2cap_data;
1650         struct l2cap_chan *chan, *l;
1651
1652         if (!conn)
1653                 return;
1654
1655         BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1656
1657         kfree_skb(conn->rx_skb);
1658
1659         skb_queue_purge(&conn->pending_rx);
1660         flush_work(&conn->pending_rx_work);
1661
1662         l2cap_unregister_all_users(conn);
1663
1664         mutex_lock(&conn->chan_lock);
1665
1666         /* Kill channels */
1667         list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1668                 l2cap_chan_hold(chan);
1669                 l2cap_chan_lock(chan);
1670
1671                 l2cap_chan_del(chan, err);
1672
1673                 l2cap_chan_unlock(chan);
1674
1675                 chan->ops->close(chan);
1676                 l2cap_chan_put(chan);
1677         }
1678
1679         mutex_unlock(&conn->chan_lock);
1680
1681         hci_chan_del(conn->hchan);
1682
1683         if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1684                 cancel_delayed_work_sync(&conn->info_timer);
1685
1686         if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1687                 cancel_delayed_work_sync(&conn->security_timer);
1688                 smp_chan_destroy(conn);
1689         }
1690
1691         hcon->l2cap_data = NULL;
1692         conn->hchan = NULL;
1693         l2cap_conn_put(conn);
1694 }
1695
1696 static void security_timeout(struct work_struct *work)
1697 {
1698         struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1699                                                security_timer.work);
1700
1701         BT_DBG("conn %p", conn);
1702
1703         if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1704                 smp_chan_destroy(conn);
1705                 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1706         }
1707 }
1708
1709 static void l2cap_conn_free(struct kref *ref)
1710 {
1711         struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1712
1713         hci_conn_put(conn->hcon);
1714         kfree(conn);
1715 }
1716
1717 void l2cap_conn_get(struct l2cap_conn *conn)
1718 {
1719         kref_get(&conn->ref);
1720 }
1721 EXPORT_SYMBOL(l2cap_conn_get);
1722
1723 void l2cap_conn_put(struct l2cap_conn *conn)
1724 {
1725         kref_put(&conn->ref, l2cap_conn_free);
1726 }
1727 EXPORT_SYMBOL(l2cap_conn_put);
1728
1729 /* ---- Socket interface ---- */
1730
1731 /* Find a channel with matching psm and source / destination bdaddr.
1732  * Returns an exact src/dst match if one exists, otherwise the closest
1733  * wildcard match (src and/or dst bound to BDADDR_ANY). */
1734 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1735                                                    bdaddr_t *src,
1736                                                    bdaddr_t *dst,
1737                                                    u8 link_type)
1738 {
1739         struct l2cap_chan *c, *c1 = NULL;
1740
1741         read_lock(&chan_list_lock);
1742
1743         list_for_each_entry(c, &chan_list, global_l) {
1744                 if (state && c->state != state)
1745                         continue;
1746
1747                 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1748                         continue;
1749
1750                 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1751                         continue;
1752
1753                 if (c->psm == psm) {
1754                         int src_match, dst_match;
1755                         int src_any, dst_any;
1756
1757                         /* Exact match. */
1758                         src_match = !bacmp(&c->src, src);
1759                         dst_match = !bacmp(&c->dst, dst);
1760                         if (src_match && dst_match) {
1761                                 read_unlock(&chan_list_lock);
1762                                 return c;
1763                         }
1764
1765                         /* Closest match */
1766                         src_any = !bacmp(&c->src, BDADDR_ANY);
1767                         dst_any = !bacmp(&c->dst, BDADDR_ANY);
1768                         if ((src_match && dst_any) || (src_any && dst_match) ||
1769                             (src_any && dst_any))
1770                                 c1 = c;
1771                 }
1772         }
1773
1774         read_unlock(&chan_list_lock);
1775
1776         return c1;
1777 }
1778
1779 static void l2cap_monitor_timeout(struct work_struct *work)
1780 {
1781         struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1782                                                monitor_timer.work);
1783
1784         BT_DBG("chan %p", chan);
1785
1786         l2cap_chan_lock(chan);
1787
1788         if (!chan->conn) {
1789                 l2cap_chan_unlock(chan);
1790                 l2cap_chan_put(chan);
1791                 return;
1792         }
1793
1794         l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1795
1796         l2cap_chan_unlock(chan);
1797         l2cap_chan_put(chan);
1798 }
1799
1800 static void l2cap_retrans_timeout(struct work_struct *work)
1801 {
1802         struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1803                                                retrans_timer.work);
1804
1805         BT_DBG("chan %p", chan);
1806
1807         l2cap_chan_lock(chan);
1808
1809         if (!chan->conn) {
1810                 l2cap_chan_unlock(chan);
1811                 l2cap_chan_put(chan);
1812                 return;
1813         }
1814
1815         l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1816         l2cap_chan_unlock(chan);
1817         l2cap_chan_put(chan);
1818 }
1819
1820 static void l2cap_streaming_send(struct l2cap_chan *chan,
1821                                  struct sk_buff_head *skbs)
1822 {
1823         struct sk_buff *skb;
1824         struct l2cap_ctrl *control;
1825
1826         BT_DBG("chan %p, skbs %p", chan, skbs);
1827
1828         if (__chan_is_moving(chan))
1829                 return;
1830
1831         skb_queue_splice_tail_init(skbs, &chan->tx_q);
1832
1833         while (!skb_queue_empty(&chan->tx_q)) {
1834
1835                 skb = skb_dequeue(&chan->tx_q);
1836
1837                 bt_cb(skb)->control.retries = 1;
1838                 control = &bt_cb(skb)->control;
1839
1840                 control->reqseq = 0;
1841                 control->txseq = chan->next_tx_seq;
1842
1843                 __pack_control(chan, control, skb);
1844
1845                 if (chan->fcs == L2CAP_FCS_CRC16) {
1846                         u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1847                         put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1848                 }
1849
1850                 l2cap_do_send(chan, skb);
1851
1852                 BT_DBG("Sent txseq %u", control->txseq);
1853
1854                 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1855                 chan->frames_sent++;
1856         }
1857 }
1858
1859 static int l2cap_ertm_send(struct l2cap_chan *chan)
1860 {
1861         struct sk_buff *skb, *tx_skb;
1862         struct l2cap_ctrl *control;
1863         int sent = 0;
1864
1865         BT_DBG("chan %p", chan);
1866
1867         if (chan->state != BT_CONNECTED)
1868                 return -ENOTCONN;
1869
1870         if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1871                 return 0;
1872
1873         if (__chan_is_moving(chan))
1874                 return 0;
1875
1876         while (chan->tx_send_head &&
1877                chan->unacked_frames < chan->remote_tx_win &&
1878                chan->tx_state == L2CAP_TX_STATE_XMIT) {
1879
1880                 skb = chan->tx_send_head;
1881
1882                 bt_cb(skb)->control.retries = 1;
1883                 control = &bt_cb(skb)->control;
1884
1885                 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1886                         control->final = 1;
1887
1888                 control->reqseq = chan->buffer_seq;
1889                 chan->last_acked_seq = chan->buffer_seq;
1890                 control->txseq = chan->next_tx_seq;
1891
1892                 __pack_control(chan, control, skb);
1893
1894                 if (chan->fcs == L2CAP_FCS_CRC16) {
1895                         u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1896                         put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1897                 }
1898
1899                 /* Clone after data has been modified. Data is assumed to be
1900                  * read-only (for locking purposes) on cloned sk_buffs.
1901                  */
1902                 tx_skb = skb_clone(skb, GFP_KERNEL);
1903
1904                 if (!tx_skb)
1905                         break;
1906
1907                 __set_retrans_timer(chan);
1908
1909                 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1910                 chan->unacked_frames++;
1911                 chan->frames_sent++;
1912                 sent++;
1913
1914                 if (skb_queue_is_last(&chan->tx_q, skb))
1915                         chan->tx_send_head = NULL;
1916                 else
1917                         chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1918
1919                 l2cap_do_send(chan, tx_skb);
1920                 BT_DBG("Sent txseq %u", control->txseq);
1921         }
1922
1923         BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1924                chan->unacked_frames, skb_queue_len(&chan->tx_q));
1925
1926         return sent;
1927 }
1928
1929 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1930 {
1931         struct l2cap_ctrl control;
1932         struct sk_buff *skb;
1933         struct sk_buff *tx_skb;
1934         u16 seq;
1935
1936         BT_DBG("chan %p", chan);
1937
1938         if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1939                 return;
1940
1941         if (__chan_is_moving(chan))
1942                 return;
1943
1944         while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1945                 seq = l2cap_seq_list_pop(&chan->retrans_list);
1946
1947                 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1948                 if (!skb) {
1949                         BT_DBG("Error: Can't retransmit seq %d, frame missing",
1950                                seq);
1951                         continue;
1952                 }
1953
1954                 bt_cb(skb)->control.retries++;
1955                 control = bt_cb(skb)->control;
1956
1957                 if (chan->max_tx != 0 &&
1958                     bt_cb(skb)->control.retries > chan->max_tx) {
1959                         BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1960                         l2cap_send_disconn_req(chan, ECONNRESET);
1961                         l2cap_seq_list_clear(&chan->retrans_list);
1962                         break;
1963                 }
1964
1965                 control.reqseq = chan->buffer_seq;
1966                 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1967                         control.final = 1;
1968                 else
1969                         control.final = 0;
1970
1971                 if (skb_cloned(skb)) {
1972                         /* Cloned sk_buffs are read-only, so we need a
1973                          * writeable copy
1974                          */
1975                         tx_skb = skb_copy(skb, GFP_KERNEL);
1976                 } else {
1977                         tx_skb = skb_clone(skb, GFP_KERNEL);
1978                 }
1979
1980                 if (!tx_skb) {
1981                         l2cap_seq_list_clear(&chan->retrans_list);
1982                         break;
1983                 }
1984
1985                 /* Update skb contents */
1986                 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1987                         put_unaligned_le32(__pack_extended_control(&control),
1988                                            tx_skb->data + L2CAP_HDR_SIZE);
1989                 } else {
1990                         put_unaligned_le16(__pack_enhanced_control(&control),
1991                                            tx_skb->data + L2CAP_HDR_SIZE);
1992                 }
1993
1994                 if (chan->fcs == L2CAP_FCS_CRC16) {
1995                         u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1996                         put_unaligned_le16(fcs, skb_put(tx_skb,
1997                                                         L2CAP_FCS_SIZE));
1998                 }
1999
2000                 l2cap_do_send(chan, tx_skb);
2001
2002                 BT_DBG("Resent txseq %d", control.txseq);
2003
2004                 chan->last_acked_seq = chan->buffer_seq;
2005         }
2006 }
2007
2008 static void l2cap_retransmit(struct l2cap_chan *chan,
2009                              struct l2cap_ctrl *control)
2010 {
2011         BT_DBG("chan %p, control %p", chan, control);
2012
2013         l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2014         l2cap_ertm_resend(chan);
2015 }
2016
2017 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2018                                  struct l2cap_ctrl *control)
2019 {
2020         struct sk_buff *skb;
2021
2022         BT_DBG("chan %p, control %p", chan, control);
2023
2024         if (control->poll)
2025                 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2026
2027         l2cap_seq_list_clear(&chan->retrans_list);
2028
2029         if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2030                 return;
2031
2032         if (chan->unacked_frames) {
2033                 skb_queue_walk(&chan->tx_q, skb) {
2034                         if (bt_cb(skb)->control.txseq == control->reqseq ||
2035                             skb == chan->tx_send_head)
2036                                 break;
2037                 }
2038
2039                 skb_queue_walk_from(&chan->tx_q, skb) {
2040                         if (skb == chan->tx_send_head)
2041                                 break;
2042
2043                         l2cap_seq_list_append(&chan->retrans_list,
2044                                               bt_cb(skb)->control.txseq);
2045                 }
2046
2047                 l2cap_ertm_resend(chan);
2048         }
2049 }
2050
2051 static void l2cap_send_ack(struct l2cap_chan *chan)
2052 {
2053         struct l2cap_ctrl control;
2054         u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2055                                          chan->last_acked_seq);
2056         int threshold;
2057
2058         BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2059                chan, chan->last_acked_seq, chan->buffer_seq);
2060
2061         memset(&control, 0, sizeof(control));
2062         control.sframe = 1;
2063
2064         if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2065             chan->rx_state == L2CAP_RX_STATE_RECV) {
2066                 __clear_ack_timer(chan);
2067                 control.super = L2CAP_SUPER_RNR;
2068                 control.reqseq = chan->buffer_seq;
2069                 l2cap_send_sframe(chan, &control);
2070         } else {
2071                 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2072                         l2cap_ertm_send(chan);
2073                         /* If any i-frames were sent, they included an ack */
2074                         if (chan->buffer_seq == chan->last_acked_seq)
2075                                 frames_to_ack = 0;
2076                 }
2077
2078                 /* Ack now if the window is 3/4ths full.
2079                  * Calculate without mul or div
2080                  */
2081                 threshold = chan->ack_win;
2082                 threshold += threshold << 1;
2083                 threshold >>= 2;
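                     /* e.g. ack_win = 63: (63 + 126) >> 2 = 47, about 3/4 of the window */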
2084
2085                 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2086                        threshold);
2087
2088                 if (frames_to_ack >= threshold) {
2089                         __clear_ack_timer(chan);
2090                         control.super = L2CAP_SUPER_RR;
2091                         control.reqseq = chan->buffer_seq;
2092                         l2cap_send_sframe(chan, &control);
2093                         frames_to_ack = 0;
2094                 }
2095
2096                 if (frames_to_ack)
2097                         __set_ack_timer(chan);
2098         }
2099 }
2100
2101 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2102                                          struct msghdr *msg, int len,
2103                                          int count, struct sk_buff *skb)
2104 {
2105         struct l2cap_conn *conn = chan->conn;
2106         struct sk_buff **frag;
2107         int sent = 0;
2108
2109         if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2110                 return -EFAULT;
2111
2112         sent += count;
2113         len  -= count;
2114
2115         /* Continuation fragments (no L2CAP header) */
2116         frag = &skb_shinfo(skb)->frag_list;
2117         while (len) {
2118                 struct sk_buff *tmp;
2119
2120                 count = min_t(unsigned int, conn->mtu, len);
2121
2122                 tmp = chan->ops->alloc_skb(chan, count,
2123                                            msg->msg_flags & MSG_DONTWAIT);
2124                 if (IS_ERR(tmp))
2125                         return PTR_ERR(tmp);
2126
2127                 *frag = tmp;
2128
2129                 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2130                         return -EFAULT;
2131
2132                 (*frag)->priority = skb->priority;
2133
2134                 sent += count;
2135                 len  -= count;
2136
2137                 skb->len += (*frag)->len;
2138                 skb->data_len += (*frag)->len;
2139
2140                 frag = &(*frag)->next;
2141         }
2142
2143         return sent;
2144 }
2145
2146 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2147                                                  struct msghdr *msg, size_t len,
2148                                                  u32 priority)
2149 {
2150         struct l2cap_conn *conn = chan->conn;
2151         struct sk_buff *skb;
2152         int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2153         struct l2cap_hdr *lh;
2154
2155         BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2156                __le16_to_cpu(chan->psm), len, priority);
2157
2158         count = min_t(unsigned int, (conn->mtu - hlen), len);
2159
2160         skb = chan->ops->alloc_skb(chan, count + hlen,
2161                                    msg->msg_flags & MSG_DONTWAIT);
2162         if (IS_ERR(skb))
2163                 return skb;
2164
2165         skb->priority = priority;
2166
2167         /* Create L2CAP header */
2168         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2169         lh->cid = cpu_to_le16(chan->dcid);
2170         lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2171         put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2172
2173         err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2174         if (unlikely(err < 0)) {
2175                 kfree_skb(skb);
2176                 return ERR_PTR(err);
2177         }
2178         return skb;
2179 }
2180
2181 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2182                                               struct msghdr *msg, size_t len,
2183                                               u32 priority)
2184 {
2185         struct l2cap_conn *conn = chan->conn;
2186         struct sk_buff *skb;
2187         int err, count;
2188         struct l2cap_hdr *lh;
2189
2190         BT_DBG("chan %p len %zu", chan, len);
2191
2192         count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2193
2194         skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2195                                    msg->msg_flags & MSG_DONTWAIT);
2196         if (IS_ERR(skb))
2197                 return skb;
2198
2199         skb->priority = priority;
2200
2201         /* Create L2CAP header */
2202         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2203         lh->cid = cpu_to_le16(chan->dcid);
2204         lh->len = cpu_to_le16(len);
2205
2206         err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2207         if (unlikely(err < 0)) {
2208                 kfree_skb(skb);
2209                 return ERR_PTR(err);
2210         }
2211         return skb;
2212 }
2213
2214 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2215                                                struct msghdr *msg, size_t len,
2216                                                u16 sdulen)
2217 {
2218         struct l2cap_conn *conn = chan->conn;
2219         struct sk_buff *skb;
2220         int err, count, hlen;
2221         struct l2cap_hdr *lh;
2222
2223         BT_DBG("chan %p len %zu", chan, len);
2224
2225         if (!conn)
2226                 return ERR_PTR(-ENOTCONN);
2227
2228         hlen = __ertm_hdr_size(chan);
2229
2230         if (sdulen)
2231                 hlen += L2CAP_SDULEN_SIZE;
2232
2233         if (chan->fcs == L2CAP_FCS_CRC16)
2234                 hlen += L2CAP_FCS_SIZE;
2235
2236         count = min_t(unsigned int, (conn->mtu - hlen), len);
2237
2238         skb = chan->ops->alloc_skb(chan, count + hlen,
2239                                    msg->msg_flags & MSG_DONTWAIT);
2240         if (IS_ERR(skb))
2241                 return skb;
2242
2243         /* Create L2CAP header */
2244         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2245         lh->cid = cpu_to_le16(chan->dcid);
2246         lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2247
2248         /* Control header is populated later */
2249         if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2250                 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2251         else
2252                 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2253
2254         if (sdulen)
2255                 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2256
2257         err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2258         if (unlikely(err < 0)) {
2259                 kfree_skb(skb);
2260                 return ERR_PTR(err);
2261         }
2262
2263         bt_cb(skb)->control.fcs = chan->fcs;
2264         bt_cb(skb)->control.retries = 0;
2265         return skb;
2266 }
2267
2268 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2269                              struct sk_buff_head *seg_queue,
2270                              struct msghdr *msg, size_t len)
2271 {
2272         struct sk_buff *skb;
2273         u16 sdu_len;
2274         size_t pdu_len;
2275         u8 sar;
2276
2277         BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2278
2279         /* It is critical that ERTM PDUs fit in a single HCI fragment,
2280          * so fragmented skbs are not used.  The HCI layer's handling
2281          * of fragmented skbs is not compatible with ERTM's queueing.
2282          */
2283
2284         /* PDU size is derived from the HCI MTU */
2285         pdu_len = chan->conn->mtu;
2286
2287         /* Constrain PDU size for BR/EDR connections */
2288         if (!chan->hs_hcon)
2289                 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2290
2291         /* Adjust for largest possible L2CAP overhead. */
2292         if (chan->fcs)
2293                 pdu_len -= L2CAP_FCS_SIZE;
2294
2295         pdu_len -= __ertm_hdr_size(chan);
2296
2297         /* Remote device may have requested smaller PDUs */
2298         pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2299
2300         if (len <= pdu_len) {
2301                 sar = L2CAP_SAR_UNSEGMENTED;
2302                 sdu_len = 0;
2303                 pdu_len = len;
2304         } else {
2305                 sar = L2CAP_SAR_START;
2306                 sdu_len = len;
2307                 pdu_len -= L2CAP_SDULEN_SIZE;
2308         }
2309
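             /* Worked example (sizes illustrative): a 2000-byte SDU with an
              * initial pdu_len of 672 goes out as SAR_START carrying 670 data
              * bytes (2 bytes go to the SDU length field), then SAR_CONTINUE
              * with 672 bytes, then SAR_END with the remaining 658 bytes.
              */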
2310         while (len > 0) {
2311                 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2312
2313                 if (IS_ERR(skb)) {
2314                         __skb_queue_purge(seg_queue);
2315                         return PTR_ERR(skb);
2316                 }
2317
2318                 bt_cb(skb)->control.sar = sar;
2319                 __skb_queue_tail(seg_queue, skb);
2320
2321                 len -= pdu_len;
2322                 if (sdu_len) {
2323                         sdu_len = 0;
2324                         pdu_len += L2CAP_SDULEN_SIZE;
2325                 }
2326
2327                 if (len <= pdu_len) {
2328                         sar = L2CAP_SAR_END;
2329                         pdu_len = len;
2330                 } else {
2331                         sar = L2CAP_SAR_CONTINUE;
2332                 }
2333         }
2334
2335         return 0;
2336 }
2337
2338 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2339                                                    struct msghdr *msg,
2340                                                    size_t len, u16 sdulen)
2341 {
2342         struct l2cap_conn *conn = chan->conn;
2343         struct sk_buff *skb;
2344         int err, count, hlen;
2345         struct l2cap_hdr *lh;
2346
2347         BT_DBG("chan %p len %zu", chan, len);
2348
2349         if (!conn)
2350                 return ERR_PTR(-ENOTCONN);
2351
2352         hlen = L2CAP_HDR_SIZE;
2353
2354         if (sdulen)
2355                 hlen += L2CAP_SDULEN_SIZE;
2356
2357         count = min_t(unsigned int, (conn->mtu - hlen), len);
2358
2359         skb = chan->ops->alloc_skb(chan, count + hlen,
2360                                    msg->msg_flags & MSG_DONTWAIT);
2361         if (IS_ERR(skb))
2362                 return skb;
2363
2364         /* Create L2CAP header */
2365         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2366         lh->cid = cpu_to_le16(chan->dcid);
2367         lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2368
2369         if (sdulen)
2370                 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2371
2372         err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2373         if (unlikely(err < 0)) {
2374                 kfree_skb(skb);
2375                 return ERR_PTR(err);
2376         }
2377
2378         return skb;
2379 }
2380
2381 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2382                                 struct sk_buff_head *seg_queue,
2383                                 struct msghdr *msg, size_t len)
2384 {
2385         struct sk_buff *skb;
2386         size_t pdu_len;
2387         u16 sdu_len;
2388
2389         BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2390
2391         pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2392
2393         pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2394
2395         sdu_len = len;
2396         pdu_len -= L2CAP_SDULEN_SIZE;
2397
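             /* As an illustration (sizes hypothetical): a 500-byte SDU with a
              * pdu_len of 230 becomes a first PDU carrying the 2-byte SDU
              * length plus 228 data bytes, then a 230-byte PDU, then a final
              * 42-byte PDU.
              */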
2398         while (len > 0) {
2399                 if (len <= pdu_len)
2400                         pdu_len = len;
2401
2402                 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2403                 if (IS_ERR(skb)) {
2404                         __skb_queue_purge(seg_queue);
2405                         return PTR_ERR(skb);
2406                 }
2407
2408                 __skb_queue_tail(seg_queue, skb);
2409
2410                 len -= pdu_len;
2411
2412                 if (sdu_len) {
2413                         sdu_len = 0;
2414                         pdu_len += L2CAP_SDULEN_SIZE;
2415                 }
2416         }
2417
2418         return 0;
2419 }
2420
2421 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2422                     u32 priority)
2423 {
2424         struct sk_buff *skb;
2425         int err;
2426         struct sk_buff_head seg_queue;
2427
2428         if (!chan->conn)
2429                 return -ENOTCONN;
2430
2431         /* Connectionless channel */
2432         if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2433                 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2434                 if (IS_ERR(skb))
2435                         return PTR_ERR(skb);
2436
2437                 /* Channel lock is released before requesting new skb and then
2438                  * reacquired, so we need to recheck the channel state.
2439                  */
2440                 if (chan->state != BT_CONNECTED) {
2441                         kfree_skb(skb);
2442                         return -ENOTCONN;
2443                 }
2444
2445                 l2cap_do_send(chan, skb);
2446                 return len;
2447         }
2448
2449         switch (chan->mode) {
2450         case L2CAP_MODE_LE_FLOWCTL:
2451                 /* Check outgoing MTU */
2452                 if (len > chan->omtu)
2453                         return -EMSGSIZE;
2454
2455                 if (!chan->tx_credits)
2456                         return -EAGAIN;
2457
2458                 __skb_queue_head_init(&seg_queue);
2459
2460                 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2461
2462                 if (chan->state != BT_CONNECTED) {
2463                         __skb_queue_purge(&seg_queue);
2464                         err = -ENOTCONN;
2465                 }
2466
2467                 if (err)
2468                         return err;
2469
2470                 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2471
2472                 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2473                         l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2474                         chan->tx_credits--;
2475                 }
2476
2477                 if (!chan->tx_credits)
2478                         chan->ops->suspend(chan);
2479
2480                 err = len;
2481
2482                 break;
2483
2484         case L2CAP_MODE_BASIC:
2485                 /* Check outgoing MTU */
2486                 if (len > chan->omtu)
2487                         return -EMSGSIZE;
2488
2489                 /* Create a basic PDU */
2490                 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2491                 if (IS_ERR(skb))
2492                         return PTR_ERR(skb);
2493
2494                 /* Channel lock is released before requesting new skb and then
2495                  * reacquired, so we need to recheck the channel state.
2496                  */
2497                 if (chan->state != BT_CONNECTED) {
2498                         kfree_skb(skb);
2499                         return -ENOTCONN;
2500                 }
2501
2502                 l2cap_do_send(chan, skb);
2503                 err = len;
2504                 break;
2505
2506         case L2CAP_MODE_ERTM:
2507         case L2CAP_MODE_STREAMING:
2508                 /* Check outgoing MTU */
2509                 if (len > chan->omtu) {
2510                         err = -EMSGSIZE;
2511                         break;
2512                 }
2513
2514                 __skb_queue_head_init(&seg_queue);
2515
2516                 /* Do segmentation before calling in to the state machine,
2517                  * since it's possible to block while waiting for memory
2518                  * allocation.
2519                  */
2520                 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2521
2522                 /* The channel could have been closed while segmenting,
2523                  * check that it is still connected.
2524                  */
2525                 if (chan->state != BT_CONNECTED) {
2526                         __skb_queue_purge(&seg_queue);
2527                         err = -ENOTCONN;
2528                 }
2529
2530                 if (err)
2531                         break;
2532
2533                 if (chan->mode == L2CAP_MODE_ERTM)
2534                         l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2535                 else
2536                         l2cap_streaming_send(chan, &seg_queue);
2537
2538                 err = len;
2539
2540                 /* If the skbs were not queued for sending, they'll still be in
2541                  * seg_queue and need to be purged.
2542                  */
2543                 __skb_queue_purge(&seg_queue);
2544                 break;
2545
2546         default:
2547                 BT_DBG("bad mode %1.1x", chan->mode);
2548                 err = -EBADFD;
2549         }
2550
2551         return err;
2552 }
2553
2554 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2555 {
2556         struct l2cap_ctrl control;
2557         u16 seq;
2558
2559         BT_DBG("chan %p, txseq %u", chan, txseq);
2560
2561         memset(&control, 0, sizeof(control));
2562         control.sframe = 1;
2563         control.super = L2CAP_SUPER_SREJ;
2564
2565         for (seq = chan->expected_tx_seq; seq != txseq;
2566              seq = __next_seq(chan, seq)) {
2567                 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2568                         control.reqseq = seq;
2569                         l2cap_send_sframe(chan, &control);
2570                         l2cap_seq_list_append(&chan->srej_list, seq);
2571                 }
2572         }
2573
2574         chan->expected_tx_seq = __next_seq(chan, txseq);
2575 }
2576
2577 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2578 {
2579         struct l2cap_ctrl control;
2580
2581         BT_DBG("chan %p", chan);
2582
2583         if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2584                 return;
2585
2586         memset(&control, 0, sizeof(control));
2587         control.sframe = 1;
2588         control.super = L2CAP_SUPER_SREJ;
2589         control.reqseq = chan->srej_list.tail;
2590         l2cap_send_sframe(chan, &control);
2591 }
2592
2593 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2594 {
2595         struct l2cap_ctrl control;
2596         u16 initial_head;
2597         u16 seq;
2598
2599         BT_DBG("chan %p, txseq %u", chan, txseq);
2600
2601         memset(&control, 0, sizeof(control));
2602         control.sframe = 1;
2603         control.super = L2CAP_SUPER_SREJ;
2604
2605         /* Capture initial list head to allow only one pass through the list. */
2606         initial_head = chan->srej_list.head;
2607
2608         do {
2609                 seq = l2cap_seq_list_pop(&chan->srej_list);
2610                 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2611                         break;
2612
2613                 control.reqseq = seq;
2614                 l2cap_send_sframe(chan, &control);
2615                 l2cap_seq_list_append(&chan->srej_list, seq);
2616         } while (chan->srej_list.head != initial_head);
2617 }
2618
2619 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2620 {
2621         struct sk_buff *acked_skb;
2622         u16 ackseq;
2623
2624         BT_DBG("chan %p, reqseq %u", chan, reqseq);
2625
2626         if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2627                 return;
2628
2629         BT_DBG("expected_ack_seq %u, unacked_frames %u",
2630                chan->expected_ack_seq, chan->unacked_frames);
2631
2632         for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2633              ackseq = __next_seq(chan, ackseq)) {
2634
2635                 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2636                 if (acked_skb) {
2637                         skb_unlink(acked_skb, &chan->tx_q);
2638                         kfree_skb(acked_skb);
2639                         chan->unacked_frames--;
2640                 }
2641         }
2642
2643         chan->expected_ack_seq = reqseq;
2644
2645         if (chan->unacked_frames == 0)
2646                 __clear_retrans_timer(chan);
2647
2648         BT_DBG("unacked_frames %u", chan->unacked_frames);
2649 }
2650
2651 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2652 {
2653         BT_DBG("chan %p", chan);
2654
2655         chan->expected_tx_seq = chan->buffer_seq;
2656         l2cap_seq_list_clear(&chan->srej_list);
2657         skb_queue_purge(&chan->srej_q);
2658         chan->rx_state = L2CAP_RX_STATE_RECV;
2659 }
2660
2661 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2662                                 struct l2cap_ctrl *control,
2663                                 struct sk_buff_head *skbs, u8 event)
2664 {
2665         BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2666                event);
2667
2668         switch (event) {
2669         case L2CAP_EV_DATA_REQUEST:
2670                 if (chan->tx_send_head == NULL)
2671                         chan->tx_send_head = skb_peek(skbs);
2672
2673                 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2674                 l2cap_ertm_send(chan);
2675                 break;
2676         case L2CAP_EV_LOCAL_BUSY_DETECTED:
2677                 BT_DBG("Enter LOCAL_BUSY");
2678                 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2679
2680                 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2681                         /* The SREJ_SENT state must be aborted if we are to
2682                          * enter the LOCAL_BUSY state.
2683                          */
2684                         l2cap_abort_rx_srej_sent(chan);
2685                 }
2686
2687                 l2cap_send_ack(chan);
2688
2689                 break;
2690         case L2CAP_EV_LOCAL_BUSY_CLEAR:
2691                 BT_DBG("Exit LOCAL_BUSY");
2692                 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2693
2694                 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2695                         struct l2cap_ctrl local_control;
2696
2697                         memset(&local_control, 0, sizeof(local_control));
2698                         local_control.sframe = 1;
2699                         local_control.super = L2CAP_SUPER_RR;
2700                         local_control.poll = 1;
2701                         local_control.reqseq = chan->buffer_seq;
2702                         l2cap_send_sframe(chan, &local_control);
2703
2704                         chan->retry_count = 1;
2705                         __set_monitor_timer(chan);
2706                         chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2707                 }
2708                 break;
2709         case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2710                 l2cap_process_reqseq(chan, control->reqseq);
2711                 break;
2712         case L2CAP_EV_EXPLICIT_POLL:
2713                 l2cap_send_rr_or_rnr(chan, 1);
2714                 chan->retry_count = 1;
2715                 __set_monitor_timer(chan);
2716                 __clear_ack_timer(chan);
2717                 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2718                 break;
2719         case L2CAP_EV_RETRANS_TO:
2720                 l2cap_send_rr_or_rnr(chan, 1);
2721                 chan->retry_count = 1;
2722                 __set_monitor_timer(chan);
2723                 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2724                 break;
2725         case L2CAP_EV_RECV_FBIT:
2726                 /* Nothing to process */
2727                 break;
2728         default:
2729                 break;
2730         }
2731 }
2732
2733 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2734                                   struct l2cap_ctrl *control,
2735                                   struct sk_buff_head *skbs, u8 event)
2736 {
2737         BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2738                event);
2739
2740         switch (event) {
2741         case L2CAP_EV_DATA_REQUEST:
2742                 if (chan->tx_send_head == NULL)
2743                         chan->tx_send_head = skb_peek(skbs);
2744                 /* Queue data, but don't send. */
2745                 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2746                 break;
2747         case L2CAP_EV_LOCAL_BUSY_DETECTED:
2748                 BT_DBG("Enter LOCAL_BUSY");
2749                 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2750
2751                 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2752                         /* The SREJ_SENT state must be aborted if we are to
2753                          * enter the LOCAL_BUSY state.
2754                          */
2755                         l2cap_abort_rx_srej_sent(chan);
2756                 }
2757
2758                 l2cap_send_ack(chan);
2759
2760                 break;
2761         case L2CAP_EV_LOCAL_BUSY_CLEAR:
2762                 BT_DBG("Exit LOCAL_BUSY");
2763                 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2764
2765                 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2766                         struct l2cap_ctrl local_control;
2767                         memset(&local_control, 0, sizeof(local_control));
2768                         local_control.sframe = 1;
2769                         local_control.super = L2CAP_SUPER_RR;
2770                         local_control.poll = 1;
2771                         local_control.reqseq = chan->buffer_seq;
2772                         l2cap_send_sframe(chan, &local_control);
2773
2774                         chan->retry_count = 1;
2775                         __set_monitor_timer(chan);
2776                         chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2777                 }
2778                 break;
2779         case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2780                 l2cap_process_reqseq(chan, control->reqseq);
2781
2782                 /* Fall through */
2783
2784         case L2CAP_EV_RECV_FBIT:
2785                 if (control && control->final) {
2786                         __clear_monitor_timer(chan);
2787                         if (chan->unacked_frames > 0)
2788                                 __set_retrans_timer(chan);
2789                         chan->retry_count = 0;
2790                         chan->tx_state = L2CAP_TX_STATE_XMIT;
2791                         BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2792                 }
2793                 break;
2794         case L2CAP_EV_EXPLICIT_POLL:
2795                 /* Ignore */
2796                 break;
2797         case L2CAP_EV_MONITOR_TO:
2798                 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2799                         l2cap_send_rr_or_rnr(chan, 1);
2800                         __set_monitor_timer(chan);
2801                         chan->retry_count++;
2802                 } else {
2803                         l2cap_send_disconn_req(chan, ECONNABORTED);
2804                 }
2805                 break;
2806         default:
2807                 break;
2808         }
2809 }
2810
2811 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2812                      struct sk_buff_head *skbs, u8 event)
2813 {
2814         BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2815                chan, control, skbs, event, chan->tx_state);
2816
2817         switch (chan->tx_state) {
2818         case L2CAP_TX_STATE_XMIT:
2819                 l2cap_tx_state_xmit(chan, control, skbs, event);
2820                 break;
2821         case L2CAP_TX_STATE_WAIT_F:
2822                 l2cap_tx_state_wait_f(chan, control, skbs, event);
2823                 break;
2824         default:
2825                 /* Ignore event */
2826                 break;
2827         }
2828 }
2829
2830 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2831                              struct l2cap_ctrl *control)
2832 {
2833         BT_DBG("chan %p, control %p", chan, control);
2834         l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2835 }
2836
2837 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2838                                   struct l2cap_ctrl *control)
2839 {
2840         BT_DBG("chan %p, control %p", chan, control);
2841         l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2842 }
2843
2844 /* Copy frame to all raw sockets on that connection */
2845 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2846 {
2847         struct sk_buff *nskb;
2848         struct l2cap_chan *chan;
2849
2850         BT_DBG("conn %p", conn);
2851
2852         mutex_lock(&conn->chan_lock);
2853
2854         list_for_each_entry(chan, &conn->chan_l, list) {
2855                 if (chan->chan_type != L2CAP_CHAN_RAW)
2856                         continue;
2857
2858                 /* Don't send frame to the channel it came from */
2859                 if (bt_cb(skb)->chan == chan)
2860                         continue;
2861
2862                 nskb = skb_clone(skb, GFP_KERNEL);
2863                 if (!nskb)
2864                         continue;
2865                 if (chan->ops->recv(chan, nskb))
2866                         kfree_skb(nskb);
2867         }
2868
2869         mutex_unlock(&conn->chan_lock);
2870 }
2871
2872 /* ---- L2CAP signalling commands ---- */
2873 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2874                                        u8 ident, u16 dlen, void *data)
2875 {
2876         struct sk_buff *skb, **frag;
2877         struct l2cap_cmd_hdr *cmd;
2878         struct l2cap_hdr *lh;
2879         int len, count;
2880
2881         BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2882                conn, code, ident, dlen);
2883
2884         if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2885                 return NULL;
2886
2887         len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2888         count = min_t(unsigned int, conn->mtu, len);
2889
2890         skb = bt_skb_alloc(count, GFP_KERNEL);
2891         if (!skb)
2892                 return NULL;
2893
2894         lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2895         lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2896
2897         if (conn->hcon->type == LE_LINK)
2898                 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2899         else
2900                 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2901
2902         cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2903         cmd->code  = code;
2904         cmd->ident = ident;
2905         cmd->len   = cpu_to_le16(dlen);
2906
2907         if (dlen) {
2908                 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2909                 memcpy(skb_put(skb, count), data, count);
2910                 data += count;
2911         }
2912
2913         len -= skb->len;
2914
2915         /* Continuation fragments (no L2CAP header) */
2916         frag = &skb_shinfo(skb)->frag_list;
2917         while (len) {
2918                 count = min_t(unsigned int, conn->mtu, len);
2919
2920                 *frag = bt_skb_alloc(count, GFP_KERNEL);
2921                 if (!*frag)
2922                         goto fail;
2923
2924                 memcpy(skb_put(*frag, count), data, count);
2925
2926                 len  -= count;
2927                 data += count;
2928
2929                 frag = &(*frag)->next;
2930         }
2931
2932         return skb;
2933
2934 fail:
2935         kfree_skb(skb);
2936         return NULL;
2937 }
2938
2939 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2940                                      unsigned long *val)
2941 {
2942         struct l2cap_conf_opt *opt = *ptr;
2943         int len;
2944
2945         len = L2CAP_CONF_OPT_SIZE + opt->len;
2946         *ptr += len;
2947
2948         *type = opt->type;
2949         *olen = opt->len;
2950
2951         switch (opt->len) {
2952         case 1:
2953                 *val = *((u8 *) opt->val);
2954                 break;
2955
2956         case 2:
2957                 *val = get_unaligned_le16(opt->val);
2958                 break;
2959
2960         case 4:
2961                 *val = get_unaligned_le32(opt->val);
2962                 break;
2963
2964         default:
2965                 *val = (unsigned long) opt->val;
2966                 break;
2967         }
2968
2969         BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2970         return len;
2971 }
2972
2973 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2974 {
2975         struct l2cap_conf_opt *opt = *ptr;
2976
2977         BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2978
2979         opt->type = type;
2980         opt->len  = len;
2981
2982         switch (len) {
2983         case 1:
2984                 *((u8 *) opt->val)  = val;
2985                 break;
2986
2987         case 2:
2988                 put_unaligned_le16(val, opt->val);
2989                 break;
2990
2991         case 4:
2992                 put_unaligned_le32(val, opt->val);
2993                 break;
2994
2995         default:
2996                 memcpy(opt->val, (void *) val, len);
2997                 break;
2998         }
2999
3000         *ptr += L2CAP_CONF_OPT_SIZE + len;
3001 }
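
     /* Example (values illustrative): adding the default MTU of 672 with
      * l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672) emits type 0x01,
      * length 0x02 and the little-endian value 0x02a0, i.e. the bytes
      * 01 02 a0 02, and advances ptr by 4 bytes.
      */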
3002
3003 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3004 {
3005         struct l2cap_conf_efs efs;
3006
3007         switch (chan->mode) {
3008         case L2CAP_MODE_ERTM:
3009                 efs.id          = chan->local_id;
3010                 efs.stype       = chan->local_stype;
3011                 efs.msdu        = cpu_to_le16(chan->local_msdu);
3012                 efs.sdu_itime   = cpu_to_le32(chan->local_sdu_itime);
3013                 efs.acc_lat     = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3014                 efs.flush_to    = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3015                 break;
3016
3017         case L2CAP_MODE_STREAMING:
3018                 efs.id          = 1;
3019                 efs.stype       = L2CAP_SERV_BESTEFFORT;
3020                 efs.msdu        = cpu_to_le16(chan->local_msdu);
3021                 efs.sdu_itime   = cpu_to_le32(chan->local_sdu_itime);
3022                 efs.acc_lat     = 0;
3023                 efs.flush_to    = 0;
3024                 break;
3025
3026         default:
3027                 return;
3028         }
3029
3030         l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3031                            (unsigned long) &efs);
3032 }
3033
3034 static void l2cap_ack_timeout(struct work_struct *work)
3035 {
3036         struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3037                                                ack_timer.work);
3038         u16 frames_to_ack;
3039
3040         BT_DBG("chan %p", chan);
3041
3042         l2cap_chan_lock(chan);
3043
3044         frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3045                                      chan->last_acked_seq);
3046
3047         if (frames_to_ack)
3048                 l2cap_send_rr_or_rnr(chan, 0);
3049
3050         l2cap_chan_unlock(chan);
3051         l2cap_chan_put(chan);
3052 }
3053
3054 int l2cap_ertm_init(struct l2cap_chan *chan)
3055 {
3056         int err;
3057
3058         chan->next_tx_seq = 0;
3059         chan->expected_tx_seq = 0;
3060         chan->expected_ack_seq = 0;
3061         chan->unacked_frames = 0;
3062         chan->buffer_seq = 0;
3063         chan->frames_sent = 0;
3064         chan->last_acked_seq = 0;
3065         chan->sdu = NULL;
3066         chan->sdu_last_frag = NULL;
3067         chan->sdu_len = 0;
3068
3069         skb_queue_head_init(&chan->tx_q);
3070
3071         chan->local_amp_id = AMP_ID_BREDR;
3072         chan->move_id = AMP_ID_BREDR;
3073         chan->move_state = L2CAP_MOVE_STABLE;
3074         chan->move_role = L2CAP_MOVE_ROLE_NONE;
3075
3076         if (chan->mode != L2CAP_MODE_ERTM)
3077                 return 0;
3078
3079         chan->rx_state = L2CAP_RX_STATE_RECV;
3080         chan->tx_state = L2CAP_TX_STATE_XMIT;
3081
3082         INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3083         INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3084         INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3085
3086         skb_queue_head_init(&chan->srej_q);
3087
3088         err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3089         if (err < 0)
3090                 return err;
3091
3092         err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3093         if (err < 0)
3094                 l2cap_seq_list_free(&chan->srej_list);
3095
3096         return err;
3097 }
3098
3099 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3100 {
3101         switch (mode) {
3102         case L2CAP_MODE_STREAMING:
3103         case L2CAP_MODE_ERTM:
3104                 if (l2cap_mode_supported(mode, remote_feat_mask))
3105                         return mode;
3106                 /* fall through */
3107         default:
3108                 return L2CAP_MODE_BASIC;
3109         }
3110 }
3111
3112 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3113 {
3114         return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3115 }
3116
3117 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3118 {
3119         return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3120 }
3121
3122 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3123                                       struct l2cap_conf_rfc *rfc)
3124 {
3125         if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3126                 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3127
3128                 /* Class 1 devices must have ERTM timeouts
3129                  * exceeding the Link Supervision Timeout.  The
3130                  * default Link Supervision Timeout for AMP
3131                  * controllers is 10 seconds.
3132                  *
3133                  * Class 1 devices use 0xffffffff for their
3134                  * best-effort flush timeout, so the clamping logic
3135                  * will result in a timeout that meets the above
3136                  * requirement.  ERTM timeouts are 16-bit values, so
3137                  * the maximum timeout is 65.535 seconds.
3138                  */
3139
3140                 /* Convert timeout to milliseconds and round */
3141                 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3142
3143                 /* This is the recommended formula for class 2 devices
3144                  * that start ERTM timers when packets are sent to the
3145                  * controller.
3146                  */
3147                 ertm_to = 3 * ertm_to + 500;
3148
3149                 if (ertm_to > 0xffff)
3150                         ertm_to = 0xffff;
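                     /* e.g. the Class 1 value 0xffffffff rounds up to 4294968 ms;
                      * 3 * 4294968 + 500 exceeds 0xffff, so the timeout is clamped
                      * to 0xffff (65.535 seconds), as noted above.
                      */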
3151
3152                 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3153                 rfc->monitor_timeout = rfc->retrans_timeout;
3154         } else {
3155                 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3156                 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3157         }
3158 }
3159
3160 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3161 {
3162         if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3163             __l2cap_ews_supported(chan->conn)) {
3164                 /* use extended control field */
3165                 set_bit(FLAG_EXT_CTRL, &chan->flags);
3166                 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3167         } else {
3168                 chan->tx_win = min_t(u16, chan->tx_win,
3169                                      L2CAP_DEFAULT_TX_WINDOW);
3170                 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3171         }
3172         chan->ack_win = chan->tx_win;
3173 }
3174
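     /* Build a Configuration Request for the channel: select the mode on
      * the first request, add the MTU option when it differs from the
      * default, then append the mode specific options (RFC, and for
      * ERTM the EFS, EWS and FCS options as negotiated).
      */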
3175 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3176 {
3177         struct l2cap_conf_req *req = data;
3178         struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3179         void *ptr = req->data;
3180         u16 size;
3181
3182         BT_DBG("chan %p", chan);
3183
3184         if (chan->num_conf_req || chan->num_conf_rsp)
3185                 goto done;
3186
3187         switch (chan->mode) {
3188         case L2CAP_MODE_STREAMING:
3189         case L2CAP_MODE_ERTM:
3190                 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3191                         break;
3192
3193                 if (__l2cap_efs_supported(chan->conn))
3194                         set_bit(FLAG_EFS_ENABLE, &chan->flags);
3195
3196                 /* fall through */
3197         default:
3198                 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3199                 break;
3200         }
3201
3202 done:
3203         if (chan->imtu != L2CAP_DEFAULT_MTU)
3204                 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3205
3206         switch (chan->mode) {
3207         case L2CAP_MODE_BASIC:
3208                 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3209                     !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3210                         break;
3211
3212                 rfc.mode            = L2CAP_MODE_BASIC;
3213                 rfc.txwin_size      = 0;
3214                 rfc.max_transmit    = 0;
3215                 rfc.retrans_timeout = 0;
3216                 rfc.monitor_timeout = 0;
3217                 rfc.max_pdu_size    = 0;
3218
3219                 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3220                                    (unsigned long) &rfc);
3221                 break;
3222
3223         case L2CAP_MODE_ERTM:
3224                 rfc.mode            = L2CAP_MODE_ERTM;
3225                 rfc.max_transmit    = chan->max_tx;
3226
3227                 __l2cap_set_ertm_timeouts(chan, &rfc);
3228
3229                 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3230                              L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3231                              L2CAP_FCS_SIZE);
3232                 rfc.max_pdu_size = cpu_to_le16(size);
3233
3234                 l2cap_txwin_setup(chan);
3235
3236                 rfc.txwin_size = min_t(u16, chan->tx_win,
3237                                        L2CAP_DEFAULT_TX_WINDOW);
3238
3239                 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3240                                    (unsigned long) &rfc);
3241
3242                 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3243                         l2cap_add_opt_efs(&ptr, chan);
3244
3245                 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3246                         l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3247                                            chan->tx_win);
3248
3249                 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3250                         if (chan->fcs == L2CAP_FCS_NONE ||
3251                             test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3252                                 chan->fcs = L2CAP_FCS_NONE;
3253                                 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3254                                                    chan->fcs);
3255                         }
3256                 break;
3257
3258         case L2CAP_MODE_STREAMING:
3259                 l2cap_txwin_setup(chan);
3260                 rfc.mode            = L2CAP_MODE_STREAMING;
3261                 rfc.txwin_size      = 0;
3262                 rfc.max_transmit    = 0;
3263                 rfc.retrans_timeout = 0;
3264                 rfc.monitor_timeout = 0;
3265
3266                 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3267                              L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3268                              L2CAP_FCS_SIZE);