Bluetooth: Fix missing L2CAP EWS Conf parameter
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101 }
102
103 /* Find channel with given DCID.
104 * Returns locked channel.
105 */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108 {
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118 }
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134 {
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144 }
145
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155 }
156
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 int err;
160
161 write_lock(&chan_list_lock);
162
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
166 }
167
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
174
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
182 }
183 }
184
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
188 }
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
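/* Every CID in the dynamic range is in use; 0 is the reserved null CID
 * and never a valid dynamic CID, so it signals that no CID was
 * allocated.
 */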
210 return 0;
211 }
212
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
217
218 chan->state = state;
219 chan->ops->state_change(chan, state);
220 }
221
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 struct sock *sk = chan->sk;
225
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
229 }
230
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 struct sock *sk = chan->sk;
234
235 sk->sk_err = err;
236 }
237
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 struct sock *sk = chan->sk;
241
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
245 }
246
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
253 }
254 }
255
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
262 }
263 }
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
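/* For example, with a 16-entry array (mask 0x0f), appending 5 and then
 * 9 to an empty list yields head = 5, list[5] = 9,
 * list[9] = L2CAP_SEQ_LIST_TAIL, tail = 9. Membership is a single array
 * lookup, and popping the head just follows list[head & mask].
 */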
288
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) into a smaller array that is
295 * sized for the negotiated ERTM transmit window.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 kfree(seq_list->list);
315 }
316
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
319 {
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 u16 mask = seq_list->mask;
327
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 }
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
347 }
348
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
354 }
355 return seq;
356 }
357
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 u16 mask = seq_list->mask;
381
382 /* All appends happen in constant time */
383
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
386
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
391
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 chan_timer.work);
400 struct l2cap_conn *conn = chan->conn;
401 int reason;
402
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
407
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
413 else
414 reason = ETIMEDOUT;
415
416 l2cap_chan_close(chan, reason);
417
418 l2cap_chan_unlock(chan);
419
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
422
423 l2cap_chan_put(chan);
424 }
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452 }
453
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457
458 BT_DBG("chan %p", chan);
459
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
463
464 kfree(chan);
465 }
466
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470
471 kref_get(&c->kref);
472 }
473
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477
478 kref_put(&c->kref, l2cap_chan_destroy);
479 }
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
497
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499
500 chan->conn = conn;
501
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
509 } else {
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
513 }
514 break;
515
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
521 break;
522
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 break;
529
530 default:
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
535 }
536
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
543
544 l2cap_chan_hold(chan);
545
546 list_add(&chan->list, &conn->chan_l);
547 }
548
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550 {
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
554 }
555
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 struct l2cap_conn *conn = chan->conn;
559
560 __clear_chan_timer(chan);
561
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563
564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
568
569 l2cap_chan_put(chan);
570
571 chan->conn = NULL;
572
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
575
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
578 }
579
580 if (chan->hs_hchan) {
581 struct hci_chan *hs_hchan = chan->hs_hchan;
582
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
585 }
586
587 chan->ops->teardown(chan, err);
588
589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 return;
591
592 switch (chan->mode) {
593 case L2CAP_MODE_BASIC:
594 break;
595
596 case L2CAP_MODE_ERTM:
597 __clear_retrans_timer(chan);
598 __clear_monitor_timer(chan);
599 __clear_ack_timer(chan);
600
601 skb_queue_purge(&chan->srej_q);
602
603 l2cap_seq_list_free(&chan->srej_list);
604 l2cap_seq_list_free(&chan->retrans_list);
605
606 /* fall through */
607
608 case L2CAP_MODE_STREAMING:
609 skb_queue_purge(&chan->tx_q);
610 break;
611 }
612
613 return;
614 }
615
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
617 {
618 struct l2cap_conn *conn = chan->conn;
619 struct sock *sk = chan->sk;
620
621 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 sk);
623
624 switch (chan->state) {
625 case BT_LISTEN:
626 chan->ops->teardown(chan, 0);
627 break;
628
629 case BT_CONNECTED:
630 case BT_CONFIG:
631 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 conn->hcon->type == ACL_LINK) {
633 __set_chan_timer(chan, sk->sk_sndtimeo);
634 l2cap_send_disconn_req(chan, reason);
635 } else
636 l2cap_chan_del(chan, reason);
637 break;
638
639 case BT_CONNECT2:
640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 conn->hcon->type == ACL_LINK) {
642 struct l2cap_conn_rsp rsp;
643 __u16 result;
644
645 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 result = L2CAP_CR_SEC_BLOCK;
647 else
648 result = L2CAP_CR_BAD_PSM;
649 l2cap_state_change(chan, BT_DISCONN);
650
651 rsp.scid = cpu_to_le16(chan->dcid);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.result = cpu_to_le16(result);
654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 sizeof(rsp), &rsp);
657 }
658
659 l2cap_chan_del(chan, reason);
660 break;
661
662 case BT_CONNECT:
663 case BT_DISCONN:
664 l2cap_chan_del(chan, reason);
665 break;
666
667 default:
668 chan->ops->teardown(chan, 0);
669 break;
670 }
671 }
672
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
674 {
675 if (chan->chan_type == L2CAP_CHAN_RAW) {
676 switch (chan->sec_level) {
677 case BT_SECURITY_HIGH:
678 return HCI_AT_DEDICATED_BONDING_MITM;
679 case BT_SECURITY_MEDIUM:
680 return HCI_AT_DEDICATED_BONDING;
681 default:
682 return HCI_AT_NO_BONDING;
683 }
684 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 if (chan->sec_level == BT_SECURITY_LOW)
686 chan->sec_level = BT_SECURITY_SDP;
687
688 if (chan->sec_level == BT_SECURITY_HIGH)
689 return HCI_AT_NO_BONDING_MITM;
690 else
691 return HCI_AT_NO_BONDING;
692 } else {
693 switch (chan->sec_level) {
694 case BT_SECURITY_HIGH:
695 return HCI_AT_GENERAL_BONDING_MITM;
696 case BT_SECURITY_MEDIUM:
697 return HCI_AT_GENERAL_BONDING;
698 default:
699 return HCI_AT_NO_BONDING;
700 }
701 }
702 }
703
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
706 {
707 struct l2cap_conn *conn = chan->conn;
708 __u8 auth_type;
709
710 auth_type = l2cap_get_auth_type(chan);
711
712 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
713 }
714
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
716 {
717 u8 id;
718
719 /* Get next available identifier.
720 * 1 - 128 are used by the kernel.
721 * 129 - 199 are reserved.
722 * 200 - 254 are used by utilities like l2ping, etc.
723 */
724
725 spin_lock(&conn->lock);
726
727 if (++conn->tx_ident > 128)
728 conn->tx_ident = 1;
729
730 id = conn->tx_ident;
731
732 spin_unlock(&conn->lock);
733
734 return id;
735 }
736
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 void *data)
739 {
740 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 u8 flags;
742
743 BT_DBG("code 0x%2.2x", code);
744
745 if (!skb)
746 return;
747
748 if (lmp_no_flush_capable(conn->hcon->hdev))
749 flags = ACL_START_NO_FLUSH;
750 else
751 flags = ACL_START;
752
753 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 skb->priority = HCI_PRIO_MAX;
755
756 hci_send_acl(conn->hchan, skb, flags);
757 }
758
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
766 {
767 struct hci_conn *hcon = chan->conn->hcon;
768 u16 flags;
769
770 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 skb->priority);
772
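/* If the channel uses a high-speed (AMP) link and no move is in
 * progress, send on the high-speed hci_chan (dropping the frame if the
 * logical link is gone) instead of the BR/EDR ACL link.
 */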
773 if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 if (chan->hs_hchan)
775 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 else
777 kfree_skb(skb);
778
779 return;
780 }
781
782 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 lmp_no_flush_capable(hcon->hdev))
784 flags = ACL_START_NO_FLUSH;
785 else
786 flags = ACL_START;
787
788 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 hci_send_acl(chan->conn->hchan, skb, flags);
790 }
791
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
793 {
794 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
796
797 if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 /* S-Frame */
799 control->sframe = 1;
800 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
802
803 control->sar = 0;
804 control->txseq = 0;
805 } else {
806 /* I-Frame */
807 control->sframe = 0;
808 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
810
811 control->poll = 0;
812 control->super = 0;
813 }
814 }
815
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
817 {
818 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
820
821 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 /* S-Frame */
823 control->sframe = 1;
824 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
826
827 control->sar = 0;
828 control->txseq = 0;
829 } else {
830 /* I-Frame */
831 control->sframe = 0;
832 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
834
835 control->poll = 0;
836 control->super = 0;
837 }
838 }
839
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 struct sk_buff *skb)
842 {
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 __unpack_extended_control(get_unaligned_le32(skb->data),
845 &bt_cb(skb)->control);
846 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 } else {
848 __unpack_enhanced_control(get_unaligned_le16(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
851 }
852 }
853
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
855 {
856 u32 packed;
857
858 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
860
861 if (control->sframe) {
862 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 } else {
866 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
868 }
869
870 return packed;
871 }
872
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
874 {
875 u16 packed;
876
877 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
879
880 if (control->sframe) {
881 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 packed |= L2CAP_CTRL_FRAME_TYPE;
884 } else {
885 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
887 }
888
889 return packed;
890 }
891
892 static inline void __pack_control(struct l2cap_chan *chan,
893 struct l2cap_ctrl *control,
894 struct sk_buff *skb)
895 {
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 put_unaligned_le32(__pack_extended_control(control),
898 skb->data + L2CAP_HDR_SIZE);
899 } else {
900 put_unaligned_le16(__pack_enhanced_control(control),
901 skb->data + L2CAP_HDR_SIZE);
902 }
903 }
904
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
911 }
912
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 u32 control)
915 {
916 struct sk_buff *skb;
917 struct l2cap_hdr *lh;
918 int hlen = __ertm_hdr_size(chan);
919
920 if (chan->fcs == L2CAP_FCS_CRC16)
921 hlen += L2CAP_FCS_SIZE;
922
923 skb = bt_skb_alloc(hlen, GFP_KERNEL);
924
925 if (!skb)
926 return ERR_PTR(-ENOMEM);
927
928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 lh->cid = cpu_to_le16(chan->dcid);
931
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 else
935 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
936
937 if (chan->fcs == L2CAP_FCS_CRC16) {
938 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
940 }
941
942 skb->priority = HCI_PRIO_MAX;
943 return skb;
944 }
945
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 struct l2cap_ctrl *control)
948 {
949 struct sk_buff *skb;
950 u32 control_field;
951
952 BT_DBG("chan %p, control %p", chan, control);
953
954 if (!control->sframe)
955 return;
956
957 if (__chan_is_moving(chan))
958 return;
959
960 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 !control->poll)
962 control->final = 1;
963
964 if (control->super == L2CAP_SUPER_RR)
965 clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 else if (control->super == L2CAP_SUPER_RNR)
967 set_bit(CONN_RNR_SENT, &chan->conn_state);
968
969 if (control->super != L2CAP_SUPER_SREJ) {
970 chan->last_acked_seq = control->reqseq;
971 __clear_ack_timer(chan);
972 }
973
974 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 control->final, control->poll, control->super);
976
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 control_field = __pack_extended_control(control);
979 else
980 control_field = __pack_enhanced_control(control);
981
982 skb = l2cap_create_sframe_pdu(chan, control_field);
983 if (!IS_ERR(skb))
984 l2cap_do_send(chan, skb);
985 }
986
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
988 {
989 struct l2cap_ctrl control;
990
991 BT_DBG("chan %p, poll %d", chan, poll);
992
993 memset(&control, 0, sizeof(control));
994 control.sframe = 1;
995 control.poll = poll;
996
997 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 control.super = L2CAP_SUPER_RNR;
999 else
1000 control.super = L2CAP_SUPER_RR;
1001
1002 control.reqseq = chan->buffer_seq;
1003 l2cap_send_sframe(chan, &control);
1004 }
1005
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1007 {
1008 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1009 }
1010
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 struct l2cap_conn *conn = chan->conn;
1014
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1022 }
1023
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1025 {
1026 /* Check EFS parameters */
1027 return true;
1028 }
1029
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1031 {
1032 struct l2cap_conn *conn = chan->conn;
1033 struct l2cap_conn_req req;
1034
1035 req.scid = cpu_to_le16(chan->scid);
1036 req.psm = chan->psm;
1037
1038 chan->ident = l2cap_get_ident(conn);
1039
1040 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1041
1042 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1043 }
1044
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1046 {
1047 struct l2cap_create_chan_req req;
1048 req.scid = cpu_to_le16(chan->scid);
1049 req.psm = chan->psm;
1050 req.amp_id = amp_id;
1051
1052 chan->ident = l2cap_get_ident(chan->conn);
1053
1054 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 sizeof(req), &req);
1056 }
1057
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1059 {
1060 struct sk_buff *skb;
1061
1062 BT_DBG("chan %p", chan);
1063
1064 if (chan->mode != L2CAP_MODE_ERTM)
1065 return;
1066
1067 __clear_retrans_timer(chan);
1068 __clear_monitor_timer(chan);
1069 __clear_ack_timer(chan);
1070
1071 chan->retry_count = 0;
1072 skb_queue_walk(&chan->tx_q, skb) {
1073 if (bt_cb(skb)->control.retries)
1074 bt_cb(skb)->control.retries = 1;
1075 else
1076 break;
1077 }
1078
1079 chan->expected_tx_seq = chan->buffer_seq;
1080
1081 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 l2cap_seq_list_clear(&chan->retrans_list);
1084 l2cap_seq_list_clear(&chan->srej_list);
1085 skb_queue_purge(&chan->srej_q);
1086
1087 chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 chan->rx_state = L2CAP_RX_STATE_MOVE;
1089
1090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1091 }
1092
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1094 {
1095 u8 move_role = chan->move_role;
1096 BT_DBG("chan %p", chan);
1097
1098 chan->move_state = L2CAP_MOVE_STABLE;
1099 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1100
1101 if (chan->mode != L2CAP_MODE_ERTM)
1102 return;
1103
1104 switch (move_role) {
1105 case L2CAP_MOVE_ROLE_INITIATOR:
1106 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 break;
1109 case L2CAP_MOVE_ROLE_RESPONDER:
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 break;
1112 }
1113 }
1114
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1116 {
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan->conf_state = 0;
1119 __clear_chan_timer(chan);
1120
1121 chan->state = BT_CONNECTED;
1122
1123 chan->ops->ready(chan);
1124 }
1125
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1127 {
1128 if (__amp_capable(chan)) {
1129 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 a2mp_discover_amp(chan);
1131 } else {
1132 l2cap_send_conn_req(chan);
1133 }
1134 }
1135
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1137 {
1138 struct l2cap_conn *conn = chan->conn;
1139
1140 if (conn->hcon->type == LE_LINK) {
1141 l2cap_chan_ready(chan);
1142 return;
1143 }
1144
1145 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 return;
1148
1149 if (l2cap_chan_check_security(chan) &&
1150 __l2cap_no_conn_pending(chan)) {
1151 l2cap_start_connection(chan);
1152 }
1153 } else {
1154 struct l2cap_info_req req;
1155 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1156
1157 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 conn->info_ident = l2cap_get_ident(conn);
1159
1160 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1161
1162 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 sizeof(req), &req);
1164 }
1165 }
1166
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1180 }
1181 }
1182
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1184 {
1185 struct sock *sk = chan->sk;
1186 struct l2cap_conn *conn = chan->conn;
1187 struct l2cap_disconn_req req;
1188
1189 if (!conn)
1190 return;
1191
1192 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 __clear_retrans_timer(chan);
1194 __clear_monitor_timer(chan);
1195 __clear_ack_timer(chan);
1196 }
1197
1198 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 l2cap_state_change(chan, BT_DISCONN);
1200 return;
1201 }
1202
1203 req.dcid = cpu_to_le16(chan->dcid);
1204 req.scid = cpu_to_le16(chan->scid);
1205 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 sizeof(req), &req);
1207
1208 lock_sock(sk);
1209 __l2cap_state_change(chan, BT_DISCONN);
1210 __l2cap_chan_set_err(chan, err);
1211 release_sock(sk);
1212 }
1213
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1216 {
1217 struct l2cap_chan *chan, *tmp;
1218
1219 BT_DBG("conn %p", conn);
1220
1221 mutex_lock(&conn->chan_lock);
1222
1223 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 struct sock *sk = chan->sk;
1225
1226 l2cap_chan_lock(chan);
1227
1228 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 l2cap_chan_unlock(chan);
1230 continue;
1231 }
1232
1233 if (chan->state == BT_CONNECT) {
1234 if (!l2cap_chan_check_security(chan) ||
1235 !__l2cap_no_conn_pending(chan)) {
1236 l2cap_chan_unlock(chan);
1237 continue;
1238 }
1239
1240 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 && test_bit(CONF_STATE2_DEVICE,
1242 &chan->conf_state)) {
1243 l2cap_chan_close(chan, ECONNRESET);
1244 l2cap_chan_unlock(chan);
1245 continue;
1246 }
1247
1248 l2cap_start_connection(chan);
1249
1250 } else if (chan->state == BT_CONNECT2) {
1251 struct l2cap_conn_rsp rsp;
1252 char buf[128];
1253 rsp.scid = cpu_to_le16(chan->dcid);
1254 rsp.dcid = cpu_to_le16(chan->scid);
1255
1256 if (l2cap_chan_check_security(chan)) {
1257 lock_sock(sk);
1258 if (test_bit(BT_SK_DEFER_SETUP,
1259 &bt_sk(sk)->flags)) {
1260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 chan->ops->defer(chan);
1263
1264 } else {
1265 __l2cap_state_change(chan, BT_CONFIG);
1266 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1268 }
1269 release_sock(sk);
1270 } else {
1271 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1273 }
1274
1275 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 sizeof(rsp), &rsp);
1277
1278 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 rsp.result != L2CAP_CR_SUCCESS) {
1280 l2cap_chan_unlock(chan);
1281 continue;
1282 }
1283
1284 set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 l2cap_build_conf_req(chan, buf), buf);
1287 chan->num_conf_req++;
1288 }
1289
1290 l2cap_chan_unlock(chan);
1291 }
1292
1293 mutex_unlock(&conn->chan_lock);
1294 }
1295
1296 /* Find channel with given cid and source/destination bdaddr.
1297 * Returns closest match.
1298 */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 bdaddr_t *src,
1301 bdaddr_t *dst)
1302 {
1303 struct l2cap_chan *c, *c1 = NULL;
1304
1305 read_lock(&chan_list_lock);
1306
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1309
1310 if (state && c->state != state)
1311 continue;
1312
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1316
1317 /* Exact match. */
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1322 return c;
1323 }
1324
1325 /* Closest match */
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1330 c1 = c;
1331 }
1332 }
1333
1334 read_unlock(&chan_list_lock);
1335
1336 return c1;
1337 }
1338
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1340 {
1341 struct sock *parent, *sk;
1342 struct l2cap_chan *chan, *pchan;
1343
1344 BT_DBG("");
1345
1346 /* Check if we have a socket listening on this CID */
1347 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 conn->src, conn->dst);
1349 if (!pchan)
1350 return;
1351
1352 parent = pchan->sk;
1353
1354 lock_sock(parent);
1355
1356 chan = pchan->ops->new_connection(pchan);
1357 if (!chan)
1358 goto clean;
1359
1360 sk = chan->sk;
1361
1362 hci_conn_hold(conn->hcon);
1363 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1364
1365 bacpy(&bt_sk(sk)->src, conn->src);
1366 bacpy(&bt_sk(sk)->dst, conn->dst);
1367
1368 l2cap_chan_add(conn, chan);
1369
1370 l2cap_chan_ready(chan);
1371
1372 clean:
1373 release_sock(parent);
1374 }
1375
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1377 {
1378 struct l2cap_chan *chan;
1379 struct hci_conn *hcon = conn->hcon;
1380
1381 BT_DBG("conn %p", conn);
1382
1383 if (!hcon->out && hcon->type == LE_LINK)
1384 l2cap_le_conn_ready(conn);
1385
1386 if (hcon->out && hcon->type == LE_LINK)
1387 smp_conn_security(hcon, hcon->pending_sec_level);
1388
1389 mutex_lock(&conn->chan_lock);
1390
1391 list_for_each_entry(chan, &conn->chan_l, list) {
1392
1393 l2cap_chan_lock(chan);
1394
1395 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 l2cap_chan_unlock(chan);
1397 continue;
1398 }
1399
1400 if (hcon->type == LE_LINK) {
1401 if (smp_conn_security(hcon, chan->sec_level))
1402 l2cap_chan_ready(chan);
1403
1404 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 struct sock *sk = chan->sk;
1406 __clear_chan_timer(chan);
1407 lock_sock(sk);
1408 __l2cap_state_change(chan, BT_CONNECTED);
1409 sk->sk_state_change(sk);
1410 release_sock(sk);
1411
1412 } else if (chan->state == BT_CONNECT)
1413 l2cap_do_start(chan);
1414
1415 l2cap_chan_unlock(chan);
1416 }
1417
1418 mutex_unlock(&conn->chan_lock);
1419 }
1420
1421 /* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 struct l2cap_chan *chan;
1425
1426 BT_DBG("conn %p", conn);
1427
1428 mutex_lock(&conn->chan_lock);
1429
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1433 }
1434
1435 mutex_unlock(&conn->chan_lock);
1436 }
1437
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 info_timer.work);
1442
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1445
1446 l2cap_conn_start(conn);
1447 }
1448
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1450 {
1451 struct l2cap_conn *conn = hcon->l2cap_data;
1452 struct l2cap_chan *chan, *l;
1453
1454 if (!conn)
1455 return;
1456
1457 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1458
1459 kfree_skb(conn->rx_skb);
1460
1461 mutex_lock(&conn->chan_lock);
1462
1463 /* Kill channels */
1464 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 l2cap_chan_hold(chan);
1466 l2cap_chan_lock(chan);
1467
1468 l2cap_chan_del(chan, err);
1469
1470 l2cap_chan_unlock(chan);
1471
1472 chan->ops->close(chan);
1473 l2cap_chan_put(chan);
1474 }
1475
1476 mutex_unlock(&conn->chan_lock);
1477
1478 hci_chan_del(conn->hchan);
1479
1480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 cancel_delayed_work_sync(&conn->info_timer);
1482
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 cancel_delayed_work_sync(&conn->security_timer);
1485 smp_chan_destroy(conn);
1486 }
1487
1488 hcon->l2cap_data = NULL;
1489 kfree(conn);
1490 }
1491
1492 static void security_timeout(struct work_struct *work)
1493 {
1494 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 security_timer.work);
1496
1497 BT_DBG("conn %p", conn);
1498
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 smp_chan_destroy(conn);
1501 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1502 }
1503 }
1504
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1506 {
1507 struct l2cap_conn *conn = hcon->l2cap_data;
1508 struct hci_chan *hchan;
1509
1510 if (conn || status)
1511 return conn;
1512
1513 hchan = hci_chan_create(hcon);
1514 if (!hchan)
1515 return NULL;
1516
1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 if (!conn) {
1519 hci_chan_del(hchan);
1520 return NULL;
1521 }
1522
1523 hcon->l2cap_data = conn;
1524 conn->hcon = hcon;
1525 conn->hchan = hchan;
1526
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528
1529 switch (hcon->type) {
1530 case AMP_LINK:
1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1533
1534 case LE_LINK:
1535 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu;
1537 break;
1538 }
1539 /* fall through */
1540
1541 default:
1542 conn->mtu = hcon->hdev->acl_mtu;
1543 break;
1544 }
1545
1546 conn->src = &hcon->hdev->bdaddr;
1547 conn->dst = &hcon->dst;
1548
1549 conn->feat_mask = 0;
1550
1551 spin_lock_init(&conn->lock);
1552 mutex_init(&conn->chan_lock);
1553
1554 INIT_LIST_HEAD(&conn->chan_l);
1555
1556 if (hcon->type == LE_LINK)
1557 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1558 else
1559 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1560
1561 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1562
1563 return conn;
1564 }
1565
1566 /* ---- Socket interface ---- */
1567
1568 /* Find channel with given psm and source/destination bdaddr.
1569 * Returns closest match.
1570 */
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1572 bdaddr_t *src,
1573 bdaddr_t *dst)
1574 {
1575 struct l2cap_chan *c, *c1 = NULL;
1576
1577 read_lock(&chan_list_lock);
1578
1579 list_for_each_entry(c, &chan_list, global_l) {
1580 struct sock *sk = c->sk;
1581
1582 if (state && c->state != state)
1583 continue;
1584
1585 if (c->psm == psm) {
1586 int src_match, dst_match;
1587 int src_any, dst_any;
1588
1589 /* Exact match. */
1590 src_match = !bacmp(&bt_sk(sk)->src, src);
1591 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 if (src_match && dst_match) {
1593 read_unlock(&chan_list_lock);
1594 return c;
1595 }
1596
1597 /* Closest match */
1598 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 if ((src_match && dst_any) || (src_any && dst_match) ||
1601 (src_any && dst_any))
1602 c1 = c;
1603 }
1604 }
1605
1606 read_unlock(&chan_list_lock);
1607
1608 return c1;
1609 }
1610
1611 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1612 bdaddr_t *dst, u8 dst_type)
1613 {
1614 struct sock *sk = chan->sk;
1615 bdaddr_t *src = &bt_sk(sk)->src;
1616 struct l2cap_conn *conn;
1617 struct hci_conn *hcon;
1618 struct hci_dev *hdev;
1619 __u8 auth_type;
1620 int err;
1621
1622 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1623 dst_type, __le16_to_cpu(psm));
1624
1625 hdev = hci_get_route(dst, src);
1626 if (!hdev)
1627 return -EHOSTUNREACH;
1628
1629 hci_dev_lock(hdev);
1630
1631 l2cap_chan_lock(chan);
1632
1633 /* PSM must be odd and lsb of upper byte must be 0 */
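/* That is, bit 0 of the PSM must be set and bit 8 must be clear, so a
 * valid PSM satisfies (psm & 0x0101) == 0x0001 (e.g. 0x0001 for SDP,
 * 0x0003 for RFCOMM).
 */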
1634 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1635 chan->chan_type != L2CAP_CHAN_RAW) {
1636 err = -EINVAL;
1637 goto done;
1638 }
1639
1640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1641 err = -EINVAL;
1642 goto done;
1643 }
1644
1645 switch (chan->mode) {
1646 case L2CAP_MODE_BASIC:
1647 break;
1648 case L2CAP_MODE_ERTM:
1649 case L2CAP_MODE_STREAMING:
1650 if (!disable_ertm)
1651 break;
1652 /* fall through */
1653 default:
1654 err = -ENOTSUPP;
1655 goto done;
1656 }
1657
1658 switch (chan->state) {
1659 case BT_CONNECT:
1660 case BT_CONNECT2:
1661 case BT_CONFIG:
1662 /* Already connecting */
1663 err = 0;
1664 goto done;
1665
1666 case BT_CONNECTED:
1667 /* Already connected */
1668 err = -EISCONN;
1669 goto done;
1670
1671 case BT_OPEN:
1672 case BT_BOUND:
1673 /* Can connect */
1674 break;
1675
1676 default:
1677 err = -EBADFD;
1678 goto done;
1679 }
1680
1681 /* Set destination address and psm */
1682 lock_sock(sk);
1683 bacpy(&bt_sk(sk)->dst, dst);
1684 release_sock(sk);
1685
1686 chan->psm = psm;
1687 chan->dcid = cid;
1688
1689 auth_type = l2cap_get_auth_type(chan);
1690
1691 if (chan->dcid == L2CAP_CID_LE_DATA)
1692 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1693 chan->sec_level, auth_type);
1694 else
1695 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1696 chan->sec_level, auth_type);
1697
1698 if (IS_ERR(hcon)) {
1699 err = PTR_ERR(hcon);
1700 goto done;
1701 }
1702
1703 conn = l2cap_conn_add(hcon, 0);
1704 if (!conn) {
1705 hci_conn_put(hcon);
1706 err = -ENOMEM;
1707 goto done;
1708 }
1709
1710 if (hcon->type == LE_LINK) {
1711 err = 0;
1712
1713 if (!list_empty(&conn->chan_l)) {
1714 err = -EBUSY;
1715 hci_conn_put(hcon);
1716 }
1717
1718 if (err)
1719 goto done;
1720 }
1721
1722 /* Update source addr of the socket */
1723 bacpy(src, conn->src);
1724
1725 l2cap_chan_unlock(chan);
1726 l2cap_chan_add(conn, chan);
1727 l2cap_chan_lock(chan);
1728
1729 l2cap_state_change(chan, BT_CONNECT);
1730 __set_chan_timer(chan, sk->sk_sndtimeo);
1731
1732 if (hcon->state == BT_CONNECTED) {
1733 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 __clear_chan_timer(chan);
1735 if (l2cap_chan_check_security(chan))
1736 l2cap_state_change(chan, BT_CONNECTED);
1737 } else
1738 l2cap_do_start(chan);
1739 }
1740
1741 err = 0;
1742
1743 done:
1744 l2cap_chan_unlock(chan);
1745 hci_dev_unlock(hdev);
1746 hci_dev_put(hdev);
1747 return err;
1748 }
1749
1750 int __l2cap_wait_ack(struct sock *sk)
1751 {
1752 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1753 DECLARE_WAITQUEUE(wait, current);
1754 int err = 0;
1755 int timeo = HZ/5;
1756
1757 add_wait_queue(sk_sleep(sk), &wait);
1758 set_current_state(TASK_INTERRUPTIBLE);
1759 while (chan->unacked_frames > 0 && chan->conn) {
1760 if (!timeo)
1761 timeo = HZ/5;
1762
1763 if (signal_pending(current)) {
1764 err = sock_intr_errno(timeo);
1765 break;
1766 }
1767
1768 release_sock(sk);
1769 timeo = schedule_timeout(timeo);
1770 lock_sock(sk);
1771 set_current_state(TASK_INTERRUPTIBLE);
1772
1773 err = sock_error(sk);
1774 if (err)
1775 break;
1776 }
1777 set_current_state(TASK_RUNNING);
1778 remove_wait_queue(sk_sleep(sk), &wait);
1779 return err;
1780 }
1781
1782 static void l2cap_monitor_timeout(struct work_struct *work)
1783 {
1784 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1785 monitor_timer.work);
1786
1787 BT_DBG("chan %p", chan);
1788
1789 l2cap_chan_lock(chan);
1790
1791 if (!chan->conn) {
1792 l2cap_chan_unlock(chan);
1793 l2cap_chan_put(chan);
1794 return;
1795 }
1796
1797 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1798
1799 l2cap_chan_unlock(chan);
1800 l2cap_chan_put(chan);
1801 }
1802
1803 static void l2cap_retrans_timeout(struct work_struct *work)
1804 {
1805 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1806 retrans_timer.work);
1807
1808 BT_DBG("chan %p", chan);
1809
1810 l2cap_chan_lock(chan);
1811
1812 if (!chan->conn) {
1813 l2cap_chan_unlock(chan);
1814 l2cap_chan_put(chan);
1815 return;
1816 }
1817
1818 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1819 l2cap_chan_unlock(chan);
1820 l2cap_chan_put(chan);
1821 }
1822
1823 static void l2cap_streaming_send(struct l2cap_chan *chan,
1824 struct sk_buff_head *skbs)
1825 {
1826 struct sk_buff *skb;
1827 struct l2cap_ctrl *control;
1828
1829 BT_DBG("chan %p, skbs %p", chan, skbs);
1830
1831 if (__chan_is_moving(chan))
1832 return;
1833
1834 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1835
1836 while (!skb_queue_empty(&chan->tx_q)) {
1837
1838 skb = skb_dequeue(&chan->tx_q);
1839
1840 bt_cb(skb)->control.retries = 1;
1841 control = &bt_cb(skb)->control;
1842
1843 control->reqseq = 0;
1844 control->txseq = chan->next_tx_seq;
1845
1846 __pack_control(chan, control, skb);
1847
1848 if (chan->fcs == L2CAP_FCS_CRC16) {
1849 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1850 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1851 }
1852
1853 l2cap_do_send(chan, skb);
1854
1855 BT_DBG("Sent txseq %u", control->txseq);
1856
1857 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1858 chan->frames_sent++;
1859 }
1860 }
1861
1862 static int l2cap_ertm_send(struct l2cap_chan *chan)
1863 {
1864 struct sk_buff *skb, *tx_skb;
1865 struct l2cap_ctrl *control;
1866 int sent = 0;
1867
1868 BT_DBG("chan %p", chan);
1869
1870 if (chan->state != BT_CONNECTED)
1871 return -ENOTCONN;
1872
1873 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1874 return 0;
1875
1876 if (__chan_is_moving(chan))
1877 return 0;
1878
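/* Transmit queued I-frames while there is data to send, the remote
 * transmit window has room and the ERTM tx state machine is in the
 * XMIT state.
 */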
1879 while (chan->tx_send_head &&
1880 chan->unacked_frames < chan->remote_tx_win &&
1881 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1882
1883 skb = chan->tx_send_head;
1884
1885 bt_cb(skb)->control.retries = 1;
1886 control = &bt_cb(skb)->control;
1887
1888 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1889 control->final = 1;
1890
1891 control->reqseq = chan->buffer_seq;
1892 chan->last_acked_seq = chan->buffer_seq;
1893 control->txseq = chan->next_tx_seq;
1894
1895 __pack_control(chan, control, skb);
1896
1897 if (chan->fcs == L2CAP_FCS_CRC16) {
1898 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1899 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1900 }
1901
1902 /* Clone after data has been modified. Data is assumed to be
1903 read-only (for locking purposes) on cloned sk_buffs.
1904 */
1905 tx_skb = skb_clone(skb, GFP_KERNEL);
1906
1907 if (!tx_skb)
1908 break;
1909
1910 __set_retrans_timer(chan);
1911
1912 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 chan->unacked_frames++;
1914 chan->frames_sent++;
1915 sent++;
1916
1917 if (skb_queue_is_last(&chan->tx_q, skb))
1918 chan->tx_send_head = NULL;
1919 else
1920 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1921
1922 l2cap_do_send(chan, tx_skb);
1923 BT_DBG("Sent txseq %u", control->txseq);
1924 }
1925
1926 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1927 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1928
1929 return sent;
1930 }
1931
1932 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1933 {
1934 struct l2cap_ctrl control;
1935 struct sk_buff *skb;
1936 struct sk_buff *tx_skb;
1937 u16 seq;
1938
1939 BT_DBG("chan %p", chan);
1940
1941 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1942 return;
1943
1944 if (__chan_is_moving(chan))
1945 return;
1946
1947 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1948 seq = l2cap_seq_list_pop(&chan->retrans_list);
1949
1950 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1951 if (!skb) {
1952 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1953 seq);
1954 continue;
1955 }
1956
1957 bt_cb(skb)->control.retries++;
1958 control = bt_cb(skb)->control;
1959
1960 if (chan->max_tx != 0 &&
1961 bt_cb(skb)->control.retries > chan->max_tx) {
1962 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1963 l2cap_send_disconn_req(chan, ECONNRESET);
1964 l2cap_seq_list_clear(&chan->retrans_list);
1965 break;
1966 }
1967
1968 control.reqseq = chan->buffer_seq;
1969 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1970 control.final = 1;
1971 else
1972 control.final = 0;
1973
1974 if (skb_cloned(skb)) {
1975 /* Cloned sk_buffs are read-only, so we need a
1976 * writeable copy
1977 */
1978 tx_skb = skb_copy(skb, GFP_KERNEL);
1979 } else {
1980 tx_skb = skb_clone(skb, GFP_KERNEL);
1981 }
1982
1983 if (!tx_skb) {
1984 l2cap_seq_list_clear(&chan->retrans_list);
1985 break;
1986 }
1987
1988 /* Update skb contents */
1989 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1990 put_unaligned_le32(__pack_extended_control(&control),
1991 tx_skb->data + L2CAP_HDR_SIZE);
1992 } else {
1993 put_unaligned_le16(__pack_enhanced_control(&control),
1994 tx_skb->data + L2CAP_HDR_SIZE);
1995 }
1996
1997 if (chan->fcs == L2CAP_FCS_CRC16) {
1998 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1999 put_unaligned_le16(fcs, skb_put(tx_skb,
2000 L2CAP_FCS_SIZE));
2001 }
2002
2003 l2cap_do_send(chan, tx_skb);
2004
2005 BT_DBG("Resent txseq %d", control.txseq);
2006
2007 chan->last_acked_seq = chan->buffer_seq;
2008 }
2009 }
2010
2011 static void l2cap_retransmit(struct l2cap_chan *chan,
2012 struct l2cap_ctrl *control)
2013 {
2014 BT_DBG("chan %p, control %p", chan, control);
2015
2016 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2017 l2cap_ertm_resend(chan);
2018 }
2019
2020 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2021 struct l2cap_ctrl *control)
2022 {
2023 struct sk_buff *skb;
2024
2025 BT_DBG("chan %p, control %p", chan, control);
2026
2027 if (control->poll)
2028 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2029
2030 l2cap_seq_list_clear(&chan->retrans_list);
2031
2032 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2033 return;
2034
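/* Queue for retransmission every unacked frame, starting with the frame
 * whose txseq matches reqseq and stopping before tx_send_head, then
 * resend them.
 */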
2035 if (chan->unacked_frames) {
2036 skb_queue_walk(&chan->tx_q, skb) {
2037 if (bt_cb(skb)->control.txseq == control->reqseq ||
2038 skb == chan->tx_send_head)
2039 break;
2040 }
2041
2042 skb_queue_walk_from(&chan->tx_q, skb) {
2043 if (skb == chan->tx_send_head)
2044 break;
2045
2046 l2cap_seq_list_append(&chan->retrans_list,
2047 bt_cb(skb)->control.txseq);
2048 }
2049
2050 l2cap_ertm_resend(chan);
2051 }
2052 }
2053
2054 static void l2cap_send_ack(struct l2cap_chan *chan)
2055 {
2056 struct l2cap_ctrl control;
2057 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2058 chan->last_acked_seq);
2059 int threshold;
2060
2061 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 chan, chan->last_acked_seq, chan->buffer_seq);
2063
2064 memset(&control, 0, sizeof(control));
2065 control.sframe = 1;
2066
2067 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2068 chan->rx_state == L2CAP_RX_STATE_RECV) {
2069 __clear_ack_timer(chan);
2070 control.super = L2CAP_SUPER_RNR;
2071 control.reqseq = chan->buffer_seq;
2072 l2cap_send_sframe(chan, &control);
2073 } else {
2074 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2075 l2cap_ertm_send(chan);
2076 /* If any i-frames were sent, they included an ack */
2077 if (chan->buffer_seq == chan->last_acked_seq)
2078 frames_to_ack = 0;
2079 }
2080
2081 /* Ack now if the window is 3/4ths full.
2082 * Calculate without mul or div
2083 */
2084 threshold = chan->ack_win;
2085 threshold += threshold << 1;
2086 threshold >>= 2;
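/* threshold is now (ack_win + 2 * ack_win) >> 2, i.e. three quarters of
 * the ack window, computed with only a shift and an add.
 */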
2087
2088 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2089 threshold);
2090
2091 if (frames_to_ack >= threshold) {
2092 __clear_ack_timer(chan);
2093 control.super = L2CAP_SUPER_RR;
2094 control.reqseq = chan->buffer_seq;
2095 l2cap_send_sframe(chan, &control);
2096 frames_to_ack = 0;
2097 }
2098
2099 if (frames_to_ack)
2100 __set_ack_timer(chan);
2101 }
2102 }
2103
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2105 struct msghdr *msg, int len,
2106 int count, struct sk_buff *skb)
2107 {
2108 struct l2cap_conn *conn = chan->conn;
2109 struct sk_buff **frag;
2110 int sent = 0;
2111
2112 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2113 return -EFAULT;
2114
2115 sent += count;
2116 len -= count;
2117
2118 /* Continuation fragments (no L2CAP header) */
2119 frag = &skb_shinfo(skb)->frag_list;
2120 while (len) {
2121 struct sk_buff *tmp;
2122
2123 count = min_t(unsigned int, conn->mtu, len);
2124
2125 tmp = chan->ops->alloc_skb(chan, count,
2126 msg->msg_flags & MSG_DONTWAIT);
2127 if (IS_ERR(tmp))
2128 return PTR_ERR(tmp);
2129
2130 *frag = tmp;
2131
2132 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2133 return -EFAULT;
2134
2135 (*frag)->priority = skb->priority;
2136
2137 sent += count;
2138 len -= count;
2139
2140 skb->len += (*frag)->len;
2141 skb->data_len += (*frag)->len;
2142
2143 frag = &(*frag)->next;
2144 }
2145
2146 return sent;
2147 }
2148
2149 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2150 struct msghdr *msg, size_t len,
2151 u32 priority)
2152 {
2153 struct l2cap_conn *conn = chan->conn;
2154 struct sk_buff *skb;
2155 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2156 struct l2cap_hdr *lh;
2157
2158 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2159
2160 count = min_t(unsigned int, (conn->mtu - hlen), len);
2161
2162 skb = chan->ops->alloc_skb(chan, count + hlen,
2163 msg->msg_flags & MSG_DONTWAIT);
2164 if (IS_ERR(skb))
2165 return skb;
2166
2167 skb->priority = priority;
2168
2169 /* Create L2CAP header */
2170 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2171 lh->cid = cpu_to_le16(chan->dcid);
2172 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2173 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2174
2175 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2176 if (unlikely(err < 0)) {
2177 kfree_skb(skb);
2178 return ERR_PTR(err);
2179 }
2180 return skb;
2181 }
2182
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 struct msghdr *msg, size_t len,
2185 u32 priority)
2186 {
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff *skb;
2189 int err, count;
2190 struct l2cap_hdr *lh;
2191
2192 BT_DBG("chan %p len %zu", chan, len);
2193
2194 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2195
2196 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 msg->msg_flags & MSG_DONTWAIT);
2198 if (IS_ERR(skb))
2199 return skb;
2200
2201 skb->priority = priority;
2202
2203 /* Create L2CAP header */
2204 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 lh->cid = cpu_to_le16(chan->dcid);
2206 lh->len = cpu_to_le16(len);
2207
2208 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 if (unlikely(err < 0)) {
2210 kfree_skb(skb);
2211 return ERR_PTR(err);
2212 }
2213 return skb;
2214 }
2215
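/* Build a single ERTM or streaming mode I-frame: L2CAP header, an
 * (initially zeroed) enhanced or extended control field, an optional
 * SDU length field for the first PDU of a segmented SDU, the payload,
 * and room for an FCS when CRC16 is in use. */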
2216 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2217 struct msghdr *msg, size_t len,
2218 u16 sdulen)
2219 {
2220 struct l2cap_conn *conn = chan->conn;
2221 struct sk_buff *skb;
2222 int err, count, hlen;
2223 struct l2cap_hdr *lh;
2224
2225 BT_DBG("chan %p len %zu", chan, len);
2226
2227 if (!conn)
2228 return ERR_PTR(-ENOTCONN);
2229
2230 hlen = __ertm_hdr_size(chan);
2231
2232 if (sdulen)
2233 hlen += L2CAP_SDULEN_SIZE;
2234
2235 if (chan->fcs == L2CAP_FCS_CRC16)
2236 hlen += L2CAP_FCS_SIZE;
2237
2238 count = min_t(unsigned int, (conn->mtu - hlen), len);
2239
2240 skb = chan->ops->alloc_skb(chan, count + hlen,
2241 msg->msg_flags & MSG_DONTWAIT);
2242 if (IS_ERR(skb))
2243 return skb;
2244
2245 /* Create L2CAP header */
2246 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2249
2250 /* Control header is populated later */
2251 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2252 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2253 else
2254 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2255
2256 if (sdulen)
2257 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2258
2259 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 if (unlikely(err < 0)) {
2261 kfree_skb(skb);
2262 return ERR_PTR(err);
2263 }
2264
2265 bt_cb(skb)->control.fcs = chan->fcs;
2266 bt_cb(skb)->control.retries = 0;
2267 return skb;
2268 }
2269
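/* Split an outgoing SDU into I-frame PDUs no larger than the remote
 * MPS (and the HCI MTU), tagging each with the appropriate SAR value:
 * unsegmented, start (carrying the SDU length), continue or end. */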
2270 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2271 struct sk_buff_head *seg_queue,
2272 struct msghdr *msg, size_t len)
2273 {
2274 struct sk_buff *skb;
2275 u16 sdu_len;
2276 size_t pdu_len;
2277 u8 sar;
2278
2279 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2280
2281 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 * so fragmented skbs are not used. The HCI layer's handling
2283 * of fragmented skbs is not compatible with ERTM's queueing.
2284 */
2285
2286 /* PDU size is derived from the HCI MTU */
2287 pdu_len = chan->conn->mtu;
2288
2289 /* Constrain PDU size for BR/EDR connections */
2290 if (!chan->hs_hcon)
2291 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2292
2293 /* Adjust for largest possible L2CAP overhead. */
2294 if (chan->fcs)
2295 pdu_len -= L2CAP_FCS_SIZE;
2296
2297 pdu_len -= __ertm_hdr_size(chan);
2298
2299 /* Remote device may have requested smaller PDUs */
2300 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2301
2302 if (len <= pdu_len) {
2303 sar = L2CAP_SAR_UNSEGMENTED;
2304 sdu_len = 0;
2305 pdu_len = len;
2306 } else {
2307 sar = L2CAP_SAR_START;
2308 sdu_len = len;
2309 pdu_len -= L2CAP_SDULEN_SIZE;
2310 }
2311
2312 while (len > 0) {
2313 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2314
2315 if (IS_ERR(skb)) {
2316 __skb_queue_purge(seg_queue);
2317 return PTR_ERR(skb);
2318 }
2319
2320 bt_cb(skb)->control.sar = sar;
2321 __skb_queue_tail(seg_queue, skb);
2322
2323 len -= pdu_len;
2324 if (sdu_len) {
2325 sdu_len = 0;
2326 pdu_len += L2CAP_SDULEN_SIZE;
2327 }
2328
2329 if (len <= pdu_len) {
2330 sar = L2CAP_SAR_END;
2331 pdu_len = len;
2332 } else {
2333 sar = L2CAP_SAR_CONTINUE;
2334 }
2335 }
2336
2337 return 0;
2338 }
2339
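/* Main entry point for outgoing data on a channel. Connectionless
 * channels get a single G-frame; basic mode sends one B-frame; ERTM
 * and streaming modes segment the SDU and hand the PDUs to the
 * transmitter state machine or send them directly. */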
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2341 u32 priority)
2342 {
2343 struct sk_buff *skb;
2344 int err;
2345 struct sk_buff_head seg_queue;
2346
2347 /* Connectionless channel */
2348 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2350 if (IS_ERR(skb))
2351 return PTR_ERR(skb);
2352
2353 l2cap_do_send(chan, skb);
2354 return len;
2355 }
2356
2357 switch (chan->mode) {
2358 case L2CAP_MODE_BASIC:
2359 /* Check outgoing MTU */
2360 if (len > chan->omtu)
2361 return -EMSGSIZE;
2362
2363 /* Create a basic PDU */
2364 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2365 if (IS_ERR(skb))
2366 return PTR_ERR(skb);
2367
2368 l2cap_do_send(chan, skb);
2369 err = len;
2370 break;
2371
2372 case L2CAP_MODE_ERTM:
2373 case L2CAP_MODE_STREAMING:
2374 /* Check outgoing MTU */
2375 if (len > chan->omtu) {
2376 err = -EMSGSIZE;
2377 break;
2378 }
2379
2380 __skb_queue_head_init(&seg_queue);
2381
2382 /* Do segmentation before calling in to the state machine,
2383 * since it's possible to block while waiting for memory
2384 * allocation.
2385 */
2386 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2387
2388 /* The channel could have been closed while segmenting,
2389 * check that it is still connected.
2390 */
2391 if (chan->state != BT_CONNECTED) {
2392 __skb_queue_purge(&seg_queue);
2393 err = -ENOTCONN;
2394 }
2395
2396 if (err)
2397 break;
2398
2399 if (chan->mode == L2CAP_MODE_ERTM)
2400 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2401 else
2402 l2cap_streaming_send(chan, &seg_queue);
2403
2404 err = len;
2405
2406 /* If the skbs were not queued for sending, they'll still be in
2407 * seg_queue and need to be purged.
2408 */
2409 __skb_queue_purge(&seg_queue);
2410 break;
2411
2412 default:
2413 BT_DBG("bad state %1.1x", chan->mode);
2414 err = -EBADFD;
2415 }
2416
2417 return err;
2418 }
2419
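/* Request retransmission of missing I-frames: send an SREJ for every
 * sequence number from expected_tx_seq up to (but not including)
 * txseq that is not already buffered in srej_q, and remember each
 * request in srej_list. */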
2420 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2421 {
2422 struct l2cap_ctrl control;
2423 u16 seq;
2424
2425 BT_DBG("chan %p, txseq %u", chan, txseq);
2426
2427 memset(&control, 0, sizeof(control));
2428 control.sframe = 1;
2429 control.super = L2CAP_SUPER_SREJ;
2430
2431 for (seq = chan->expected_tx_seq; seq != txseq;
2432 seq = __next_seq(chan, seq)) {
2433 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2434 control.reqseq = seq;
2435 l2cap_send_sframe(chan, &control);
2436 l2cap_seq_list_append(&chan->srej_list, seq);
2437 }
2438 }
2439
2440 chan->expected_tx_seq = __next_seq(chan, txseq);
2441 }
2442
2443 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2444 {
2445 struct l2cap_ctrl control;
2446
2447 BT_DBG("chan %p", chan);
2448
2449 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2450 return;
2451
2452 memset(&control, 0, sizeof(control));
2453 control.sframe = 1;
2454 control.super = L2CAP_SUPER_SREJ;
2455 control.reqseq = chan->srej_list.tail;
2456 l2cap_send_sframe(chan, &control);
2457 }
2458
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2460 {
2461 struct l2cap_ctrl control;
2462 u16 initial_head;
2463 u16 seq;
2464
2465 BT_DBG("chan %p, txseq %u", chan, txseq);
2466
2467 memset(&control, 0, sizeof(control));
2468 control.sframe = 1;
2469 control.super = L2CAP_SUPER_SREJ;
2470
2471 /* Capture initial list head to allow only one pass through the list. */
2472 initial_head = chan->srej_list.head;
2473
2474 do {
2475 seq = l2cap_seq_list_pop(&chan->srej_list);
2476 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2477 break;
2478
2479 control.reqseq = seq;
2480 l2cap_send_sframe(chan, &control);
2481 l2cap_seq_list_append(&chan->srej_list, seq);
2482 } while (chan->srej_list.head != initial_head);
2483 }
2484
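/* Process an incoming acknowledgement: drop every acknowledged frame
 * (expected_ack_seq up to, but not including, reqseq) from the tx
 * queue and stop the retransmission timer once no frames remain
 * unacked. */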
2485 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2486 {
2487 struct sk_buff *acked_skb;
2488 u16 ackseq;
2489
2490 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2491
2492 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2493 return;
2494
2495 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 chan->expected_ack_seq, chan->unacked_frames);
2497
2498 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2499 ackseq = __next_seq(chan, ackseq)) {
2500
2501 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2502 if (acked_skb) {
2503 skb_unlink(acked_skb, &chan->tx_q);
2504 kfree_skb(acked_skb);
2505 chan->unacked_frames--;
2506 }
2507 }
2508
2509 chan->expected_ack_seq = reqseq;
2510
2511 if (chan->unacked_frames == 0)
2512 __clear_retrans_timer(chan);
2513
2514 BT_DBG("unacked_frames %u", chan->unacked_frames);
2515 }
2516
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2518 {
2519 BT_DBG("chan %p", chan);
2520
2521 chan->expected_tx_seq = chan->buffer_seq;
2522 l2cap_seq_list_clear(&chan->srej_list);
2523 skb_queue_purge(&chan->srej_q);
2524 chan->rx_state = L2CAP_RX_STATE_RECV;
2525 }
2526
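/* Transmitter state machine, XMIT state: queue and send new data,
 * handle local busy transitions, process acknowledgements, and move
 * to WAIT_F after sending a poll (explicit poll or retransmission
 * timeout). */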
2527 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2528 struct l2cap_ctrl *control,
2529 struct sk_buff_head *skbs, u8 event)
2530 {
2531 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2532 event);
2533
2534 switch (event) {
2535 case L2CAP_EV_DATA_REQUEST:
2536 if (chan->tx_send_head == NULL)
2537 chan->tx_send_head = skb_peek(skbs);
2538
2539 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2540 l2cap_ertm_send(chan);
2541 break;
2542 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2543 BT_DBG("Enter LOCAL_BUSY");
2544 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2545
2546 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2547 /* The SREJ_SENT state must be aborted if we are to
2548 * enter the LOCAL_BUSY state.
2549 */
2550 l2cap_abort_rx_srej_sent(chan);
2551 }
2552
2553 l2cap_send_ack(chan);
2554
2555 break;
2556 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2557 BT_DBG("Exit LOCAL_BUSY");
2558 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2559
2560 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2561 struct l2cap_ctrl local_control;
2562
2563 memset(&local_control, 0, sizeof(local_control));
2564 local_control.sframe = 1;
2565 local_control.super = L2CAP_SUPER_RR;
2566 local_control.poll = 1;
2567 local_control.reqseq = chan->buffer_seq;
2568 l2cap_send_sframe(chan, &local_control);
2569
2570 chan->retry_count = 1;
2571 __set_monitor_timer(chan);
2572 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2573 }
2574 break;
2575 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2576 l2cap_process_reqseq(chan, control->reqseq);
2577 break;
2578 case L2CAP_EV_EXPLICIT_POLL:
2579 l2cap_send_rr_or_rnr(chan, 1);
2580 chan->retry_count = 1;
2581 __set_monitor_timer(chan);
2582 __clear_ack_timer(chan);
2583 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2584 break;
2585 case L2CAP_EV_RETRANS_TO:
2586 l2cap_send_rr_or_rnr(chan, 1);
2587 chan->retry_count = 1;
2588 __set_monitor_timer(chan);
2589 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2590 break;
2591 case L2CAP_EV_RECV_FBIT:
2592 /* Nothing to process */
2593 break;
2594 default:
2595 break;
2596 }
2597 }
2598
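/* Transmitter state machine, WAIT_F state (poll sent, waiting for the
 * final bit): new data is queued but not sent, a received F bit
 * returns the channel to XMIT, and each monitor timeout repeats the
 * poll until max_tx is reached, after which the channel is
 * disconnected. */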
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 struct l2cap_ctrl *control,
2601 struct sk_buff_head *skbs, u8 event)
2602 {
2603 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2604 event);
2605
2606 switch (event) {
2607 case L2CAP_EV_DATA_REQUEST:
2608 if (chan->tx_send_head == NULL)
2609 chan->tx_send_head = skb_peek(skbs);
2610 /* Queue data, but don't send. */
2611 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2612 break;
2613 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 BT_DBG("Enter LOCAL_BUSY");
2615 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2616
2617 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 /* The SREJ_SENT state must be aborted if we are to
2619 * enter the LOCAL_BUSY state.
2620 */
2621 l2cap_abort_rx_srej_sent(chan);
2622 }
2623
2624 l2cap_send_ack(chan);
2625
2626 break;
2627 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 BT_DBG("Exit LOCAL_BUSY");
2629 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2630
2631 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 struct l2cap_ctrl local_control;
2633 memset(&local_control, 0, sizeof(local_control));
2634 local_control.sframe = 1;
2635 local_control.super = L2CAP_SUPER_RR;
2636 local_control.poll = 1;
2637 local_control.reqseq = chan->buffer_seq;
2638 l2cap_send_sframe(chan, &local_control);
2639
2640 chan->retry_count = 1;
2641 __set_monitor_timer(chan);
2642 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2643 }
2644 break;
2645 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 l2cap_process_reqseq(chan, control->reqseq);
2647
2648 /* Fall through */
2649
2650 case L2CAP_EV_RECV_FBIT:
2651 if (control && control->final) {
2652 __clear_monitor_timer(chan);
2653 if (chan->unacked_frames > 0)
2654 __set_retrans_timer(chan);
2655 chan->retry_count = 0;
2656 chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2658 }
2659 break;
2660 case L2CAP_EV_EXPLICIT_POLL:
2661 /* Ignore */
2662 break;
2663 case L2CAP_EV_MONITOR_TO:
2664 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 l2cap_send_rr_or_rnr(chan, 1);
2666 __set_monitor_timer(chan);
2667 chan->retry_count++;
2668 } else {
2669 l2cap_send_disconn_req(chan, ECONNABORTED);
2670 }
2671 break;
2672 default:
2673 break;
2674 }
2675 }
2676
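/* Dispatch a transmitter event to the handler for the current
 * tx_state. */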
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 struct sk_buff_head *skbs, u8 event)
2679 {
2680 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 chan, control, skbs, event, chan->tx_state);
2682
2683 switch (chan->tx_state) {
2684 case L2CAP_TX_STATE_XMIT:
2685 l2cap_tx_state_xmit(chan, control, skbs, event);
2686 break;
2687 case L2CAP_TX_STATE_WAIT_F:
2688 l2cap_tx_state_wait_f(chan, control, skbs, event);
2689 break;
2690 default:
2691 /* Ignore event */
2692 break;
2693 }
2694 }
2695
2696 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2697 struct l2cap_ctrl *control)
2698 {
2699 BT_DBG("chan %p, control %p", chan, control);
2700 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2701 }
2702
2703 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control)
2705 {
2706 BT_DBG("chan %p, control %p", chan, control);
2707 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2708 }
2709
2710 /* Copy frame to all raw sockets on that connection */
2711 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2712 {
2713 struct sk_buff *nskb;
2714 struct l2cap_chan *chan;
2715
2716 BT_DBG("conn %p", conn);
2717
2718 mutex_lock(&conn->chan_lock);
2719
2720 list_for_each_entry(chan, &conn->chan_l, list) {
2721 struct sock *sk = chan->sk;
2722 if (chan->chan_type != L2CAP_CHAN_RAW)
2723 continue;
2724
2725 /* Don't send frame to the socket it came from */
2726 if (skb->sk == sk)
2727 continue;
2728 nskb = skb_clone(skb, GFP_KERNEL);
2729 if (!nskb)
2730 continue;
2731
2732 if (chan->ops->recv(chan, nskb))
2733 kfree_skb(nskb);
2734 }
2735
2736 mutex_unlock(&conn->chan_lock);
2737 }
2738
2739 /* ---- L2CAP signalling commands ---- */
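/* Build a signalling PDU (L2CAP header on the signalling CID, command
 * header, then the command payload), fragmenting the payload into
 * frag_list skbs when it exceeds the connection MTU. */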
2740 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2741 u8 ident, u16 dlen, void *data)
2742 {
2743 struct sk_buff *skb, **frag;
2744 struct l2cap_cmd_hdr *cmd;
2745 struct l2cap_hdr *lh;
2746 int len, count;
2747
2748 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2749 conn, code, ident, dlen);
2750
2751 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2752 count = min_t(unsigned int, conn->mtu, len);
2753
2754 skb = bt_skb_alloc(count, GFP_KERNEL);
2755 if (!skb)
2756 return NULL;
2757
2758 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2759 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2760
2761 if (conn->hcon->type == LE_LINK)
2762 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2763 else
2764 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2765
2766 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2767 cmd->code = code;
2768 cmd->ident = ident;
2769 cmd->len = cpu_to_le16(dlen);
2770
2771 if (dlen) {
2772 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2773 memcpy(skb_put(skb, count), data, count);
2774 data += count;
2775 }
2776
2777 len -= skb->len;
2778
2779 /* Continuation fragments (no L2CAP header) */
2780 frag = &skb_shinfo(skb)->frag_list;
2781 while (len) {
2782 count = min_t(unsigned int, conn->mtu, len);
2783
2784 *frag = bt_skb_alloc(count, GFP_KERNEL);
2785 if (!*frag)
2786 goto fail;
2787
2788 memcpy(skb_put(*frag, count), data, count);
2789
2790 len -= count;
2791 data += count;
2792
2793 frag = &(*frag)->next;
2794 }
2795
2796 return skb;
2797
2798 fail:
2799 kfree_skb(skb);
2800 return NULL;
2801 }
2802
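/* Read a single configuration option at *ptr and advance the pointer.
 * One-, two- and four-byte values are returned by value; longer
 * options are returned as a pointer to the raw data. */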
2803 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2804 unsigned long *val)
2805 {
2806 struct l2cap_conf_opt *opt = *ptr;
2807 int len;
2808
2809 len = L2CAP_CONF_OPT_SIZE + opt->len;
2810 *ptr += len;
2811
2812 *type = opt->type;
2813 *olen = opt->len;
2814
2815 switch (opt->len) {
2816 case 1:
2817 *val = *((u8 *) opt->val);
2818 break;
2819
2820 case 2:
2821 *val = get_unaligned_le16(opt->val);
2822 break;
2823
2824 case 4:
2825 *val = get_unaligned_le32(opt->val);
2826 break;
2827
2828 default:
2829 *val = (unsigned long) opt->val;
2830 break;
2831 }
2832
2833 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2834 return len;
2835 }
2836
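/* Append a configuration option (type, length, value) at *ptr and
 * advance the pointer past it. */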
2837 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2838 {
2839 struct l2cap_conf_opt *opt = *ptr;
2840
2841 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2842
2843 opt->type = type;
2844 opt->len = len;
2845
2846 switch (len) {
2847 case 1:
2848 *((u8 *) opt->val) = val;
2849 break;
2850
2851 case 2:
2852 put_unaligned_le16(val, opt->val);
2853 break;
2854
2855 case 4:
2856 put_unaligned_le32(val, opt->val);
2857 break;
2858
2859 default:
2860 memcpy(opt->val, (void *) val, len);
2861 break;
2862 }
2863
2864 *ptr += L2CAP_CONF_OPT_SIZE + len;
2865 }
2866
2867 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2868 {
2869 struct l2cap_conf_efs efs;
2870
2871 switch (chan->mode) {
2872 case L2CAP_MODE_ERTM:
2873 efs.id = chan->local_id;
2874 efs.stype = chan->local_stype;
2875 efs.msdu = cpu_to_le16(chan->local_msdu);
2876 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2877 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2878 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2879 break;
2880
2881 case L2CAP_MODE_STREAMING:
2882 efs.id = 1;
2883 efs.stype = L2CAP_SERV_BESTEFFORT;
2884 efs.msdu = cpu_to_le16(chan->local_msdu);
2885 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2886 efs.acc_lat = 0;
2887 efs.flush_to = 0;
2888 break;
2889
2890 default:
2891 return;
2892 }
2893
2894 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2895 (unsigned long) &efs);
2896 }
2897
2898 static void l2cap_ack_timeout(struct work_struct *work)
2899 {
2900 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2901 ack_timer.work);
2902 u16 frames_to_ack;
2903
2904 BT_DBG("chan %p", chan);
2905
2906 l2cap_chan_lock(chan);
2907
2908 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2909 chan->last_acked_seq);
2910
2911 if (frames_to_ack)
2912 l2cap_send_rr_or_rnr(chan, 0);
2913
2914 l2cap_chan_unlock(chan);
2915 l2cap_chan_put(chan);
2916 }
2917
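/* Reset sequence numbers and transmit state for ERTM and streaming
 * modes. For ERTM, also initialize the retransmission, monitor and
 * ack timers, the SREJ receive queue, and the sequence lists sized to
 * the negotiated transmit windows. */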
2918 int l2cap_ertm_init(struct l2cap_chan *chan)
2919 {
2920 int err;
2921
2922 chan->next_tx_seq = 0;
2923 chan->expected_tx_seq = 0;
2924 chan->expected_ack_seq = 0;
2925 chan->unacked_frames = 0;
2926 chan->buffer_seq = 0;
2927 chan->frames_sent = 0;
2928 chan->last_acked_seq = 0;
2929 chan->sdu = NULL;
2930 chan->sdu_last_frag = NULL;
2931 chan->sdu_len = 0;
2932
2933 skb_queue_head_init(&chan->tx_q);
2934
2935 chan->local_amp_id = 0;
2936 chan->move_id = 0;
2937 chan->move_state = L2CAP_MOVE_STABLE;
2938 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2939
2940 if (chan->mode != L2CAP_MODE_ERTM)
2941 return 0;
2942
2943 chan->rx_state = L2CAP_RX_STATE_RECV;
2944 chan->tx_state = L2CAP_TX_STATE_XMIT;
2945
2946 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2947 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2948 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2949
2950 skb_queue_head_init(&chan->srej_q);
2951
2952 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2953 if (err < 0)
2954 return err;
2955
2956 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2957 if (err < 0)
2958 l2cap_seq_list_free(&chan->srej_list);
2959
2960 return err;
2961 }
2962
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2964 {
2965 switch (mode) {
2966 case L2CAP_MODE_STREAMING:
2967 case L2CAP_MODE_ERTM:
2968 if (l2cap_mode_supported(mode, remote_feat_mask))
2969 return mode;
2970 /* fall through */
2971 default:
2972 return L2CAP_MODE_BASIC;
2973 }
2974 }
2975
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2977 {
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2979 }
2980
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2982 {
2983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2984 }
2985
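/* Choose ERTM retransmission and monitor timeouts. For AMP channels
 * they are derived from the controller's best-effort flush timeout
 * (clamped to 16 bits); otherwise the spec default values are used. */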
2986 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2987 struct l2cap_conf_rfc *rfc)
2988 {
2989 if (chan->local_amp_id && chan->hs_hcon) {
2990 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2991
2992 /* Class 1 devices must have ERTM timeouts
2993 * exceeding the Link Supervision Timeout. The
2994 * default Link Supervision Timeout for AMP
2995 * controllers is 10 seconds.
2996 *
2997 * Class 1 devices use 0xffffffff for their
2998 * best-effort flush timeout, so the clamping logic
2999 * will result in a timeout that meets the above
3000 * requirement. ERTM timeouts are 16-bit values, so
3001 * the maximum timeout is 65.535 seconds.
3002 */
3003
3004 /* Convert timeout to milliseconds and round */
3005 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3006
3007 /* This is the recommended formula for class 2 devices
3008 * that start ERTM timers when packets are sent to the
3009 * controller.
3010 */
3011 ertm_to = 3 * ertm_to + 500;
3012
3013 if (ertm_to > 0xffff)
3014 ertm_to = 0xffff;
3015
3016 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3017 rfc->monitor_timeout = rfc->retrans_timeout;
3018 } else {
3019 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3020 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3021 }
3022 }
3023
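/* Select the control field format for ERTM: use the extended control
 * field (and extended window) when the requested window exceeds the
 * default and EWS is supported, otherwise clamp tx_win to the default
 * window size. */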
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3025 {
3026 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 __l2cap_ews_supported(chan)) {
3028 /* use extended control field */
3029 set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3031 } else {
3032 chan->tx_win = min_t(u16, chan->tx_win,
3033 L2CAP_DEFAULT_TX_WINDOW);
3034 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3035 }
3036 chan->ack_win = chan->tx_win;
3037 }
3038
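/* Build our Configure Request: MTU if non-default, the RFC option
 * describing the selected mode (window, max transmit, timeouts, MPS),
 * and, where applicable, EFS, EWS and FCS options. */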
3039 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3040 {
3041 struct l2cap_conf_req *req = data;
3042 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3043 void *ptr = req->data;
3044 u16 size;
3045
3046 BT_DBG("chan %p", chan);
3047
3048 if (chan->num_conf_req || chan->num_conf_rsp)
3049 goto done;
3050
3051 switch (chan->mode) {
3052 case L2CAP_MODE_STREAMING:
3053 case L2CAP_MODE_ERTM:
3054 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3055 break;
3056
3057 if (__l2cap_efs_supported(chan))
3058 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3059
3060 /* fall through */
3061 default:
3062 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3063 break;
3064 }
3065
3066 done:
3067 if (chan->imtu != L2CAP_DEFAULT_MTU)
3068 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3069
3070 switch (chan->mode) {
3071 case L2CAP_MODE_BASIC:
3072 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3073 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3074 break;
3075
3076 rfc.mode = L2CAP_MODE_BASIC;
3077 rfc.txwin_size = 0;
3078 rfc.max_transmit = 0;
3079 rfc.retrans_timeout = 0;
3080 rfc.monitor_timeout = 0;
3081 rfc.max_pdu_size = 0;
3082
3083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3084 (unsigned long) &rfc);
3085 break;
3086
3087 case L2CAP_MODE_ERTM:
3088 rfc.mode = L2CAP_MODE_ERTM;
3089 rfc.max_transmit = chan->max_tx;
3090
3091 __l2cap_set_ertm_timeouts(chan, &rfc);
3092
3093 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3094 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3095 L2CAP_FCS_SIZE);
3096 rfc.max_pdu_size = cpu_to_le16(size);
3097
3098 l2cap_txwin_setup(chan);
3099
3100 rfc.txwin_size = min_t(u16, chan->tx_win,
3101 L2CAP_DEFAULT_TX_WINDOW);
3102
3103 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3104 (unsigned long) &rfc);
3105
3106 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3107 l2cap_add_opt_efs(&ptr, chan);
3108
3109 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3110 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3111 chan->tx_win);
3112
3113 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3114 if (chan->fcs == L2CAP_FCS_NONE ||
3115 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3116 chan->fcs = L2CAP_FCS_NONE;
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3118 chan->fcs);
3119 }
3120 break;
3121
3122 case L2CAP_MODE_STREAMING:
3123 l2cap_txwin_setup(chan);
3124 rfc.mode = L2CAP_MODE_STREAMING;
3125 rfc.txwin_size = 0;
3126 rfc.max_transmit = 0;
3127 rfc.retrans_timeout = 0;
3128 rfc.monitor_timeout = 0;
3129
3130 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3131 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3132 L2CAP_FCS_SIZE);
3133 rfc.max_pdu_size = cpu_to_le16(size);
3134
3135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3136 (unsigned long) &rfc);
3137
3138 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3139 l2cap_add_opt_efs(&ptr, chan);
3140
3141 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3142 if (chan->fcs == L2CAP_FCS_NONE ||
3143 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3144 chan->fcs = L2CAP_FCS_NONE;
3145 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3146 chan->fcs);
3147 }
3148 break;
3149 }
3150
3151 req->dcid = cpu_to_le16(chan->dcid);
3152 req->flags = __constant_cpu_to_le16(0);
3153
3154 return ptr - data;
3155 }
3156
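/* Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response in 'data', validating the requested
 * mode, MTU, RFC, FCS, EFS and EWS options and recording the remote
 * parameters. Returns the response length or a negative error. */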
3157 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3158 {
3159 struct l2cap_conf_rsp *rsp = data;
3160 void *ptr = rsp->data;
3161 void *req = chan->conf_req;
3162 int len = chan->conf_len;
3163 int type, hint, olen;
3164 unsigned long val;
3165 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3166 struct l2cap_conf_efs efs;
3167 u8 remote_efs = 0;
3168 u16 mtu = L2CAP_DEFAULT_MTU;
3169 u16 result = L2CAP_CONF_SUCCESS;
3170 u16 size;
3171
3172 BT_DBG("chan %p", chan);
3173
3174 while (len >= L2CAP_CONF_OPT_SIZE) {
3175 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3176
3177 hint = type & L2CAP_CONF_HINT;
3178 type &= L2CAP_CONF_MASK;
3179
3180 switch (type) {
3181 case L2CAP_CONF_MTU:
3182 mtu = val;
3183 break;
3184
3185 case L2CAP_CONF_FLUSH_TO:
3186 chan->flush_to = val;
3187 break;
3188
3189 case L2CAP_CONF_QOS:
3190 break;
3191
3192 case L2CAP_CONF_RFC:
3193 if (olen == sizeof(rfc))
3194 memcpy(&rfc, (void *) val, olen);
3195 break;
3196
3197 case L2CAP_CONF_FCS:
3198 if (val == L2CAP_FCS_NONE)
3199 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3200 break;
3201
3202 case L2CAP_CONF_EFS:
3203 remote_efs = 1;
3204 if (olen == sizeof(efs))
3205 memcpy(&efs, (void *) val, olen);
3206 break;
3207
3208 case L2CAP_CONF_EWS:
3209 if (!enable_hs)
3210 return -ECONNREFUSED;
3211
3212 set_bit(FLAG_EXT_CTRL, &chan->flags);
3213 set_bit(CONF_EWS_RECV, &chan->conf_state);
3214 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3215 chan->remote_tx_win = val;
3216 break;
3217
3218 default:
3219 if (hint)
3220 break;
3221
3222 result = L2CAP_CONF_UNKNOWN;
3223 *((u8 *) ptr++) = type;
3224 break;
3225 }
3226 }
3227
3228 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3229 goto done;
3230
3231 switch (chan->mode) {
3232 case L2CAP_MODE_STREAMING:
3233 case L2CAP_MODE_ERTM:
3234 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3235 chan->mode = l2cap_select_mode(rfc.mode,
3236 chan->conn->feat_mask);
3237 break;
3238 }
3239
3240 if (remote_efs) {
3241 if (__l2cap_efs_supported(chan))
3242 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3243 else
3244 return -ECONNREFUSED;
3245 }
3246
3247 if (chan->mode != rfc.mode)
3248 return -ECONNREFUSED;
3249
3250 break;
3251 }
3252
3253 done:
3254 if (chan->mode != rfc.mode) {
3255 result = L2CAP_CONF_UNACCEPT;
3256 rfc.mode = chan->mode;
3257
3258 if (chan->num_conf_rsp == 1)
3259 return -ECONNREFUSED;
3260
3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3262 (unsigned long) &rfc);
3263 }
3264
3265 if (result == L2CAP_CONF_SUCCESS) {
3266 /* Configure output options and let the other side know
3267 * which ones we don't like. */
3268
3269 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3270 result = L2CAP_CONF_UNACCEPT;
3271 else {
3272 chan->omtu = mtu;
3273 set_bit(CONF_MTU_DONE, &chan->conf_state);
3274 }
3275 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3276
3277 if (remote_efs) {
3278 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3279 efs.stype != L2CAP_SERV_NOTRAFIC &&
3280 efs.stype != chan->local_stype) {
3281
3282 result = L2CAP_CONF_UNACCEPT;
3283
3284 if (chan->num_conf_req >= 1)
3285 return -ECONNREFUSED;
3286
3287 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3288 sizeof(efs),
3289 (unsigned long) &efs);
3290 } else {
3291 /* Send PENDING Conf Rsp */
3292 result = L2CAP_CONF_PENDING;
3293 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3294 }
3295 }
3296
3297 switch (rfc.mode) {
3298 case L2CAP_MODE_BASIC:
3299 chan->fcs = L2CAP_FCS_NONE;
3300 set_bit(CONF_MODE_DONE, &chan->conf_state);
3301 break;
3302
3303 case L2CAP_MODE_ERTM:
3304 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3305 chan->remote_tx_win = rfc.txwin_size;
3306 else
3307 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3308
3309 chan->remote_max_tx = rfc.max_transmit;
3310
3311 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3312 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3313 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3314 rfc.max_pdu_size = cpu_to_le16(size);
3315 chan->remote_mps = size;
3316
3317 __l2cap_set_ertm_timeouts(chan, &rfc);
3318
3319 set_bit(CONF_MODE_DONE, &chan->conf_state);
3320
3321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3322 sizeof(rfc), (unsigned long) &rfc);
3323
3324 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3325 chan->remote_id = efs.id;
3326 chan->remote_stype = efs.stype;
3327 chan->remote_msdu = le16_to_cpu(efs.msdu);
3328 chan->remote_flush_to =
3329 le32_to_cpu(efs.flush_to);
3330 chan->remote_acc_lat =
3331 le32_to_cpu(efs.acc_lat);
3332 chan->remote_sdu_itime =
3333 le32_to_cpu(efs.sdu_itime);
3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3335 sizeof(efs),
3336 (unsigned long) &efs);
3337 }
3338 break;
3339
3340 case L2CAP_MODE_STREAMING:
3341 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3342 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3343 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3344 rfc.max_pdu_size = cpu_to_le16(size);
3345 chan->remote_mps = size;
3346
3347 set_bit(CONF_MODE_DONE, &chan->conf_state);
3348
3349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3350 (unsigned long) &rfc);
3351
3352 break;
3353
3354 default:
3355 result = L2CAP_CONF_UNACCEPT;
3356
3357 memset(&rfc, 0, sizeof(rfc));
3358 rfc.mode = chan->mode;
3359 }
3360
3361 if (result == L2CAP_CONF_SUCCESS)
3362 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3363 }
3364 rsp->scid = cpu_to_le16(chan->dcid);
3365 rsp->result = cpu_to_le16(result);
3366 rsp->flags = __constant_cpu_to_le16(0);
3367
3368 return ptr - data;
3369 }
3370
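/* Process the peer's Configure Response and build a follow-up
 * Configure Request in 'data' reflecting the adjusted options. On
 * success or pending results, apply the negotiated ERTM/streaming
 * parameters. Returns the request length or a negative error. */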
3371 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 void *data, u16 *result)
3373 {
3374 struct l2cap_conf_req *req = data;
3375 void *ptr = req->data;
3376 int type, olen;
3377 unsigned long val;
3378 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3379 struct l2cap_conf_efs efs;
3380
3381 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3382
3383 while (len >= L2CAP_CONF_OPT_SIZE) {
3384 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3385
3386 switch (type) {
3387 case L2CAP_CONF_MTU:
3388 if (val < L2CAP_DEFAULT_MIN_MTU) {
3389 *result = L2CAP_CONF_UNACCEPT;
3390 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3391 } else
3392 chan->imtu = val;
3393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3394 break;
3395
3396 case L2CAP_CONF_FLUSH_TO:
3397 chan->flush_to = val;
3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3399 2, chan->flush_to);
3400 break;
3401
3402 case L2CAP_CONF_RFC:
3403 if (olen == sizeof(rfc))
3404 memcpy(&rfc, (void *)val, olen);
3405
3406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3407 rfc.mode != chan->mode)
3408 return -ECONNREFUSED;
3409
3410 chan->fcs = 0;
3411
3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3413 sizeof(rfc), (unsigned long) &rfc);
3414 break;
3415
3416 case L2CAP_CONF_EWS:
3417 chan->ack_win = min_t(u16, val, chan->ack_win);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3419 chan->tx_win);
3420 break;
3421
3422 case L2CAP_CONF_EFS:
3423 if (olen == sizeof(efs))
3424 memcpy(&efs, (void *)val, olen);
3425
3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != chan->local_stype)
3429 return -ECONNREFUSED;
3430
3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3432 (unsigned long) &efs);
3433 break;
3434 }
3435 }
3436
3437 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3438 return -ECONNREFUSED;
3439
3440 chan->mode = rfc.mode;
3441
3442 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3443 switch (rfc.mode) {
3444 case L2CAP_MODE_ERTM:
3445 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3446 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3447 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3448 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3449 chan->ack_win = min_t(u16, chan->ack_win,
3450 rfc.txwin_size);
3451
3452 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3453 chan->local_msdu = le16_to_cpu(efs.msdu);
3454 chan->local_sdu_itime =
3455 le32_to_cpu(efs.sdu_itime);
3456 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3457 chan->local_flush_to =
3458 le32_to_cpu(efs.flush_to);
3459 }
3460 break;
3461
3462 case L2CAP_MODE_STREAMING:
3463 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3464 }
3465 }
3466
3467 req->dcid = cpu_to_le16(chan->dcid);
3468 req->flags = __constant_cpu_to_le16(0);
3469
3470 return ptr - data;
3471 }
3472
3473 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3474 u16 result, u16 flags)
3475 {
3476 struct l2cap_conf_rsp *rsp = data;
3477 void *ptr = rsp->data;
3478
3479 BT_DBG("chan %p", chan);
3480
3481 rsp->scid = cpu_to_le16(chan->dcid);
3482 rsp->result = cpu_to_le16(result);
3483 rsp->flags = cpu_to_le16(flags);
3484
3485 return ptr - data;
3486 }
3487
3488 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3489 {
3490 struct l2cap_conn_rsp rsp;
3491 struct l2cap_conn *conn = chan->conn;
3492 u8 buf[128];
3493 u8 rsp_code;
3494
3495 rsp.scid = cpu_to_le16(chan->dcid);
3496 rsp.dcid = cpu_to_le16(chan->scid);
3497 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3498 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3499
3500 if (chan->hs_hcon)
3501 rsp_code = L2CAP_CREATE_CHAN_RSP;
3502 else
3503 rsp_code = L2CAP_CONN_RSP;
3504
3505 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3506
3507 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3508
3509 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3510 return;
3511
3512 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3513 l2cap_build_conf_req(chan, buf), buf);
3514 chan->num_conf_req++;
3515 }
3516
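/* Extract the finally negotiated RFC (and extended window) parameters
 * from a successful Configure Response and apply them to the channel. */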
3517 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3518 {
3519 int type, olen;
3520 unsigned long val;
3521 /* Use sane default values in case a misbehaving remote device
3522 * did not send an RFC or extended window size option.
3523 */
3524 u16 txwin_ext = chan->ack_win;
3525 struct l2cap_conf_rfc rfc = {
3526 .mode = chan->mode,
3527 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3528 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3529 .max_pdu_size = cpu_to_le16(chan->imtu),
3530 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3531 };
3532
3533 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3534
3535 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3536 return;
3537
3538 while (len >= L2CAP_CONF_OPT_SIZE) {
3539 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3540
3541 switch (type) {
3542 case L2CAP_CONF_RFC:
3543 if (olen == sizeof(rfc))
3544 memcpy(&rfc, (void *)val, olen);
3545 break;
3546 case L2CAP_CONF_EWS:
3547 txwin_ext = val;
3548 break;
3549 }
3550 }
3551
3552 switch (rfc.mode) {
3553 case L2CAP_MODE_ERTM:
3554 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3555 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3556 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3557 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3558 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3559 else
3560 chan->ack_win = min_t(u16, chan->ack_win,
3561 rfc.txwin_size);
3562 break;
3563 case L2CAP_MODE_STREAMING:
3564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3565 }
3566 }
3567
3568 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3569 struct l2cap_cmd_hdr *cmd, u8 *data)
3570 {
3571 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3572
3573 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3574 return 0;
3575
3576 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3577 cmd->ident == conn->info_ident) {
3578 cancel_delayed_work(&conn->info_timer);
3579
3580 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3581 conn->info_ident = 0;
3582
3583 l2cap_conn_start(conn);
3584 }
3585
3586 return 0;
3587 }
3588
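/* Handle an incoming Connection Request (or Create Channel Request
 * for AMP): look up a listening channel for the PSM, perform security
 * checks, create the new channel and send the appropriate response,
 * possibly deferring with a pending result. */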
3589 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3590 struct l2cap_cmd_hdr *cmd,
3591 u8 *data, u8 rsp_code, u8 amp_id)
3592 {
3593 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3594 struct l2cap_conn_rsp rsp;
3595 struct l2cap_chan *chan = NULL, *pchan;
3596 struct sock *parent, *sk = NULL;
3597 int result, status = L2CAP_CS_NO_INFO;
3598
3599 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3600 __le16 psm = req->psm;
3601
3602 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3603
3604 /* Check if we have a socket listening on this PSM */
3605 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3606 if (!pchan) {
3607 result = L2CAP_CR_BAD_PSM;
3608 goto sendresp;
3609 }
3610
3611 parent = pchan->sk;
3612
3613 mutex_lock(&conn->chan_lock);
3614 lock_sock(parent);
3615
3616 /* Check if the ACL is secure enough (if not SDP) */
3617 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3618 !hci_conn_check_link_mode(conn->hcon)) {
3619 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3620 result = L2CAP_CR_SEC_BLOCK;
3621 goto response;
3622 }
3623
3624 result = L2CAP_CR_NO_MEM;
3625
3626 /* Check if we already have a channel with that DCID */
3627 if (__l2cap_get_chan_by_dcid(conn, scid))
3628 goto response;
3629
3630 chan = pchan->ops->new_connection(pchan);
3631 if (!chan)
3632 goto response;
3633
3634 sk = chan->sk;
3635
3636 hci_conn_hold(conn->hcon);
3637
3638 bacpy(&bt_sk(sk)->src, conn->src);
3639 bacpy(&bt_sk(sk)->dst, conn->dst);
3640 chan->psm = psm;
3641 chan->dcid = scid;
3642 chan->local_amp_id = amp_id;
3643
3644 __l2cap_chan_add(conn, chan);
3645
3646 dcid = chan->scid;
3647
3648 __set_chan_timer(chan, sk->sk_sndtimeo);
3649
3650 chan->ident = cmd->ident;
3651
3652 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3653 if (l2cap_chan_check_security(chan)) {
3654 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3655 __l2cap_state_change(chan, BT_CONNECT2);
3656 result = L2CAP_CR_PEND;
3657 status = L2CAP_CS_AUTHOR_PEND;
3658 chan->ops->defer(chan);
3659 } else {
3660 /* Force pending result for AMP controllers.
3661 * The connection will succeed after the
3662 * physical link is up.
3663 */
3664 if (amp_id) {
3665 __l2cap_state_change(chan, BT_CONNECT2);
3666 result = L2CAP_CR_PEND;
3667 } else {
3668 __l2cap_state_change(chan, BT_CONFIG);
3669 result = L2CAP_CR_SUCCESS;
3670 }
3671 status = L2CAP_CS_NO_INFO;
3672 }
3673 } else {
3674 __l2cap_state_change(chan, BT_CONNECT2);
3675 result = L2CAP_CR_PEND;
3676 status = L2CAP_CS_AUTHEN_PEND;
3677 }
3678 } else {
3679 __l2cap_state_change(chan, BT_CONNECT2);
3680 result = L2CAP_CR_PEND;
3681 status = L2CAP_CS_NO_INFO;
3682 }
3683
3684 response:
3685 release_sock(parent);
3686 mutex_unlock(&conn->chan_lock);
3687
3688 sendresp:
3689 rsp.scid = cpu_to_le16(scid);
3690 rsp.dcid = cpu_to_le16(dcid);
3691 rsp.result = cpu_to_le16(result);
3692 rsp.status = cpu_to_le16(status);
3693 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3694
3695 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3696 struct l2cap_info_req info;
3697 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3698
3699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3700 conn->info_ident = l2cap_get_ident(conn);
3701
3702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3703
3704 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3705 sizeof(info), &info);
3706 }
3707
3708 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3709 result == L2CAP_CR_SUCCESS) {
3710 u8 buf[128];
3711 set_bit(CONF_REQ_SENT, &chan->conf_state);
3712 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3713 l2cap_build_conf_req(chan, buf), buf);
3714 chan->num_conf_req++;
3715 }
3716
3717 return chan;
3718 }
3719
3720 static int l2cap_connect_req(struct l2cap_conn *conn,
3721 struct l2cap_cmd_hdr *cmd, u8 *data)
3722 {
3723 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3724 return 0;
3725 }
3726
3727 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u8 *data)
3729 {
3730 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3731 u16 scid, dcid, result, status;
3732 struct l2cap_chan *chan;
3733 u8 req[128];
3734 int err;
3735
3736 scid = __le16_to_cpu(rsp->scid);
3737 dcid = __le16_to_cpu(rsp->dcid);
3738 result = __le16_to_cpu(rsp->result);
3739 status = __le16_to_cpu(rsp->status);
3740
3741 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3742 dcid, scid, result, status);
3743
3744 mutex_lock(&conn->chan_lock);
3745
3746 if (scid) {
3747 chan = __l2cap_get_chan_by_scid(conn, scid);
3748 if (!chan) {
3749 err = -EFAULT;
3750 goto unlock;
3751 }
3752 } else {
3753 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3754 if (!chan) {
3755 err = -EFAULT;
3756 goto unlock;
3757 }
3758 }
3759
3760 err = 0;
3761
3762 l2cap_chan_lock(chan);
3763
3764 switch (result) {
3765 case L2CAP_CR_SUCCESS:
3766 l2cap_state_change(chan, BT_CONFIG);
3767 chan->ident = 0;
3768 chan->dcid = dcid;
3769 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3770
3771 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3772 break;
3773
3774 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3775 l2cap_build_conf_req(chan, req), req);
3776 chan->num_conf_req++;
3777 break;
3778
3779 case L2CAP_CR_PEND:
3780 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3781 break;
3782
3783 default:
3784 l2cap_chan_del(chan, ECONNREFUSED);
3785 break;
3786 }
3787
3788 l2cap_chan_unlock(chan);
3789
3790 unlock:
3791 mutex_unlock(&conn->chan_lock);
3792
3793 return err;
3794 }
3795
3796 static inline void set_default_fcs(struct l2cap_chan *chan)
3797 {
3798 /* FCS is enabled only in ERTM or streaming mode, if one or both
3799 * sides request it.
3800 */
3801 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3802 chan->fcs = L2CAP_FCS_NONE;
3803 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3804 chan->fcs = L2CAP_FCS_CRC16;
3805 }
3806
3807 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3808 u8 ident, u16 flags)
3809 {
3810 struct l2cap_conn *conn = chan->conn;
3811
3812 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3813 flags);
3814
3815 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3816 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3817
3818 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3819 l2cap_build_conf_rsp(chan, data,
3820 L2CAP_CONF_SUCCESS, flags), data);
3821 }
3822
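/* Handle an incoming Configure Request: accumulate continuation
 * fragments, then parse the complete request, send our response, and
 * finish channel setup once both directions are configured. */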
3823 static inline int l2cap_config_req(struct l2cap_conn *conn,
3824 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3825 u8 *data)
3826 {
3827 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3828 u16 dcid, flags;
3829 u8 rsp[64];
3830 struct l2cap_chan *chan;
3831 int len, err = 0;
3832
3833 dcid = __le16_to_cpu(req->dcid);
3834 flags = __le16_to_cpu(req->flags);
3835
3836 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3837
3838 chan = l2cap_get_chan_by_scid(conn, dcid);
3839 if (!chan)
3840 return -ENOENT;
3841
3842 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3843 struct l2cap_cmd_rej_cid rej;
3844
3845 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3846 rej.scid = cpu_to_le16(chan->scid);
3847 rej.dcid = cpu_to_le16(chan->dcid);
3848
3849 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3850 sizeof(rej), &rej);
3851 goto unlock;
3852 }
3853
3854 /* Reject if config buffer is too small. */
3855 len = cmd_len - sizeof(*req);
3856 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3857 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3858 l2cap_build_conf_rsp(chan, rsp,
3859 L2CAP_CONF_REJECT, flags), rsp);
3860 goto unlock;
3861 }
3862
3863 /* Store config. */
3864 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3865 chan->conf_len += len;
3866
3867 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3868 /* Incomplete config. Send empty response. */
3869 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3870 l2cap_build_conf_rsp(chan, rsp,
3871 L2CAP_CONF_SUCCESS, flags), rsp);
3872 goto unlock;
3873 }
3874
3875 /* Complete config. */
3876 len = l2cap_parse_conf_req(chan, rsp);
3877 if (len < 0) {
3878 l2cap_send_disconn_req(chan, ECONNRESET);
3879 goto unlock;
3880 }
3881
3882 chan->ident = cmd->ident;
3883 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3884 chan->num_conf_rsp++;
3885
3886 /* Reset config buffer. */
3887 chan->conf_len = 0;
3888
3889 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3890 goto unlock;
3891
3892 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3893 set_default_fcs(chan);
3894
3895 if (chan->mode == L2CAP_MODE_ERTM ||
3896 chan->mode == L2CAP_MODE_STREAMING)
3897 err = l2cap_ertm_init(chan);
3898
3899 if (err < 0)
3900 l2cap_send_disconn_req(chan, -err);
3901 else
3902 l2cap_chan_ready(chan);
3903
3904 goto unlock;
3905 }
3906
3907 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3908 u8 buf[64];
3909 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3910 l2cap_build_conf_req(chan, buf), buf);
3911 chan->num_conf_req++;
3912 }
3913
3914 /* Got Conf Rsp PENDING from remote side and assume we sent
3915 * Conf Rsp PENDING in the code above */
3916 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3917 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3918
3919 /* check compatibility */
3920
3921 /* Send rsp for BR/EDR channel */
3922 if (!chan->hs_hcon)
3923 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3924 else
3925 chan->ident = cmd->ident;
3926 }
3927
3928 unlock:
3929 l2cap_chan_unlock(chan);
3930 return err;
3931 }
3932
3933 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3934 struct l2cap_cmd_hdr *cmd, u8 *data)
3935 {
3936 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3937 u16 scid, flags, result;
3938 struct l2cap_chan *chan;
3939 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3940 int err = 0;
3941
3942 scid = __le16_to_cpu(rsp->scid);
3943 flags = __le16_to_cpu(rsp->flags);
3944 result = __le16_to_cpu(rsp->result);
3945
3946 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3947 result, len);
3948
3949 chan = l2cap_get_chan_by_scid(conn, scid);
3950 if (!chan)
3951 return 0;
3952
3953 switch (result) {
3954 case L2CAP_CONF_SUCCESS:
3955 l2cap_conf_rfc_get(chan, rsp->data, len);
3956 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3957 break;
3958
3959 case L2CAP_CONF_PENDING:
3960 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3961
3962 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3963 char buf[64];
3964
3965 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3966 buf, &result);
3967 if (len < 0) {
3968 l2cap_send_disconn_req(chan, ECONNRESET);
3969 goto done;
3970 }
3971
3972 if (!chan->hs_hcon) {
3973 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3974 0);
3975 } else {
3976 if (l2cap_check_efs(chan)) {
3977 amp_create_logical_link(chan);
3978 chan->ident = cmd->ident;
3979 }
3980 }
3981 }
3982 goto done;
3983
3984 case L2CAP_CONF_UNACCEPT:
3985 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3986 char req[64];
3987
3988 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3989 l2cap_send_disconn_req(chan, ECONNRESET);
3990 goto done;
3991 }
3992
3993 /* throw out any old stored conf requests */
3994 result = L2CAP_CONF_SUCCESS;
3995 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3996 req, &result);
3997 if (len < 0) {
3998 l2cap_send_disconn_req(chan, ECONNRESET);
3999 goto done;
4000 }
4001
4002 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4003 L2CAP_CONF_REQ, len, req);
4004 chan->num_conf_req++;
4005 if (result != L2CAP_CONF_SUCCESS)
4006 goto done;
4007 break;
4008 }
4009
4010 default:
4011 l2cap_chan_set_err(chan, ECONNRESET);
4012
4013 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4014 l2cap_send_disconn_req(chan, ECONNRESET);
4015 goto done;
4016 }
4017
4018 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4019 goto done;
4020
4021 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4022
4023 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4024 set_default_fcs(chan);
4025
4026 if (chan->mode == L2CAP_MODE_ERTM ||
4027 chan->mode == L2CAP_MODE_STREAMING)
4028 err = l2cap_ertm_init(chan);
4029
4030 if (err < 0)
4031 l2cap_send_disconn_req(chan, -err);
4032 else
4033 l2cap_chan_ready(chan);
4034 }
4035
4036 done:
4037 l2cap_chan_unlock(chan);
4038 return err;
4039 }
4040
4041 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4042 struct l2cap_cmd_hdr *cmd, u8 *data)
4043 {
4044 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4045 struct l2cap_disconn_rsp rsp;
4046 u16 dcid, scid;
4047 struct l2cap_chan *chan;
4048 struct sock *sk;
4049
4050 scid = __le16_to_cpu(req->scid);
4051 dcid = __le16_to_cpu(req->dcid);
4052
4053 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4054
4055 mutex_lock(&conn->chan_lock);
4056
4057 chan = __l2cap_get_chan_by_scid(conn, dcid);
4058 if (!chan) {
4059 mutex_unlock(&conn->chan_lock);
4060 return 0;
4061 }
4062
4063 l2cap_chan_lock(chan);
4064
4065 sk = chan->sk;
4066
4067 rsp.dcid = cpu_to_le16(chan->scid);
4068 rsp.scid = cpu_to_le16(chan->dcid);
4069 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4070
4071 lock_sock(sk);
4072 sk->sk_shutdown = SHUTDOWN_MASK;
4073 release_sock(sk);
4074
4075 l2cap_chan_hold(chan);
4076 l2cap_chan_del(chan, ECONNRESET);
4077
4078 l2cap_chan_unlock(chan);
4079
4080 chan->ops->close(chan);
4081 l2cap_chan_put(chan);
4082
4083 mutex_unlock(&conn->chan_lock);
4084
4085 return 0;
4086 }
4087
4088 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u8 *data)
4090 {
4091 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4092 u16 dcid, scid;
4093 struct l2cap_chan *chan;
4094
4095 scid = __le16_to_cpu(rsp->scid);
4096 dcid = __le16_to_cpu(rsp->dcid);
4097
4098 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4099
4100 mutex_lock(&conn->chan_lock);
4101
4102 chan = __l2cap_get_chan_by_scid(conn, scid);
4103 if (!chan) {
4104 mutex_unlock(&conn->chan_lock);
4105 return 0;
4106 }
4107
4108 l2cap_chan_lock(chan);
4109
4110 l2cap_chan_hold(chan);
4111 l2cap_chan_del(chan, 0);
4112
4113 l2cap_chan_unlock(chan);
4114
4115 chan->ops->close(chan);
4116 l2cap_chan_put(chan);
4117
4118 mutex_unlock(&conn->chan_lock);
4119
4120 return 0;
4121 }
4122
4123 static inline int l2cap_information_req(struct l2cap_conn *conn,
4124 struct l2cap_cmd_hdr *cmd, u8 *data)
4125 {
4126 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4127 u16 type;
4128
4129 type = __le16_to_cpu(req->type);
4130
4131 BT_DBG("type 0x%4.4x", type);
4132
4133 if (type == L2CAP_IT_FEAT_MASK) {
4134 u8 buf[8];
4135 u32 feat_mask = l2cap_feat_mask;
4136 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4137 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4138 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4139 if (!disable_ertm)
4140 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4141 | L2CAP_FEAT_FCS;
4142 if (enable_hs)
4143 feat_mask |= L2CAP_FEAT_EXT_FLOW
4144 | L2CAP_FEAT_EXT_WINDOW;
4145
4146 put_unaligned_le32(feat_mask, rsp->data);
4147 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4148 buf);
4149 } else if (type == L2CAP_IT_FIXED_CHAN) {
4150 u8 buf[12];
4151 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4152
4153 if (enable_hs)
4154 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4155 else
4156 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4157
4158 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4159 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4160 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4161 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4162 buf);
4163 } else {
4164 struct l2cap_info_rsp rsp;
4165 rsp.type = cpu_to_le16(type);
4166 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4167 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4168 &rsp);
4169 }
4170
4171 return 0;
4172 }
4173
4174 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4175 struct l2cap_cmd_hdr *cmd, u8 *data)
4176 {
4177 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4178 u16 type, result;
4179
4180 type = __le16_to_cpu(rsp->type);
4181 result = __le16_to_cpu(rsp->result);
4182
4183 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4184
4185 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4186 if (cmd->ident != conn->info_ident ||
4187 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4188 return 0;
4189
4190 cancel_delayed_work(&conn->info_timer);
4191
4192 if (result != L2CAP_IR_SUCCESS) {
4193 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4194 conn->info_ident = 0;
4195
4196 l2cap_conn_start(conn);
4197
4198 return 0;
4199 }
4200
4201 switch (type) {
4202 case L2CAP_IT_FEAT_MASK:
4203 conn->feat_mask = get_unaligned_le32(rsp->data);
4204
4205 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4206 struct l2cap_info_req req;
4207 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4208
4209 conn->info_ident = l2cap_get_ident(conn);
4210
4211 l2cap_send_cmd(conn, conn->info_ident,
4212 L2CAP_INFO_REQ, sizeof(req), &req);
4213 } else {
4214 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4215 conn->info_ident = 0;
4216
4217 l2cap_conn_start(conn);
4218 }
4219 break;
4220
4221 case L2CAP_IT_FIXED_CHAN:
4222 conn->fixed_chan_mask = rsp->data[0];
4223 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4224 conn->info_ident = 0;
4225
4226 l2cap_conn_start(conn);
4227 break;
4228 }
4229
4230 return 0;
4231 }
4232
4233 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4234 struct l2cap_cmd_hdr *cmd,
4235 u16 cmd_len, void *data)
4236 {
4237 struct l2cap_create_chan_req *req = data;
4238 struct l2cap_create_chan_rsp rsp;
4239 struct l2cap_chan *chan;
4240 struct hci_dev *hdev;
4241 u16 psm, scid;
4242
4243 if (cmd_len != sizeof(*req))
4244 return -EPROTO;
4245
4246 if (!enable_hs)
4247 return -EINVAL;
4248
4249 psm = le16_to_cpu(req->psm);
4250 scid = le16_to_cpu(req->scid);
4251
4252 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4253
4254 /* For controller id 0 make BR/EDR connection */
4255 if (req->amp_id == HCI_BREDR_ID) {
4256 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4257 req->amp_id);
4258 return 0;
4259 }
4260
4261 /* Validate AMP controller id */
4262 hdev = hci_dev_get(req->amp_id);
4263 if (!hdev)
4264 goto error;
4265
4266 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4267 hci_dev_put(hdev);
4268 goto error;
4269 }
4270
4271 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4272 req->amp_id);
4273 if (chan) {
4274 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4275 struct hci_conn *hs_hcon;
4276
4277 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4278 if (!hs_hcon) {
4279 hci_dev_put(hdev);
4280 return -EFAULT;
4281 }
4282
4283 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4284
4285 mgr->bredr_chan = chan;
4286 chan->hs_hcon = hs_hcon;
4287 chan->fcs = L2CAP_FCS_NONE;
4288 conn->mtu = hdev->block_mtu;
4289 }
4290
4291 hci_dev_put(hdev);
4292
4293 return 0;
4294
4295 error:
4296 rsp.dcid = 0;
4297 rsp.scid = cpu_to_le16(scid);
4298 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4299 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4300
4301 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4302 sizeof(rsp), &rsp);
4303
4304 return -EFAULT;
4305 }
4306
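     /* Helpers for the AMP "Move Channel" signaling exchange. Each one
      * builds the corresponding command (Move Channel Request, Response,
      * Confirm or Confirm Response); the request and confirm helpers also
      * arm the channel timer with L2CAP_MOVE_TIMEOUT so that a silent
      * peer cannot stall the move state machine.
      */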
4307 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4308 {
4309 struct l2cap_move_chan_req req;
4310 u8 ident;
4311
4312 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4313
4314 ident = l2cap_get_ident(chan->conn);
4315 chan->ident = ident;
4316
4317 req.icid = cpu_to_le16(chan->scid);
4318 req.dest_amp_id = dest_amp_id;
4319
4320 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4321 &req);
4322
4323 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4324 }
4325
4326 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4327 {
4328 struct l2cap_move_chan_rsp rsp;
4329
4330 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4331
4332 rsp.icid = cpu_to_le16(chan->dcid);
4333 rsp.result = cpu_to_le16(result);
4334
4335 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4336 sizeof(rsp), &rsp);
4337 }
4338
4339 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4340 {
4341 struct l2cap_move_chan_cfm cfm;
4342
4343 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4344
4345 chan->ident = l2cap_get_ident(chan->conn);
4346
4347 cfm.icid = cpu_to_le16(chan->scid);
4348 cfm.result = cpu_to_le16(result);
4349
4350 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4351 sizeof(cfm), &cfm);
4352
4353 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4354 }
4355
4356 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4357 {
4358 struct l2cap_move_chan_cfm cfm;
4359
4360 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4361
4362 cfm.icid = cpu_to_le16(icid);
4363 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4364
4365 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4366 sizeof(cfm), &cfm);
4367 }
4368
4369 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4370 u16 icid)
4371 {
4372 struct l2cap_move_chan_cfm_rsp rsp;
4373
4374 BT_DBG("icid 0x%4.4x", icid);
4375
4376 rsp.icid = cpu_to_le16(icid);
4377 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4378 }
4379
4380 static void __release_logical_link(struct l2cap_chan *chan)
4381 {
4382 chan->hs_hchan = NULL;
4383 chan->hs_hcon = NULL;
4384
4385 /* Placeholder - release the logical link */
4386 }
4387
4388 static void l2cap_logical_fail(struct l2cap_chan *chan)
4389 {
4390 /* Logical link setup failed */
4391 if (chan->state != BT_CONNECTED) {
4392 /* Create channel failure, disconnect */
4393 l2cap_send_disconn_req(chan, ECONNRESET);
4394 return;
4395 }
4396
4397 switch (chan->move_role) {
4398 case L2CAP_MOVE_ROLE_RESPONDER:
4399 l2cap_move_done(chan);
4400 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4401 break;
4402 case L2CAP_MOVE_ROLE_INITIATOR:
4403 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4404 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4405 /* Remote has only sent pending or
4406 * success responses, clean up
4407 */
4408 l2cap_move_done(chan);
4409 }
4410
4411 /* Other amp move states imply that the move
4412 * has already aborted
4413 */
4414 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4415 break;
4416 }
4417 }
4418
4419 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4420 struct hci_chan *hchan)
4421 {
4422 struct l2cap_conf_rsp rsp;
4423
4424 chan->hs_hchan = hchan;
4425 chan->hs_hcon->l2cap_data = chan->conn;
4426
4427 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4428
4429 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4430 int err;
4431
4432 set_default_fcs(chan);
4433
4434 err = l2cap_ertm_init(chan);
4435 if (err < 0)
4436 l2cap_send_disconn_req(chan, -err);
4437 else
4438 l2cap_chan_ready(chan);
4439 }
4440 }
4441
4442 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4443 struct hci_chan *hchan)
4444 {
4445 chan->hs_hcon = hchan->conn;
4446 chan->hs_hcon->l2cap_data = chan->conn;
4447
4448 BT_DBG("move_state %d", chan->move_state);
4449
4450 switch (chan->move_state) {
4451 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4452 /* Move confirm will be sent after a success
4453 * response is received
4454 */
4455 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4456 break;
4457 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4458 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4459 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4460 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4461 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4462 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4463 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4464 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4465 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4466 }
4467 break;
4468 default:
4469 		/* Move was not in an expected state, release the logical link */
4470 __release_logical_link(chan);
4471
4472 chan->move_state = L2CAP_MOVE_STABLE;
4473 }
4474 }
4475
4476 /* Call with chan locked */
4477 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4478 u8 status)
4479 {
4480 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4481
4482 if (status) {
4483 l2cap_logical_fail(chan);
4484 __release_logical_link(chan);
4485 return;
4486 }
4487
4488 if (chan->state != BT_CONNECTED) {
4489 /* Ignore logical link if channel is on BR/EDR */
4490 if (chan->local_amp_id)
4491 l2cap_logical_finish_create(chan, hchan);
4492 } else {
4493 l2cap_logical_finish_move(chan, hchan);
4494 }
4495 }
4496
4497 void l2cap_move_start(struct l2cap_chan *chan)
4498 {
4499 BT_DBG("chan %p", chan);
4500
4501 if (chan->local_amp_id == HCI_BREDR_ID) {
4502 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4503 return;
4504 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4505 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4506 /* Placeholder - start physical link setup */
4507 } else {
4508 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4509 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4510 chan->move_id = 0;
4511 l2cap_move_setup(chan);
4512 l2cap_send_move_chan_req(chan, 0);
4513 }
4514 }
4515
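     /* Called once physical link setup for an AMP channel has finished.
      * For an outgoing channel (BT_CONNECT) a Create Channel Request is
      * sent on success, otherwise the code falls back to a plain BR/EDR
      * Connection Request. For an incoming channel the pending Create
      * Channel Response is sent, followed by the initial Configure
      * Request when the result was success.
      */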
4516 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4517 u8 local_amp_id, u8 remote_amp_id)
4518 {
4519 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4520 local_amp_id, remote_amp_id);
4521
4522 chan->fcs = L2CAP_FCS_NONE;
4523
4524 /* Outgoing channel on AMP */
4525 if (chan->state == BT_CONNECT) {
4526 if (result == L2CAP_CR_SUCCESS) {
4527 chan->local_amp_id = local_amp_id;
4528 l2cap_send_create_chan_req(chan, remote_amp_id);
4529 } else {
4530 /* Revert to BR/EDR connect */
4531 l2cap_send_conn_req(chan);
4532 }
4533
4534 return;
4535 }
4536
4537 /* Incoming channel on AMP */
4538 if (__l2cap_no_conn_pending(chan)) {
4539 struct l2cap_conn_rsp rsp;
4540 char buf[128];
4541 rsp.scid = cpu_to_le16(chan->dcid);
4542 rsp.dcid = cpu_to_le16(chan->scid);
4543
4544 if (result == L2CAP_CR_SUCCESS) {
4545 /* Send successful response */
4546 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4547 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4548 } else {
4549 /* Send negative response */
4550 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4551 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4552 }
4553
4554 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4555 sizeof(rsp), &rsp);
4556
4557 if (result == L2CAP_CR_SUCCESS) {
4558 __l2cap_state_change(chan, BT_CONFIG);
4559 set_bit(CONF_REQ_SENT, &chan->conf_state);
4560 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4561 L2CAP_CONF_REQ,
4562 l2cap_build_conf_req(chan, buf), buf);
4563 chan->num_conf_req++;
4564 }
4565 }
4566 }
4567
4568 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4569 u8 remote_amp_id)
4570 {
4571 l2cap_move_setup(chan);
4572 chan->move_id = local_amp_id;
4573 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4574
4575 l2cap_send_move_chan_req(chan, remote_amp_id);
4576 }
4577
4578 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4579 {
4580 struct hci_chan *hchan = NULL;
4581
4582 /* Placeholder - get hci_chan for logical link */
4583
4584 if (hchan) {
4585 if (hchan->state == BT_CONNECTED) {
4586 /* Logical link is ready to go */
4587 chan->hs_hcon = hchan->conn;
4588 chan->hs_hcon->l2cap_data = chan->conn;
4589 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4590 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4591
4592 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4593 } else {
4594 /* Wait for logical link to be ready */
4595 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4596 }
4597 } else {
4598 /* Logical link not available */
4599 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4600 }
4601 }
4602
4603 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4604 {
4605 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4606 u8 rsp_result;
4607 if (result == -EINVAL)
4608 rsp_result = L2CAP_MR_BAD_ID;
4609 else
4610 rsp_result = L2CAP_MR_NOT_ALLOWED;
4611
4612 l2cap_send_move_chan_rsp(chan, rsp_result);
4613 }
4614
4615 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4616 chan->move_state = L2CAP_MOVE_STABLE;
4617
4618 /* Restart data transmission */
4619 l2cap_ertm_send(chan);
4620 }
4621
4622 /* Invoke with locked chan */
4623 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4624 {
4625 u8 local_amp_id = chan->local_amp_id;
4626 u8 remote_amp_id = chan->remote_amp_id;
4627
4628 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4629 chan, result, local_amp_id, remote_amp_id);
4630
4631 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4632 l2cap_chan_unlock(chan);
4633 return;
4634 }
4635
4636 if (chan->state != BT_CONNECTED) {
4637 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4638 } else if (result != L2CAP_MR_SUCCESS) {
4639 l2cap_do_move_cancel(chan, result);
4640 } else {
4641 switch (chan->move_role) {
4642 case L2CAP_MOVE_ROLE_INITIATOR:
4643 l2cap_do_move_initiate(chan, local_amp_id,
4644 remote_amp_id);
4645 break;
4646 case L2CAP_MOVE_ROLE_RESPONDER:
4647 l2cap_do_move_respond(chan, result);
4648 break;
4649 default:
4650 l2cap_do_move_cancel(chan, result);
4651 break;
4652 }
4653 }
4654 }
4655
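     /* Handle an incoming Move Channel Request. The move is refused
      * unless the channel is a dynamic ERTM or streaming channel that is
      * allowed on an AMP controller and the destination controller id is
      * valid; simultaneous moves from both sides are resolved by the
      * collision check below. On acceptance the channel takes the
      * responder role and a Pending or Success response is returned.
      */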
4656 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4657 struct l2cap_cmd_hdr *cmd,
4658 u16 cmd_len, void *data)
4659 {
4660 struct l2cap_move_chan_req *req = data;
4661 struct l2cap_move_chan_rsp rsp;
4662 struct l2cap_chan *chan;
4663 u16 icid = 0;
4664 u16 result = L2CAP_MR_NOT_ALLOWED;
4665
4666 if (cmd_len != sizeof(*req))
4667 return -EPROTO;
4668
4669 icid = le16_to_cpu(req->icid);
4670
4671 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4672
4673 if (!enable_hs)
4674 return -EINVAL;
4675
4676 chan = l2cap_get_chan_by_dcid(conn, icid);
4677 if (!chan) {
4678 rsp.icid = cpu_to_le16(icid);
4679 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4680 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4681 sizeof(rsp), &rsp);
4682 return 0;
4683 }
4684
4685 chan->ident = cmd->ident;
4686
4687 if (chan->scid < L2CAP_CID_DYN_START ||
4688 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4689 (chan->mode != L2CAP_MODE_ERTM &&
4690 chan->mode != L2CAP_MODE_STREAMING)) {
4691 result = L2CAP_MR_NOT_ALLOWED;
4692 goto send_move_response;
4693 }
4694
4695 if (chan->local_amp_id == req->dest_amp_id) {
4696 result = L2CAP_MR_SAME_ID;
4697 goto send_move_response;
4698 }
4699
4700 if (req->dest_amp_id) {
4701 struct hci_dev *hdev;
4702 hdev = hci_dev_get(req->dest_amp_id);
4703 if (!hdev || hdev->dev_type != HCI_AMP ||
4704 !test_bit(HCI_UP, &hdev->flags)) {
4705 if (hdev)
4706 hci_dev_put(hdev);
4707
4708 result = L2CAP_MR_BAD_ID;
4709 goto send_move_response;
4710 }
4711 hci_dev_put(hdev);
4712 }
4713
4714 /* Detect a move collision. Only send a collision response
4715 * if this side has "lost", otherwise proceed with the move.
4716 * The winner has the larger bd_addr.
4717 */
4718 if ((__chan_is_moving(chan) ||
4719 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4720 bacmp(conn->src, conn->dst) > 0) {
4721 result = L2CAP_MR_COLLISION;
4722 goto send_move_response;
4723 }
4724
4725 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4726 l2cap_move_setup(chan);
4727 chan->move_id = req->dest_amp_id;
4728 icid = chan->dcid;
4729
4730 if (!req->dest_amp_id) {
4731 /* Moving to BR/EDR */
4732 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4733 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4734 result = L2CAP_MR_PEND;
4735 } else {
4736 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4737 result = L2CAP_MR_SUCCESS;
4738 }
4739 } else {
4740 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4741 /* Placeholder - uncomment when amp functions are available */
4742 /*amp_accept_physical(chan, req->dest_amp_id);*/
4743 result = L2CAP_MR_PEND;
4744 }
4745
4746 send_move_response:
4747 l2cap_send_move_chan_rsp(chan, result);
4748
4749 l2cap_chan_unlock(chan);
4750
4751 return 0;
4752 }
4753
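     /* A Move Channel Response with Success or Pending status arrived
      * for the channel identified by icid. Advance the initiator side of
      * the move: depending on whether the logical link is already up,
      * either send the Move Channel Confirm now or keep waiting for
      * logical link completion.
      */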
4754 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4755 {
4756 struct l2cap_chan *chan;
4757 struct hci_chan *hchan = NULL;
4758
4759 chan = l2cap_get_chan_by_scid(conn, icid);
4760 if (!chan) {
4761 l2cap_send_move_chan_cfm_icid(conn, icid);
4762 return;
4763 }
4764
4765 __clear_chan_timer(chan);
4766 if (result == L2CAP_MR_PEND)
4767 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4768
4769 switch (chan->move_state) {
4770 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4771 /* Move confirm will be sent when logical link
4772 * is complete.
4773 */
4774 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4775 break;
4776 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4777 if (result == L2CAP_MR_PEND) {
4778 break;
4779 } else if (test_bit(CONN_LOCAL_BUSY,
4780 &chan->conn_state)) {
4781 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4782 } else {
4783 /* Logical link is up or moving to BR/EDR,
4784 * proceed with move
4785 */
4786 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4787 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4788 }
4789 break;
4790 case L2CAP_MOVE_WAIT_RSP:
4791 /* Moving to AMP */
4792 if (result == L2CAP_MR_SUCCESS) {
4793 /* Remote is ready, send confirm immediately
4794 * after logical link is ready
4795 */
4796 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4797 } else {
4798 /* Both logical link and move success
4799 * are required to confirm
4800 */
4801 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4802 }
4803
4804 /* Placeholder - get hci_chan for logical link */
4805 if (!hchan) {
4806 /* Logical link not available */
4807 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4808 break;
4809 }
4810
4811 /* If the logical link is not yet connected, do not
4812 * send confirmation.
4813 */
4814 if (hchan->state != BT_CONNECTED)
4815 break;
4816
4817 /* Logical link is already ready to go */
4818
4819 chan->hs_hcon = hchan->conn;
4820 chan->hs_hcon->l2cap_data = chan->conn;
4821
4822 if (result == L2CAP_MR_SUCCESS) {
4823 /* Can confirm now */
4824 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4825 } else {
4826 /* Now only need move success
4827 * to confirm
4828 */
4829 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4830 }
4831
4832 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4833 break;
4834 default:
4835 /* Any other amp move state means the move failed. */
4836 chan->move_id = chan->local_amp_id;
4837 l2cap_move_done(chan);
4838 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4839 }
4840
4841 l2cap_chan_unlock(chan);
4842 }
4843
4844 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4845 u16 result)
4846 {
4847 struct l2cap_chan *chan;
4848
4849 chan = l2cap_get_chan_by_ident(conn, ident);
4850 if (!chan) {
4851 /* Could not locate channel, icid is best guess */
4852 l2cap_send_move_chan_cfm_icid(conn, icid);
4853 return;
4854 }
4855
4856 __clear_chan_timer(chan);
4857
4858 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4859 if (result == L2CAP_MR_COLLISION) {
4860 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4861 } else {
4862 /* Cleanup - cancel move */
4863 chan->move_id = chan->local_amp_id;
4864 l2cap_move_done(chan);
4865 }
4866 }
4867
4868 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4869
4870 l2cap_chan_unlock(chan);
4871 }
4872
4873 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4874 struct l2cap_cmd_hdr *cmd,
4875 u16 cmd_len, void *data)
4876 {
4877 struct l2cap_move_chan_rsp *rsp = data;
4878 u16 icid, result;
4879
4880 if (cmd_len != sizeof(*rsp))
4881 return -EPROTO;
4882
4883 icid = le16_to_cpu(rsp->icid);
4884 result = le16_to_cpu(rsp->result);
4885
4886 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4887
4888 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4889 l2cap_move_continue(conn, icid, result);
4890 else
4891 l2cap_move_fail(conn, cmd->ident, icid, result);
4892
4893 return 0;
4894 }
4895
4896 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4897 struct l2cap_cmd_hdr *cmd,
4898 u16 cmd_len, void *data)
4899 {
4900 struct l2cap_move_chan_cfm *cfm = data;
4901 struct l2cap_chan *chan;
4902 u16 icid, result;
4903
4904 if (cmd_len != sizeof(*cfm))
4905 return -EPROTO;
4906
4907 icid = le16_to_cpu(cfm->icid);
4908 result = le16_to_cpu(cfm->result);
4909
4910 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4911
4912 chan = l2cap_get_chan_by_dcid(conn, icid);
4913 if (!chan) {
4914 /* Spec requires a response even if the icid was not found */
4915 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4916 return 0;
4917 }
4918
4919 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4920 if (result == L2CAP_MC_CONFIRMED) {
4921 chan->local_amp_id = chan->move_id;
4922 if (!chan->local_amp_id)
4923 __release_logical_link(chan);
4924 } else {
4925 chan->move_id = chan->local_amp_id;
4926 }
4927
4928 l2cap_move_done(chan);
4929 }
4930
4931 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4932
4933 l2cap_chan_unlock(chan);
4934
4935 return 0;
4936 }
4937
4938 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4939 struct l2cap_cmd_hdr *cmd,
4940 u16 cmd_len, void *data)
4941 {
4942 struct l2cap_move_chan_cfm_rsp *rsp = data;
4943 struct l2cap_chan *chan;
4944 u16 icid;
4945
4946 if (cmd_len != sizeof(*rsp))
4947 return -EPROTO;
4948
4949 icid = le16_to_cpu(rsp->icid);
4950
4951 BT_DBG("icid 0x%4.4x", icid);
4952
4953 chan = l2cap_get_chan_by_scid(conn, icid);
4954 if (!chan)
4955 return 0;
4956
4957 __clear_chan_timer(chan);
4958
4959 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4960 chan->local_amp_id = chan->move_id;
4961
4962 if (!chan->local_amp_id && chan->hs_hchan)
4963 __release_logical_link(chan);
4964
4965 l2cap_move_done(chan);
4966 }
4967
4968 l2cap_chan_unlock(chan);
4969
4970 return 0;
4971 }
4972
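     /* Sanity-check LE connection parameters, apparently following the
      * ranges from the core specification: connection interval 6..3200
      * (units of 1.25 ms), supervision timeout 10..3200 (units of 10 ms)
      * and slave latency below 500. The "to_multiplier * 8 / max" term
      * (8 == 10 ms / 1.25 ms) additionally requires that
      * (latency + 1) * max interval stays within the supervision timeout.
      */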
4973 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4974 u16 to_multiplier)
4975 {
4976 u16 max_latency;
4977
4978 if (min > max || min < 6 || max > 3200)
4979 return -EINVAL;
4980
4981 if (to_multiplier < 10 || to_multiplier > 3200)
4982 return -EINVAL;
4983
4984 if (max >= to_multiplier * 8)
4985 return -EINVAL;
4986
4987 max_latency = (to_multiplier * 8 / max) - 1;
4988 if (latency > 499 || latency > max_latency)
4989 return -EINVAL;
4990
4991 return 0;
4992 }
4993
4994 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4995 struct l2cap_cmd_hdr *cmd,
4996 u8 *data)
4997 {
4998 struct hci_conn *hcon = conn->hcon;
4999 struct l2cap_conn_param_update_req *req;
5000 struct l2cap_conn_param_update_rsp rsp;
5001 u16 min, max, latency, to_multiplier, cmd_len;
5002 int err;
5003
5004 if (!(hcon->link_mode & HCI_LM_MASTER))
5005 return -EINVAL;
5006
5007 cmd_len = __le16_to_cpu(cmd->len);
5008 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5009 return -EPROTO;
5010
5011 req = (struct l2cap_conn_param_update_req *) data;
5012 min = __le16_to_cpu(req->min);
5013 max = __le16_to_cpu(req->max);
5014 latency = __le16_to_cpu(req->latency);
5015 to_multiplier = __le16_to_cpu(req->to_multiplier);
5016
5017 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5018 min, max, latency, to_multiplier);
5019
5020 memset(&rsp, 0, sizeof(rsp));
5021
5022 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5023 if (err)
5024 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5025 else
5026 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5027
5028 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5029 sizeof(rsp), &rsp);
5030
5031 if (!err)
5032 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5033
5034 return 0;
5035 }
5036
5037 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5038 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5039 u8 *data)
5040 {
5041 int err = 0;
5042
5043 switch (cmd->code) {
5044 case L2CAP_COMMAND_REJ:
5045 l2cap_command_rej(conn, cmd, data);
5046 break;
5047
5048 case L2CAP_CONN_REQ:
5049 err = l2cap_connect_req(conn, cmd, data);
5050 break;
5051
5052 case L2CAP_CONN_RSP:
5053 case L2CAP_CREATE_CHAN_RSP:
5054 err = l2cap_connect_create_rsp(conn, cmd, data);
5055 break;
5056
5057 case L2CAP_CONF_REQ:
5058 err = l2cap_config_req(conn, cmd, cmd_len, data);
5059 break;
5060
5061 case L2CAP_CONF_RSP:
5062 err = l2cap_config_rsp(conn, cmd, data);
5063 break;
5064
5065 case L2CAP_DISCONN_REQ:
5066 err = l2cap_disconnect_req(conn, cmd, data);
5067 break;
5068
5069 case L2CAP_DISCONN_RSP:
5070 err = l2cap_disconnect_rsp(conn, cmd, data);
5071 break;
5072
5073 case L2CAP_ECHO_REQ:
5074 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5075 break;
5076
5077 case L2CAP_ECHO_RSP:
5078 break;
5079
5080 case L2CAP_INFO_REQ:
5081 err = l2cap_information_req(conn, cmd, data);
5082 break;
5083
5084 case L2CAP_INFO_RSP:
5085 err = l2cap_information_rsp(conn, cmd, data);
5086 break;
5087
5088 case L2CAP_CREATE_CHAN_REQ:
5089 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5090 break;
5091
5092 case L2CAP_MOVE_CHAN_REQ:
5093 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5094 break;
5095
5096 case L2CAP_MOVE_CHAN_RSP:
5097 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5098 break;
5099
5100 case L2CAP_MOVE_CHAN_CFM:
5101 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5102 break;
5103
5104 case L2CAP_MOVE_CHAN_CFM_RSP:
5105 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5106 break;
5107
5108 default:
5109 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5110 err = -EINVAL;
5111 break;
5112 }
5113
5114 return err;
5115 }
5116
5117 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5118 struct l2cap_cmd_hdr *cmd, u8 *data)
5119 {
5120 switch (cmd->code) {
5121 case L2CAP_COMMAND_REJ:
5122 return 0;
5123
5124 case L2CAP_CONN_PARAM_UPDATE_REQ:
5125 return l2cap_conn_param_update_req(conn, cmd, data);
5126
5127 case L2CAP_CONN_PARAM_UPDATE_RSP:
5128 return 0;
5129
5130 default:
5131 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5132 return -EINVAL;
5133 }
5134 }
5135
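     /* Walk a signaling channel PDU one command at a time: copy out each
      * command header, dispatch to the LE or BR/EDR handler depending on
      * the link type, and answer anything the handlers reject with a
      * Command Reject.
      */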
5136 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5137 struct sk_buff *skb)
5138 {
5139 u8 *data = skb->data;
5140 int len = skb->len;
5141 struct l2cap_cmd_hdr cmd;
5142 int err;
5143
5144 l2cap_raw_recv(conn, skb);
5145
5146 while (len >= L2CAP_CMD_HDR_SIZE) {
5147 u16 cmd_len;
5148 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5149 data += L2CAP_CMD_HDR_SIZE;
5150 len -= L2CAP_CMD_HDR_SIZE;
5151
5152 cmd_len = le16_to_cpu(cmd.len);
5153
5154 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5155 cmd.ident);
5156
5157 if (cmd_len > len || !cmd.ident) {
5158 BT_DBG("corrupted command");
5159 break;
5160 }
5161
5162 if (conn->hcon->type == LE_LINK)
5163 err = l2cap_le_sig_cmd(conn, &cmd, data);
5164 else
5165 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5166
5167 if (err) {
5168 struct l2cap_cmd_rej_unk rej;
5169
5170 			BT_ERR("Signaling command processing failed (%d)", err);
5171
5172 /* FIXME: Map err to a valid reason */
5173 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5174 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5175 sizeof(rej), &rej);
5176 }
5177
5178 data += cmd_len;
5179 len -= cmd_len;
5180 }
5181
5182 kfree_skb(skb);
5183 }
5184
5185 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5186 {
5187 u16 our_fcs, rcv_fcs;
5188 int hdr_size;
5189
5190 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5191 hdr_size = L2CAP_EXT_HDR_SIZE;
5192 else
5193 hdr_size = L2CAP_ENH_HDR_SIZE;
5194
5195 if (chan->fcs == L2CAP_FCS_CRC16) {
5196 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5197 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5198 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5199
5200 if (our_fcs != rcv_fcs)
5201 return -EBADMSG;
5202 }
5203 return 0;
5204 }
5205
5206 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5207 {
5208 struct l2cap_ctrl control;
5209
5210 BT_DBG("chan %p", chan);
5211
5212 memset(&control, 0, sizeof(control));
5213 control.sframe = 1;
5214 control.final = 1;
5215 control.reqseq = chan->buffer_seq;
5216 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5217
5218 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5219 control.super = L2CAP_SUPER_RNR;
5220 l2cap_send_sframe(chan, &control);
5221 }
5222
5223 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5224 chan->unacked_frames > 0)
5225 __set_retrans_timer(chan);
5226
5227 /* Send pending iframes */
5228 l2cap_ertm_send(chan);
5229
5230 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5231 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5232 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5233 * send it now.
5234 */
5235 control.super = L2CAP_SUPER_RR;
5236 l2cap_send_sframe(chan, &control);
5237 }
5238 }
5239
5240 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5241 struct sk_buff **last_frag)
5242 {
5243 /* skb->len reflects data in skb as well as all fragments
5244 * skb->data_len reflects only data in fragments
5245 */
5246 if (!skb_has_frag_list(skb))
5247 skb_shinfo(skb)->frag_list = new_frag;
5248
5249 new_frag->next = NULL;
5250
5251 (*last_frag)->next = new_frag;
5252 *last_frag = new_frag;
5253
5254 skb->len += new_frag->len;
5255 skb->data_len += new_frag->len;
5256 skb->truesize += new_frag->truesize;
5257 }
5258
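     /* Reassemble an SDU from ERTM/streaming I-frames according to the
      * SAR bits: an unsegmented frame goes straight to the upper layer,
      * a start frame records the advertised SDU length, and continuation
      * and end fragments are chained onto chan->sdu until the full length
      * has arrived. Oversized or out-of-order fragments drop the partial
      * SDU and return an error.
      */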
5259 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5260 struct l2cap_ctrl *control)
5261 {
5262 int err = -EINVAL;
5263
5264 switch (control->sar) {
5265 case L2CAP_SAR_UNSEGMENTED:
5266 if (chan->sdu)
5267 break;
5268
5269 err = chan->ops->recv(chan, skb);
5270 break;
5271
5272 case L2CAP_SAR_START:
5273 if (chan->sdu)
5274 break;
5275
5276 chan->sdu_len = get_unaligned_le16(skb->data);
5277 skb_pull(skb, L2CAP_SDULEN_SIZE);
5278
5279 if (chan->sdu_len > chan->imtu) {
5280 err = -EMSGSIZE;
5281 break;
5282 }
5283
5284 if (skb->len >= chan->sdu_len)
5285 break;
5286
5287 chan->sdu = skb;
5288 chan->sdu_last_frag = skb;
5289
5290 skb = NULL;
5291 err = 0;
5292 break;
5293
5294 case L2CAP_SAR_CONTINUE:
5295 if (!chan->sdu)
5296 break;
5297
5298 append_skb_frag(chan->sdu, skb,
5299 &chan->sdu_last_frag);
5300 skb = NULL;
5301
5302 if (chan->sdu->len >= chan->sdu_len)
5303 break;
5304
5305 err = 0;
5306 break;
5307
5308 case L2CAP_SAR_END:
5309 if (!chan->sdu)
5310 break;
5311
5312 append_skb_frag(chan->sdu, skb,
5313 &chan->sdu_last_frag);
5314 skb = NULL;
5315
5316 if (chan->sdu->len != chan->sdu_len)
5317 break;
5318
5319 err = chan->ops->recv(chan, chan->sdu);
5320
5321 if (!err) {
5322 /* Reassembly complete */
5323 chan->sdu = NULL;
5324 chan->sdu_last_frag = NULL;
5325 chan->sdu_len = 0;
5326 }
5327 break;
5328 }
5329
5330 if (err) {
5331 kfree_skb(skb);
5332 kfree_skb(chan->sdu);
5333 chan->sdu = NULL;
5334 chan->sdu_last_frag = NULL;
5335 chan->sdu_len = 0;
5336 }
5337
5338 return err;
5339 }
5340
5341 static int l2cap_resegment(struct l2cap_chan *chan)
5342 {
5343 /* Placeholder */
5344 return 0;
5345 }
5346
5347 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5348 {
5349 u8 event;
5350
5351 if (chan->mode != L2CAP_MODE_ERTM)
5352 return;
5353
5354 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5355 l2cap_tx(chan, NULL, NULL, event);
5356 }
5357
5358 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5359 {
5360 int err = 0;
5361 /* Pass sequential frames to l2cap_reassemble_sdu()
5362 * until a gap is encountered.
5363 */
5364
5365 BT_DBG("chan %p", chan);
5366
5367 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5368 struct sk_buff *skb;
5369 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5370 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5371
5372 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5373
5374 if (!skb)
5375 break;
5376
5377 skb_unlink(skb, &chan->srej_q);
5378 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5379 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5380 if (err)
5381 break;
5382 }
5383
5384 if (skb_queue_empty(&chan->srej_q)) {
5385 chan->rx_state = L2CAP_RX_STATE_RECV;
5386 l2cap_send_ack(chan);
5387 }
5388
5389 return err;
5390 }
5391
5392 static void l2cap_handle_srej(struct l2cap_chan *chan,
5393 struct l2cap_ctrl *control)
5394 {
5395 struct sk_buff *skb;
5396
5397 BT_DBG("chan %p, control %p", chan, control);
5398
5399 if (control->reqseq == chan->next_tx_seq) {
5400 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5401 l2cap_send_disconn_req(chan, ECONNRESET);
5402 return;
5403 }
5404
5405 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5406
5407 if (skb == NULL) {
5408 BT_DBG("Seq %d not available for retransmission",
5409 control->reqseq);
5410 return;
5411 }
5412
5413 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5414 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5415 l2cap_send_disconn_req(chan, ECONNRESET);
5416 return;
5417 }
5418
5419 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5420
5421 if (control->poll) {
5422 l2cap_pass_to_tx(chan, control);
5423
5424 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5425 l2cap_retransmit(chan, control);
5426 l2cap_ertm_send(chan);
5427
5428 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5429 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5430 chan->srej_save_reqseq = control->reqseq;
5431 }
5432 } else {
5433 l2cap_pass_to_tx_fbit(chan, control);
5434
5435 if (control->final) {
5436 if (chan->srej_save_reqseq != control->reqseq ||
5437 !test_and_clear_bit(CONN_SREJ_ACT,
5438 &chan->conn_state))
5439 l2cap_retransmit(chan, control);
5440 } else {
5441 l2cap_retransmit(chan, control);
5442 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5443 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5444 chan->srej_save_reqseq = control->reqseq;
5445 }
5446 }
5447 }
5448 }
5449
5450 static void l2cap_handle_rej(struct l2cap_chan *chan,
5451 struct l2cap_ctrl *control)
5452 {
5453 struct sk_buff *skb;
5454
5455 BT_DBG("chan %p, control %p", chan, control);
5456
5457 if (control->reqseq == chan->next_tx_seq) {
5458 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5459 l2cap_send_disconn_req(chan, ECONNRESET);
5460 return;
5461 }
5462
5463 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5464
5465 if (chan->max_tx && skb &&
5466 bt_cb(skb)->control.retries >= chan->max_tx) {
5467 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5468 l2cap_send_disconn_req(chan, ECONNRESET);
5469 return;
5470 }
5471
5472 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5473
5474 l2cap_pass_to_tx(chan, control);
5475
5476 if (control->final) {
5477 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5478 l2cap_retransmit_all(chan, control);
5479 } else {
5480 l2cap_retransmit_all(chan, control);
5481 l2cap_ertm_send(chan);
5482 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5483 set_bit(CONN_REJ_ACT, &chan->conn_state);
5484 }
5485 }
5486
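     /* Classify a received txseq against the expected sequence number,
      * the last acknowledged sequence number and the tx window. The
      * result tells the receive state machine whether the frame is the
      * expected one, a duplicate, a frame answering an outstanding SREJ,
      * or an invalid frame that must be ignored or trigger a disconnect.
      */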
5487 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5488 {
5489 BT_DBG("chan %p, txseq %d", chan, txseq);
5490
5491 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5492 chan->expected_tx_seq);
5493
5494 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5495 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5496 chan->tx_win) {
5497 /* See notes below regarding "double poll" and
5498 * invalid packets.
5499 */
5500 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5501 BT_DBG("Invalid/Ignore - after SREJ");
5502 return L2CAP_TXSEQ_INVALID_IGNORE;
5503 } else {
5504 BT_DBG("Invalid - in window after SREJ sent");
5505 return L2CAP_TXSEQ_INVALID;
5506 }
5507 }
5508
5509 if (chan->srej_list.head == txseq) {
5510 BT_DBG("Expected SREJ");
5511 return L2CAP_TXSEQ_EXPECTED_SREJ;
5512 }
5513
5514 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5515 BT_DBG("Duplicate SREJ - txseq already stored");
5516 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5517 }
5518
5519 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5520 BT_DBG("Unexpected SREJ - not requested");
5521 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5522 }
5523 }
5524
5525 if (chan->expected_tx_seq == txseq) {
5526 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5527 chan->tx_win) {
5528 BT_DBG("Invalid - txseq outside tx window");
5529 return L2CAP_TXSEQ_INVALID;
5530 } else {
5531 BT_DBG("Expected");
5532 return L2CAP_TXSEQ_EXPECTED;
5533 }
5534 }
5535
5536 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5537 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5538 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5539 return L2CAP_TXSEQ_DUPLICATE;
5540 }
5541
5542 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5543 /* A source of invalid packets is a "double poll" condition,
5544 * where delays cause us to send multiple poll packets. If
5545 * the remote stack receives and processes both polls,
5546 * sequence numbers can wrap around in such a way that a
5547 * resent frame has a sequence number that looks like new data
5548 * with a sequence gap. This would trigger an erroneous SREJ
5549 * request.
5550 *
5551 * Fortunately, this is impossible with a tx window that's
5552 * less than half of the maximum sequence number, which allows
5553 * invalid frames to be safely ignored.
5554 *
5555 * With tx window sizes greater than half of the tx window
5556 * maximum, the frame is invalid and cannot be ignored. This
5557 * causes a disconnect.
5558 */
5559
5560 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5561 BT_DBG("Invalid/Ignore - txseq outside tx window");
5562 return L2CAP_TXSEQ_INVALID_IGNORE;
5563 } else {
5564 BT_DBG("Invalid - txseq outside tx window");
5565 return L2CAP_TXSEQ_INVALID;
5566 }
5567 } else {
5568 BT_DBG("Unexpected - txseq indicates missing frames");
5569 return L2CAP_TXSEQ_UNEXPECTED;
5570 }
5571 }
5572
5573 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5574 struct l2cap_ctrl *control,
5575 struct sk_buff *skb, u8 event)
5576 {
5577 int err = 0;
5578 	bool skb_in_use = false;
5579
5580 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5581 event);
5582
5583 switch (event) {
5584 case L2CAP_EV_RECV_IFRAME:
5585 switch (l2cap_classify_txseq(chan, control->txseq)) {
5586 case L2CAP_TXSEQ_EXPECTED:
5587 l2cap_pass_to_tx(chan, control);
5588
5589 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5590 BT_DBG("Busy, discarding expected seq %d",
5591 control->txseq);
5592 break;
5593 }
5594
5595 chan->expected_tx_seq = __next_seq(chan,
5596 control->txseq);
5597
5598 chan->buffer_seq = chan->expected_tx_seq;
5599 			skb_in_use = true;
5600
5601 err = l2cap_reassemble_sdu(chan, skb, control);
5602 if (err)
5603 break;
5604
5605 if (control->final) {
5606 if (!test_and_clear_bit(CONN_REJ_ACT,
5607 &chan->conn_state)) {
5608 control->final = 0;
5609 l2cap_retransmit_all(chan, control);
5610 l2cap_ertm_send(chan);
5611 }
5612 }
5613
5614 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5615 l2cap_send_ack(chan);
5616 break;
5617 case L2CAP_TXSEQ_UNEXPECTED:
5618 l2cap_pass_to_tx(chan, control);
5619
5620 /* Can't issue SREJ frames in the local busy state.
5621 * Drop this frame, it will be seen as missing
5622 * when local busy is exited.
5623 */
5624 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5625 BT_DBG("Busy, discarding unexpected seq %d",
5626 control->txseq);
5627 break;
5628 }
5629
5630 /* There was a gap in the sequence, so an SREJ
5631 * must be sent for each missing frame. The
5632 * current frame is stored for later use.
5633 */
5634 skb_queue_tail(&chan->srej_q, skb);
5635 			skb_in_use = true;
5636 BT_DBG("Queued %p (queue len %d)", skb,
5637 skb_queue_len(&chan->srej_q));
5638
5639 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5640 l2cap_seq_list_clear(&chan->srej_list);
5641 l2cap_send_srej(chan, control->txseq);
5642
5643 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5644 break;
5645 case L2CAP_TXSEQ_DUPLICATE:
5646 l2cap_pass_to_tx(chan, control);
5647 break;
5648 case L2CAP_TXSEQ_INVALID_IGNORE:
5649 break;
5650 case L2CAP_TXSEQ_INVALID:
5651 default:
5652 l2cap_send_disconn_req(chan, ECONNRESET);
5653 break;
5654 }
5655 break;
5656 case L2CAP_EV_RECV_RR:
5657 l2cap_pass_to_tx(chan, control);
5658 if (control->final) {
5659 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5660
5661 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5662 !__chan_is_moving(chan)) {
5663 control->final = 0;
5664 l2cap_retransmit_all(chan, control);
5665 }
5666
5667 l2cap_ertm_send(chan);
5668 } else if (control->poll) {
5669 l2cap_send_i_or_rr_or_rnr(chan);
5670 } else {
5671 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5672 &chan->conn_state) &&
5673 chan->unacked_frames)
5674 __set_retrans_timer(chan);
5675
5676 l2cap_ertm_send(chan);
5677 }
5678 break;
5679 case L2CAP_EV_RECV_RNR:
5680 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5681 l2cap_pass_to_tx(chan, control);
5682 if (control && control->poll) {
5683 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5684 l2cap_send_rr_or_rnr(chan, 0);
5685 }
5686 __clear_retrans_timer(chan);
5687 l2cap_seq_list_clear(&chan->retrans_list);
5688 break;
5689 case L2CAP_EV_RECV_REJ:
5690 l2cap_handle_rej(chan, control);
5691 break;
5692 case L2CAP_EV_RECV_SREJ:
5693 l2cap_handle_srej(chan, control);
5694 break;
5695 default:
5696 break;
5697 }
5698
5699 if (skb && !skb_in_use) {
5700 BT_DBG("Freeing %p", skb);
5701 kfree_skb(skb);
5702 }
5703
5704 return err;
5705 }
5706
5707 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5708 struct l2cap_ctrl *control,
5709 struct sk_buff *skb, u8 event)
5710 {
5711 int err = 0;
5712 u16 txseq = control->txseq;
5713 	bool skb_in_use = false;
5714
5715 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5716 event);
5717
5718 switch (event) {
5719 case L2CAP_EV_RECV_IFRAME:
5720 switch (l2cap_classify_txseq(chan, txseq)) {
5721 case L2CAP_TXSEQ_EXPECTED:
5722 /* Keep frame for reassembly later */
5723 l2cap_pass_to_tx(chan, control);
5724 skb_queue_tail(&chan->srej_q, skb);
5725 			skb_in_use = true;
5726 BT_DBG("Queued %p (queue len %d)", skb,
5727 skb_queue_len(&chan->srej_q));
5728
5729 chan->expected_tx_seq = __next_seq(chan, txseq);
5730 break;
5731 case L2CAP_TXSEQ_EXPECTED_SREJ:
5732 l2cap_seq_list_pop(&chan->srej_list);
5733
5734 l2cap_pass_to_tx(chan, control);
5735 skb_queue_tail(&chan->srej_q, skb);
5736 			skb_in_use = true;
5737 BT_DBG("Queued %p (queue len %d)", skb,
5738 skb_queue_len(&chan->srej_q));
5739
5740 err = l2cap_rx_queued_iframes(chan);
5741 if (err)
5742 break;
5743
5744 break;
5745 case L2CAP_TXSEQ_UNEXPECTED:
5746 /* Got a frame that can't be reassembled yet.
5747 * Save it for later, and send SREJs to cover
5748 * the missing frames.
5749 */
5750 skb_queue_tail(&chan->srej_q, skb);
5751 			skb_in_use = true;
5752 BT_DBG("Queued %p (queue len %d)", skb,
5753 skb_queue_len(&chan->srej_q));
5754
5755 l2cap_pass_to_tx(chan, control);
5756 l2cap_send_srej(chan, control->txseq);
5757 break;
5758 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5759 /* This frame was requested with an SREJ, but
5760 * some expected retransmitted frames are
5761 * missing. Request retransmission of missing
5762 * SREJ'd frames.
5763 */
5764 skb_queue_tail(&chan->srej_q, skb);
5765 			skb_in_use = true;
5766 BT_DBG("Queued %p (queue len %d)", skb,
5767 skb_queue_len(&chan->srej_q));
5768
5769 l2cap_pass_to_tx(chan, control);
5770 l2cap_send_srej_list(chan, control->txseq);
5771 break;
5772 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5773 /* We've already queued this frame. Drop this copy. */
5774 l2cap_pass_to_tx(chan, control);
5775 break;
5776 case L2CAP_TXSEQ_DUPLICATE:
5777 /* Expecting a later sequence number, so this frame
5778 * was already received. Ignore it completely.
5779 */
5780 break;
5781 case L2CAP_TXSEQ_INVALID_IGNORE:
5782 break;
5783 case L2CAP_TXSEQ_INVALID:
5784 default:
5785 l2cap_send_disconn_req(chan, ECONNRESET);
5786 break;
5787 }
5788 break;
5789 case L2CAP_EV_RECV_RR:
5790 l2cap_pass_to_tx(chan, control);
5791 if (control->final) {
5792 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5793
5794 if (!test_and_clear_bit(CONN_REJ_ACT,
5795 &chan->conn_state)) {
5796 control->final = 0;
5797 l2cap_retransmit_all(chan, control);
5798 }
5799
5800 l2cap_ertm_send(chan);
5801 } else if (control->poll) {
5802 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5803 &chan->conn_state) &&
5804 chan->unacked_frames) {
5805 __set_retrans_timer(chan);
5806 }
5807
5808 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5809 l2cap_send_srej_tail(chan);
5810 } else {
5811 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5812 &chan->conn_state) &&
5813 chan->unacked_frames)
5814 __set_retrans_timer(chan);
5815
5816 l2cap_send_ack(chan);
5817 }
5818 break;
5819 case L2CAP_EV_RECV_RNR:
5820 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5821 l2cap_pass_to_tx(chan, control);
5822 if (control->poll) {
5823 l2cap_send_srej_tail(chan);
5824 } else {
5825 struct l2cap_ctrl rr_control;
5826 memset(&rr_control, 0, sizeof(rr_control));
5827 rr_control.sframe = 1;
5828 rr_control.super = L2CAP_SUPER_RR;
5829 rr_control.reqseq = chan->buffer_seq;
5830 l2cap_send_sframe(chan, &rr_control);
5831 }
5832
5833 break;
5834 case L2CAP_EV_RECV_REJ:
5835 l2cap_handle_rej(chan, control);
5836 break;
5837 case L2CAP_EV_RECV_SREJ:
5838 l2cap_handle_srej(chan, control);
5839 break;
5840 }
5841
5842 if (skb && !skb_in_use) {
5843 BT_DBG("Freeing %p", skb);
5844 kfree_skb(skb);
5845 }
5846
5847 return err;
5848 }
5849
5850 static int l2cap_finish_move(struct l2cap_chan *chan)
5851 {
5852 BT_DBG("chan %p", chan);
5853
5854 chan->rx_state = L2CAP_RX_STATE_RECV;
5855
5856 if (chan->hs_hcon)
5857 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5858 else
5859 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5860
5861 return l2cap_resegment(chan);
5862 }
5863
5864 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5865 struct l2cap_ctrl *control,
5866 struct sk_buff *skb, u8 event)
5867 {
5868 int err;
5869
5870 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5871 event);
5872
5873 if (!control->poll)
5874 return -EPROTO;
5875
5876 l2cap_process_reqseq(chan, control->reqseq);
5877
5878 if (!skb_queue_empty(&chan->tx_q))
5879 chan->tx_send_head = skb_peek(&chan->tx_q);
5880 else
5881 chan->tx_send_head = NULL;
5882
5883 /* Rewind next_tx_seq to the point expected
5884 * by the receiver.
5885 */
5886 chan->next_tx_seq = control->reqseq;
5887 chan->unacked_frames = 0;
5888
5889 err = l2cap_finish_move(chan);
5890 if (err)
5891 return err;
5892
5893 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5894 l2cap_send_i_or_rr_or_rnr(chan);
5895
5896 if (event == L2CAP_EV_RECV_IFRAME)
5897 return -EPROTO;
5898
5899 return l2cap_rx_state_recv(chan, control, NULL, event);
5900 }
5901
5902 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5903 struct l2cap_ctrl *control,
5904 struct sk_buff *skb, u8 event)
5905 {
5906 int err;
5907
5908 if (!control->final)
5909 return -EPROTO;
5910
5911 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5912
5913 chan->rx_state = L2CAP_RX_STATE_RECV;
5914 l2cap_process_reqseq(chan, control->reqseq);
5915
5916 if (!skb_queue_empty(&chan->tx_q))
5917 chan->tx_send_head = skb_peek(&chan->tx_q);
5918 else
5919 chan->tx_send_head = NULL;
5920
5921 /* Rewind next_tx_seq to the point expected
5922 * by the receiver.
5923 */
5924 chan->next_tx_seq = control->reqseq;
5925 chan->unacked_frames = 0;
5926
5927 if (chan->hs_hcon)
5928 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5929 else
5930 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5931
5932 err = l2cap_resegment(chan);
5933
5934 if (!err)
5935 err = l2cap_rx_state_recv(chan, control, skb, event);
5936
5937 return err;
5938 }
5939
5940 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5941 {
5942 /* Make sure reqseq is for a packet that has been sent but not acked */
5943 u16 unacked;
5944
5945 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5946 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5947 }
5948
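     /* Entry point of the ERTM receive state machine. The reqseq carried
      * by the frame is validated against the unacked window first; a
      * bogus reqseq forces a disconnect, otherwise the event is handed to
      * the handler for the current rx_state (RECV, SREJ_SENT, WAIT_P or
      * WAIT_F).
      */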
5949 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5950 struct sk_buff *skb, u8 event)
5951 {
5952 int err = 0;
5953
5954 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5955 control, skb, event, chan->rx_state);
5956
5957 if (__valid_reqseq(chan, control->reqseq)) {
5958 switch (chan->rx_state) {
5959 case L2CAP_RX_STATE_RECV:
5960 err = l2cap_rx_state_recv(chan, control, skb, event);
5961 break;
5962 case L2CAP_RX_STATE_SREJ_SENT:
5963 err = l2cap_rx_state_srej_sent(chan, control, skb,
5964 event);
5965 break;
5966 case L2CAP_RX_STATE_WAIT_P:
5967 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5968 break;
5969 case L2CAP_RX_STATE_WAIT_F:
5970 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5971 break;
5972 default:
5973 /* shut it down */
5974 break;
5975 }
5976 } else {
5977 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5978 control->reqseq, chan->next_tx_seq,
5979 chan->expected_ack_seq);
5980 l2cap_send_disconn_req(chan, ECONNRESET);
5981 }
5982
5983 return err;
5984 }
5985
5986 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5987 struct sk_buff *skb)
5988 {
5989 int err = 0;
5990
5991 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5992 chan->rx_state);
5993
5994 if (l2cap_classify_txseq(chan, control->txseq) ==
5995 L2CAP_TXSEQ_EXPECTED) {
5996 l2cap_pass_to_tx(chan, control);
5997
5998 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5999 __next_seq(chan, chan->buffer_seq));
6000
6001 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6002
6003 l2cap_reassemble_sdu(chan, skb, control);
6004 } else {
6005 if (chan->sdu) {
6006 kfree_skb(chan->sdu);
6007 chan->sdu = NULL;
6008 }
6009 chan->sdu_last_frag = NULL;
6010 chan->sdu_len = 0;
6011
6012 if (skb) {
6013 BT_DBG("Freeing %p", skb);
6014 kfree_skb(skb);
6015 }
6016 }
6017
6018 chan->last_acked_seq = control->txseq;
6019 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6020
6021 return err;
6022 }
6023
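     /* Receive path for ERTM and streaming mode channels: unpack the
      * control field, verify the FCS, check the payload against the
      * negotiated MPS, then feed I-frames into l2cap_rx() or
      * l2cap_stream_rx() and map S-frames to the matching receive event.
      * Malformed frames are either silently dropped or, when they signal
      * a protocol violation, answered with a disconnect request.
      */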
6024 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6025 {
6026 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6027 u16 len;
6028 u8 event;
6029
6030 __unpack_control(chan, skb);
6031
6032 len = skb->len;
6033
6034 	/*
6035 	 * We can just drop a corrupted I-frame here: it will show up as
6036 	 * a missing sequence number, and the normal ERTM recovery
6037 	 * procedure will ask the peer to retransmit it.
6038 	 */
6039 if (l2cap_check_fcs(chan, skb))
6040 goto drop;
6041
6042 if (!control->sframe && control->sar == L2CAP_SAR_START)
6043 len -= L2CAP_SDULEN_SIZE;
6044
6045 if (chan->fcs == L2CAP_FCS_CRC16)
6046 len -= L2CAP_FCS_SIZE;
6047
6048 if (len > chan->mps) {
6049 l2cap_send_disconn_req(chan, ECONNRESET);
6050 goto drop;
6051 }
6052
6053 if (!control->sframe) {
6054 int err;
6055
6056 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6057 control->sar, control->reqseq, control->final,
6058 control->txseq);
6059
6060 /* Validate F-bit - F=0 always valid, F=1 only
6061 * valid in TX WAIT_F
6062 */
6063 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6064 goto drop;
6065
6066 if (chan->mode != L2CAP_MODE_STREAMING) {
6067 event = L2CAP_EV_RECV_IFRAME;
6068 err = l2cap_rx(chan, control, skb, event);
6069 } else {
6070 err = l2cap_stream_rx(chan, control, skb);
6071 }
6072
6073 if (err)
6074 l2cap_send_disconn_req(chan, ECONNRESET);
6075 } else {
6076 const u8 rx_func_to_event[4] = {
6077 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6078 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6079 };
6080
6081 /* Only I-frames are expected in streaming mode */
6082 if (chan->mode == L2CAP_MODE_STREAMING)
6083 goto drop;
6084
6085 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6086 control->reqseq, control->final, control->poll,
6087 control->super);
6088
6089 if (len != 0) {
6090 BT_ERR("Trailing bytes: %d in sframe", len);
6091 l2cap_send_disconn_req(chan, ECONNRESET);
6092 goto drop;
6093 }
6094
6095 /* Validate F and P bits */
6096 if (control->final && (control->poll ||
6097 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6098 goto drop;
6099
6100 event = rx_func_to_event[control->super];
6101 if (l2cap_rx(chan, control, skb, event))
6102 l2cap_send_disconn_req(chan, ECONNRESET);
6103 }
6104
6105 return 0;
6106
6107 drop:
6108 kfree_skb(skb);
6109 return 0;
6110 }
6111
6112 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6113 struct sk_buff *skb)
6114 {
6115 struct l2cap_chan *chan;
6116
6117 chan = l2cap_get_chan_by_scid(conn, cid);
6118 if (!chan) {
6119 if (cid == L2CAP_CID_A2MP) {
6120 chan = a2mp_channel_create(conn, skb);
6121 if (!chan) {
6122 kfree_skb(skb);
6123 return;
6124 }
6125
6126 l2cap_chan_lock(chan);
6127 } else {
6128 BT_DBG("unknown cid 0x%4.4x", cid);
6129 /* Drop packet and return */
6130 kfree_skb(skb);
6131 return;
6132 }
6133 }
6134
6135 BT_DBG("chan %p, len %d", chan, skb->len);
6136
6137 if (chan->state != BT_CONNECTED)
6138 goto drop;
6139
6140 switch (chan->mode) {
6141 case L2CAP_MODE_BASIC:
6142 		/* If the socket recv buffer overflows we drop data here,
6143 		 * which is *bad* because L2CAP has to be reliable.
6144 		 * But we don't have any other choice: L2CAP doesn't
6145 		 * provide a flow control mechanism. */
6146
6147 if (chan->imtu < skb->len)
6148 goto drop;
6149
6150 if (!chan->ops->recv(chan, skb))
6151 goto done;
6152 break;
6153
6154 case L2CAP_MODE_ERTM:
6155 case L2CAP_MODE_STREAMING:
6156 l2cap_data_rcv(chan, skb);
6157 goto done;
6158
6159 default:
6160 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6161 break;
6162 }
6163
6164 drop:
6165 kfree_skb(skb);
6166
6167 done:
6168 l2cap_chan_unlock(chan);
6169 }
6170
6171 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6172 struct sk_buff *skb)
6173 {
6174 struct l2cap_chan *chan;
6175
6176 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6177 if (!chan)
6178 goto drop;
6179
6180 BT_DBG("chan %p, len %d", chan, skb->len);
6181
6182 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6183 goto drop;
6184
6185 if (chan->imtu < skb->len)
6186 goto drop;
6187
6188 if (!chan->ops->recv(chan, skb))
6189 return;
6190
6191 drop:
6192 kfree_skb(skb);
6193 }
6194
6195 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6196 struct sk_buff *skb)
6197 {
6198 struct l2cap_chan *chan;
6199
6200 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6201 if (!chan)
6202 goto drop;
6203
6204 BT_DBG("chan %p, len %d", chan, skb->len);
6205
6206 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6207 goto drop;
6208
6209 if (chan->imtu < skb->len)
6210 goto drop;
6211
6212 if (!chan->ops->recv(chan, skb))
6213 return;
6214
6215 drop:
6216 kfree_skb(skb);
6217 }
6218
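     /* Demultiplex a complete L2CAP frame by CID: signaling PDUs, the
      * connectionless channel, the LE attribute channel and the SMP
      * channel have dedicated handlers, everything else is treated as
      * connection-oriented data.
      */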
6219 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6220 {
6221 struct l2cap_hdr *lh = (void *) skb->data;
6222 u16 cid, len;
6223 __le16 psm;
6224
6225 skb_pull(skb, L2CAP_HDR_SIZE);
6226 cid = __le16_to_cpu(lh->cid);
6227 len = __le16_to_cpu(lh->len);
6228
6229 if (len != skb->len) {
6230 kfree_skb(skb);
6231 return;
6232 }
6233
6234 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6235
6236 switch (cid) {
6237 case L2CAP_CID_LE_SIGNALING:
6238 case L2CAP_CID_SIGNALING:
6239 l2cap_sig_channel(conn, skb);
6240 break;
6241
6242 case L2CAP_CID_CONN_LESS:
6243 psm = get_unaligned((__le16 *) skb->data);
6244 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6245 l2cap_conless_channel(conn, psm, skb);
6246 break;
6247
6248 case L2CAP_CID_LE_DATA:
6249 l2cap_att_channel(conn, cid, skb);
6250 break;
6251
6252 case L2CAP_CID_SMP:
6253 if (smp_sig_channel(conn, skb))
6254 l2cap_conn_del(conn->hcon, EACCES);
6255 break;
6256
6257 default:
6258 l2cap_data_channel(conn, cid, skb);
6259 break;
6260 }
6261 }
6262
6263 /* ---- L2CAP interface with lower layer (HCI) ---- */
6264
6265 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6266 {
6267 int exact = 0, lm1 = 0, lm2 = 0;
6268 struct l2cap_chan *c;
6269
6270 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6271
6272 /* Find listening sockets and check their link_mode */
6273 read_lock(&chan_list_lock);
6274 list_for_each_entry(c, &chan_list, global_l) {
6275 struct sock *sk = c->sk;
6276
6277 if (c->state != BT_LISTEN)
6278 continue;
6279
6280 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6281 lm1 |= HCI_LM_ACCEPT;
6282 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6283 lm1 |= HCI_LM_MASTER;
6284 exact++;
6285 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6286 lm2 |= HCI_LM_ACCEPT;
6287 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6288 lm2 |= HCI_LM_MASTER;
6289 }
6290 }
6291 read_unlock(&chan_list_lock);
6292
6293 return exact ? lm1 : lm2;
6294 }
6295
6296 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6297 {
6298 struct l2cap_conn *conn;
6299
6300 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6301
6302 if (!status) {
6303 conn = l2cap_conn_add(hcon, status);
6304 if (conn)
6305 l2cap_conn_ready(conn);
6306 } else {
6307 l2cap_conn_del(hcon, bt_to_errno(status));
6308 }
6309 }
6310
6311 int l2cap_disconn_ind(struct hci_conn *hcon)
6312 {
6313 struct l2cap_conn *conn = hcon->l2cap_data;
6314
6315 BT_DBG("hcon %p", hcon);
6316
6317 if (!conn)
6318 return HCI_ERROR_REMOTE_USER_TERM;
6319 return conn->disc_reason;
6320 }
6321
6322 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6323 {
6324 BT_DBG("hcon %p reason %d", hcon, reason);
6325
6326 l2cap_conn_del(hcon, bt_to_errno(reason));
6327 }
6328
6329 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6330 {
6331 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6332 return;
6333
6334 if (encrypt == 0x00) {
6335 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6336 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6337 } else if (chan->sec_level == BT_SECURITY_HIGH)
6338 l2cap_chan_close(chan, ECONNREFUSED);
6339 } else {
6340 if (chan->sec_level == BT_SECURITY_MEDIUM)
6341 __clear_chan_timer(chan);
6342 }
6343 }
6344
6345 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6346 {
6347 struct l2cap_conn *conn = hcon->l2cap_data;
6348 struct l2cap_chan *chan;
6349
6350 if (!conn)
6351 return 0;
6352
6353 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6354
6355 if (hcon->type == LE_LINK) {
6356 if (!status && encrypt)
6357 smp_distribute_keys(conn, 0);
6358 cancel_delayed_work(&conn->security_timer);
6359 }
6360
6361 mutex_lock(&conn->chan_lock);
6362
6363 list_for_each_entry(chan, &conn->chan_l, list) {
6364 l2cap_chan_lock(chan);
6365
6366 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6367 state_to_string(chan->state));
6368
6369 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6370 l2cap_chan_unlock(chan);
6371 continue;
6372 }
6373
6374 if (chan->scid == L2CAP_CID_LE_DATA) {
6375 if (!status && encrypt) {
6376 chan->sec_level = hcon->sec_level;
6377 l2cap_chan_ready(chan);
6378 }
6379
6380 l2cap_chan_unlock(chan);
6381 continue;
6382 }
6383
6384 if (!__l2cap_no_conn_pending(chan)) {
6385 l2cap_chan_unlock(chan);
6386 continue;
6387 }
6388
6389 if (!status && (chan->state == BT_CONNECTED ||
6390 chan->state == BT_CONFIG)) {
6391 struct sock *sk = chan->sk;
6392
6393 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6394 sk->sk_state_change(sk);
6395
6396 l2cap_check_encryption(chan, encrypt);
6397 l2cap_chan_unlock(chan);
6398 continue;
6399 }
6400
6401 if (chan->state == BT_CONNECT) {
6402 if (!status) {
6403 l2cap_start_connection(chan);
6404 } else {
6405 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6406 }
6407 } else if (chan->state == BT_CONNECT2) {
6408 struct sock *sk = chan->sk;
6409 struct l2cap_conn_rsp rsp;
6410 __u16 res, stat;
6411
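/* A connect request is still awaiting a response: answer it now based
 * on the security result, or keep it pending if the socket defers
 * setup to userspace.
 */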
6412 lock_sock(sk);
6413
6414 if (!status) {
6415 if (test_bit(BT_SK_DEFER_SETUP,
6416 &bt_sk(sk)->flags)) {
6417 res = L2CAP_CR_PEND;
6418 stat = L2CAP_CS_AUTHOR_PEND;
6419 chan->ops->defer(chan);
6420 } else {
6421 __l2cap_state_change(chan, BT_CONFIG);
6422 res = L2CAP_CR_SUCCESS;
6423 stat = L2CAP_CS_NO_INFO;
6424 }
6425 } else {
6426 __l2cap_state_change(chan, BT_DISCONN);
6427 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6428 res = L2CAP_CR_SEC_BLOCK;
6429 stat = L2CAP_CS_NO_INFO;
6430 }
6431
6432 release_sock(sk);
6433
6434 rsp.scid = cpu_to_le16(chan->dcid);
6435 rsp.dcid = cpu_to_le16(chan->scid);
6436 rsp.result = cpu_to_le16(res);
6437 rsp.status = cpu_to_le16(stat);
6438 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6439 sizeof(rsp), &rsp);
6440
6441 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6442 res == L2CAP_CR_SUCCESS) {
6443 char buf[128];
6444 set_bit(CONF_REQ_SENT, &chan->conf_state);
6445 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6446 L2CAP_CONF_REQ,
6447 l2cap_build_conf_req(chan, buf),
6448 buf);
6449 chan->num_conf_req++;
6450 }
6451 }
6452
6453 l2cap_chan_unlock(chan);
6454 }
6455
6456 mutex_unlock(&conn->chan_lock);
6457
6458 return 0;
6459 }
6460
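/* Entry point for ACL data from the HCI layer: reassemble fragmented
 * L2CAP frames (ACL_START/ACL_CONT) into conn->rx_skb and hand each
 * complete frame to l2cap_recv_frame().
 */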
6461 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6462 {
6463 struct l2cap_conn *conn = hcon->l2cap_data;
6464 struct l2cap_hdr *hdr;
6465 int len;
6466
6467 /* For AMP controllers do not create an L2CAP connection */
6468 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6469 goto drop;
6470
6471 if (!conn)
6472 conn = l2cap_conn_add(hcon, 0);
6473
6474 if (!conn)
6475 goto drop;
6476
6477 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6478
6479 switch (flags) {
6480 case ACL_START:
6481 case ACL_START_NO_FLUSH:
6482 case ACL_COMPLETE:
6483 if (conn->rx_len) {
6484 BT_ERR("Unexpected start frame (len %d)", skb->len);
6485 kfree_skb(conn->rx_skb);
6486 conn->rx_skb = NULL;
6487 conn->rx_len = 0;
6488 l2cap_conn_unreliable(conn, ECOMM);
6489 }
6490
6491 /* A start fragment always begins with the Basic L2CAP header */
6492 if (skb->len < L2CAP_HDR_SIZE) {
6493 BT_ERR("Frame is too short (len %d)", skb->len);
6494 l2cap_conn_unreliable(conn, ECOMM);
6495 goto drop;
6496 }
6497
6498 hdr = (struct l2cap_hdr *) skb->data;
6499 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6500
6501 if (len == skb->len) {
6502 /* Complete frame received */
6503 l2cap_recv_frame(conn, skb);
6504 return 0;
6505 }
6506
6507 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6508
6509 if (skb->len > len) {
6510 BT_ERR("Frame is too long (len %d, expected len %d)",
6511 skb->len, len);
6512 l2cap_conn_unreliable(conn, ECOMM);
6513 goto drop;
6514 }
6515
6516 /* Allocate skb for the complete frame (with header) */
6517 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6518 if (!conn->rx_skb)
6519 goto drop;
6520
6521 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6522 skb->len);
6523 conn->rx_len = len - skb->len;
6524 break;
6525
6526 case ACL_CONT:
6527 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6528
6529 if (!conn->rx_len) {
6530 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6531 l2cap_conn_unreliable(conn, ECOMM);
6532 goto drop;
6533 }
6534
6535 if (skb->len > conn->rx_len) {
6536 BT_ERR("Fragment is too long (len %d, expected %d)",
6537 skb->len, conn->rx_len);
6538 kfree_skb(conn->rx_skb);
6539 conn->rx_skb = NULL;
6540 conn->rx_len = 0;
6541 l2cap_conn_unreliable(conn, ECOMM);
6542 goto drop;
6543 }
6544
6545 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6546 skb->len);
6547 conn->rx_len -= skb->len;
6548
6549 if (!conn->rx_len) {
6550 /* Complete frame received */
6551 l2cap_recv_frame(conn, conn->rx_skb);
6552 conn->rx_skb = NULL;
6553 }
6554 break;
6555 }
6556
6557 drop:
6558 kfree_skb(skb);
6559 return 0;
6560 }
6561
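/* debugfs dump: one line per known channel with its addresses, state,
 * PSM, CIDs, MTUs, security level and mode.
 */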
6562 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6563 {
6564 struct l2cap_chan *c;
6565
6566 read_lock(&chan_list_lock);
6567
6568 list_for_each_entry(c, &chan_list, global_l) {
6569 struct sock *sk = c->sk;
6570
6571 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6572 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6573 c->state, __le16_to_cpu(c->psm),
6574 c->scid, c->dcid, c->imtu, c->omtu,
6575 c->sec_level, c->mode);
6576 }
6577
6578 read_unlock(&chan_list_lock);
6579
6580 return 0;
6581 }
6582
6583 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6584 {
6585 return single_open(file, l2cap_debugfs_show, inode->i_private);
6586 }
6587
6588 static const struct file_operations l2cap_debugfs_fops = {
6589 .open = l2cap_debugfs_open,
6590 .read = seq_read,
6591 .llseek = seq_lseek,
6592 .release = single_release,
6593 };
6594
6595 static struct dentry *l2cap_debugfs;
6596
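/* Register the L2CAP socket interface and create the optional "l2cap"
 * debugfs entry; failure to create the debugfs file is only logged.
 */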
6597 int __init l2cap_init(void)
6598 {
6599 int err;
6600
6601 err = l2cap_init_sockets();
6602 if (err < 0)
6603 return err;
6604
6605 if (bt_debugfs) {
6606 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6607 NULL, &l2cap_debugfs_fops);
6608 if (!l2cap_debugfs)
6609 BT_ERR("Failed to create L2CAP debug file");
6610 }
6611
6612 return 0;
6613 }
6614
6615 void l2cap_exit(void)
6616 {
6617 debugfs_remove(l2cap_debugfs);
6618 l2cap_cleanup_sockets();
6619 }
6620
6621 module_param(disable_ertm, bool, 0644);
6622 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");