Merge branch 'for-linville' of git://github.com/kvalo/ath6kl
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>
#include <linux/slab.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>
42
/* Module parameter: when set, Enhanced Retransmission Mode is not offered. */
bool disable_ertm;

/* Locally supported feature mask advertised in L2CAP_IT_FEAT_MASK replies. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; only the L2CAP signalling channel by default. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for the signalling helpers used before their
 * definitions below.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
61
62 /* ---- L2CAP channels ---- */
63
64 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
65 u16 cid)
66 {
67 struct l2cap_chan *c;
68
69 list_for_each_entry(c, &conn->chan_l, list) {
70 if (c->dcid == cid)
71 return c;
72 }
73 return NULL;
74 }
75
76 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
77 u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->scid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 /* Find channel with given SCID.
89 * Returns locked channel. */
90 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
91 u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 mutex_lock(&conn->chan_lock);
96 c = __l2cap_get_chan_by_scid(conn, cid);
97 if (c)
98 l2cap_chan_lock(c);
99 mutex_unlock(&conn->chan_lock);
100
101 return c;
102 }
103
104 /* Find channel with given DCID.
105 * Returns locked channel.
106 */
107 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
108 u16 cid)
109 {
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_dcid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119 }
120
121 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
122 u8 ident)
123 {
124 struct l2cap_chan *c;
125
126 list_for_each_entry(c, &conn->chan_l, list) {
127 if (c->ident == ident)
128 return c;
129 }
130 return NULL;
131 }
132
133 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135 {
136 struct l2cap_chan *c;
137
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_ident(conn, ident);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
143
144 return c;
145 }
146
147 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 {
149 struct l2cap_chan *c;
150
151 list_for_each_entry(c, &chan_list, global_l) {
152 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
153 return c;
154 }
155 return NULL;
156 }
157
158 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
159 {
160 int err;
161
162 write_lock(&chan_list_lock);
163
164 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
165 err = -EADDRINUSE;
166 goto done;
167 }
168
169 if (psm) {
170 chan->psm = psm;
171 chan->sport = psm;
172 err = 0;
173 } else {
174 u16 p;
175
176 err = -EINVAL;
177 for (p = 0x1001; p < 0x1100; p += 2)
178 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
179 chan->psm = cpu_to_le16(p);
180 chan->sport = cpu_to_le16(p);
181 err = 0;
182 break;
183 }
184 }
185
186 done:
187 write_unlock(&chan_list_lock);
188 return err;
189 }
190
/* Assign a fixed source CID to a channel (used for fixed channels that
 * are not dynamically allocated).  Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
201
202 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 {
204 u16 cid = L2CAP_CID_DYN_START;
205
206 for (; cid < L2CAP_CID_DYN_END; cid++) {
207 if (!__l2cap_get_chan_by_scid(conn, cid))
208 return cid;
209 }
210
211 return 0;
212 }
213
/* Set the channel state and notify the owner through the state_change
 * callback.  Caller must hold the socket lock (see l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
222
/* Socket-locking wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
231
232 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 {
234 struct sock *sk = chan->sk;
235
236 sk->sk_err = err;
237 }
238
/* Socket-locking wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
247
/* Arm the ERTM retransmission timer.
 *
 * The timer is not armed while the monitor timer is already pending
 * (the monitor timer supersedes it once a poll is outstanding), and a
 * retrans_timeout of 0 disables retransmission timing entirely.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
256
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first - only one of the two runs at a time.  A monitor_timeout
 * of 0 disables monitoring.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
265
266 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
267 u16 seq)
268 {
269 struct sk_buff *skb;
270
271 skb_queue_walk(head, skb) {
272 if (bt_cb(skb)->control.txseq == seq)
273 return skb;
274 }
275
276 return NULL;
277 }
278
279 /* ---- L2CAP sequence number lists ---- */
280
281 /* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286 * and removed from the head in constant time, without further memory
287 * allocs or frees.
288 */
289
290 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 {
292 size_t alloc_size, i;
293
294 /* Allocated size is a power of 2 to map sequence numbers
295 * (which may be up to 14 bits) in to a smaller array that is
296 * sized for the negotiated ERTM transmit windows.
297 */
298 alloc_size = roundup_pow_of_two(size);
299
300 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
301 if (!seq_list->list)
302 return -ENOMEM;
303
304 seq_list->mask = alloc_size - 1;
305 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 for (i = 0; i < alloc_size; i++)
308 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
309
310 return 0;
311 }
312
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
317
/* Test whether @seq is currently queued in @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
324
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Removing the head is O(1);
 * removing an interior element walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the new head is the tail sentinel, the list is empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
358
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
364
365 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366 {
367 u16 i;
368
369 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
370 return;
371
372 for (i = 0; i <= seq_list->mask; i++)
373 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374
375 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
377 }
378
/* Append @seq to the tail of the list in O(1).  Duplicate appends are
 * silently ignored (the slot is already occupied).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
396
/* Channel timer expiry handler (runs from the workqueue).
 *
 * Maps the channel state to an error reason, closes the channel, and
 * drops the reference taken when the timer was armed.  Lock order is
 * conn->chan_lock first, then the channel lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* Notify the owner after the channel lock is released */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held by the armed timer */
	l2cap_chan_put(chan);
}
426
/* Allocate and initialize a new channel, link it into the global channel
 * list, and return it with an initial reference.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
454
/* Final kref release: unlink the channel from the global list and free
 * it.  Invoked via l2cap_chan_put() when the last reference drops.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
467
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
474
/* Drop a reference; frees the channel via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
481
/* Apply the default ERTM/security parameters to a freshly created
 * channel.  Values may be renegotiated during configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
493
/* Attach a channel to a connection: assign CIDs/MTUs according to the
 * channel type, set default EFS parameters, take a reference and link it
 * into the connection's channel list.  Caller must hold conn->chan_lock
 * (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* A new channel implies the link is in use again */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec (best-effort service) */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
549
/* Locking wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
556
/* Detach a channel from its connection and tear down its transmit state.
 *
 * Unlinks the channel from conn->chan_l (dropping the list's reference),
 * disconnects any AMP logical link, notifies the owner via teardown(),
 * and - unless configuration never completed - purges ERTM/streaming
 * queues and timers.  Caller must hold conn->chan_lock and the channel
 * lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* The A2MP fixed channel does not hold an hci_conn ref */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_put(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Configuration never completed: no queues/timers were set up */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
616
/* Close a channel, following the proper shutdown sequence for its
 * current state: send a disconnect request for connected ACL channels,
 * reject a half-open incoming connection, or simply delete the channel.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Wait (bounded by sk_sndtimeo) for the disconnect
			 * response before the channel is deleted.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
673
674 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
675 {
676 if (chan->chan_type == L2CAP_CHAN_RAW) {
677 switch (chan->sec_level) {
678 case BT_SECURITY_HIGH:
679 return HCI_AT_DEDICATED_BONDING_MITM;
680 case BT_SECURITY_MEDIUM:
681 return HCI_AT_DEDICATED_BONDING;
682 default:
683 return HCI_AT_NO_BONDING;
684 }
685 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
686 if (chan->sec_level == BT_SECURITY_LOW)
687 chan->sec_level = BT_SECURITY_SDP;
688
689 if (chan->sec_level == BT_SECURITY_HIGH)
690 return HCI_AT_NO_BONDING_MITM;
691 else
692 return HCI_AT_NO_BONDING;
693 } else {
694 switch (chan->sec_level) {
695 case BT_SECURITY_HIGH:
696 return HCI_AT_GENERAL_BONDING_MITM;
697 case BT_SECURITY_MEDIUM:
698 return HCI_AT_GENERAL_BONDING;
699 default:
700 return HCI_AT_NO_BONDING;
701 }
702 }
703 }
704
/* Service level security */
/* Request the link security required by this channel.  Returns the
 * result of hci_conn_security() (non-zero when the link already meets
 * the requirement).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
715
/* Allocate the next signalling-command identifier for this connection,
 * cycling through 1..128 under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
737
/* Build and transmit an L2CAP signalling command on the connection's
 * HCI channel.  Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable packet boundary flag when the controller
	 * supports it.
	 */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is sent at highest priority, forcing the
	 * link out of sniff mode.
	 */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
759
760 static bool __chan_is_moving(struct l2cap_chan *chan)
761 {
762 return chan->move_state != L2CAP_MOVE_STABLE &&
763 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
764 }
765
/* Transmit a data skb on the channel, routing it to the AMP (high-speed)
 * HCI channel when one is in use, or to the BR/EDR ACL link otherwise.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* High-speed link gone: drop rather than misroute */
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
792
/* Decode a 16-bit enhanced control field into the per-skb l2cap_ctrl,
 * distinguishing S-frames (supervisory) from I-frames (information).
 * Fields that do not apply to the frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
816
/* Decode a 32-bit extended control field (used with extended window
 * sizes) into the per-skb l2cap_ctrl.  Mirrors
 * __unpack_enhanced_control() for the wider field layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
840
/* Pull and decode the control field at the front of @skb into
 * bt_cb(skb)->control, choosing the extended or enhanced layout based
 * on the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
854
855 static u32 __pack_extended_control(struct l2cap_ctrl *control)
856 {
857 u32 packed;
858
859 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
860 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
861
862 if (control->sframe) {
863 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
864 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
865 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
866 } else {
867 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
868 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
869 }
870
871 return packed;
872 }
873
874 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
875 {
876 u16 packed;
877
878 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
879 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
880
881 if (control->sframe) {
882 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
883 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
884 packed |= L2CAP_CTRL_FRAME_TYPE;
885 } else {
886 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
887 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
888 }
889
890 return packed;
891 }
892
893 static inline void __pack_control(struct l2cap_chan *chan,
894 struct l2cap_ctrl *control,
895 struct sk_buff *skb)
896 {
897 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
898 put_unaligned_le32(__pack_extended_control(control),
899 skb->data + L2CAP_HDR_SIZE);
900 } else {
901 put_unaligned_le16(__pack_enhanced_control(control),
902 skb->data + L2CAP_HDR_SIZE);
903 }
904 }
905
906 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
907 {
908 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
909 return L2CAP_EXT_HDR_SIZE;
910 else
911 return L2CAP_ENH_HDR_SIZE;
912 }
913
/* Build a supervisory (S-frame) PDU: basic header, control field in the
 * channel's configured width, and an FCS trailer when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
946
/* Send an ERTM supervisory frame, updating the channel's F-bit, RNR and
 * acknowledgement bookkeeping as a side effect.  No-op for non-S-frames
 * or while an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried by any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
987
988 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
989 {
990 struct l2cap_ctrl control;
991
992 BT_DBG("chan %p, poll %d", chan, poll);
993
994 memset(&control, 0, sizeof(control));
995 control.sframe = 1;
996 control.poll = poll;
997
998 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
999 control.super = L2CAP_SUPER_RNR;
1000 else
1001 control.super = L2CAP_SUPER_RR;
1002
1003 control.reqseq = chan->buffer_seq;
1004 l2cap_send_sframe(chan, &control);
1005 }
1006
/* True when no connection request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1011
1012 static bool __amp_capable(struct l2cap_chan *chan)
1013 {
1014 struct l2cap_conn *conn = chan->conn;
1015
1016 if (enable_hs &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1022 }
1023
/* Validate the negotiated Extended Flow Specification.
 * Currently a stub that accepts all parameters unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1029
/* Send an L2CAP Connection Request for this channel and mark the
 * request as pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched back */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1044
/* Send an L2CAP Create Channel Request targeting the given AMP
 * controller (used instead of a plain Connection Request for AMP).
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1057
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset the SREJ/retransmission bookkeeping, and mark every previously
 * transmitted frame for retransmission after the move.  No-op for
 * non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already sent at least once are reset to a single try;
	 * the first un-sent frame ends the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1092
/* Finish an AMP channel move: return the channel to the stable state
 * and, for ERTM, enter the poll/final exchange that resynchronizes the
 * two sides (initiator polls, responder waits for the poll).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1114
/* Transition a fully configured channel to BT_CONNECTED and notify the
 * owner.  Clearing conf_state also clears CONF_NOT_COMPLETE, which
 * enables the full teardown path in l2cap_chan_del().
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1125
/* Kick off the outgoing connection: discover AMP controllers first when
 * the channel can use high speed, otherwise send the connection request
 * directly.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1135
/* Drive the channel towards connected: LE channels are ready at once;
 * for BR/EDR, start the connection once the feature-mask exchange has
 * completed and security is satisfied, or kick off the information
 * request if it has not been sent yet.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait for the feature mask response before connecting */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the information response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1166
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1180 }
1181 }
1182
/* Send an L2CAP Disconnection Request for the channel, stop any ERTM
 * timers, and move the channel to BT_DISCONN with @err recorded on its
 * socket.  The A2MP fixed channel has no signalling exchange and only
 * changes state.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* Update state and error atomically w.r.t. the socket */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1213
1214 /* ---- L2CAP connections ---- */
/* Drive connection setup for every channel on @conn.
 *
 * Called when a blocking condition clears (e.g. the feature-mask
 * exchange or a security procedure completed).  Outgoing channels in
 * BT_CONNECT get their connect request sent; incoming channels in
 * BT_CONNECT2 get their pending connect response (and, on success, the
 * first configure request).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink the entry */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security pending or a request already sent */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Mode mandated by the socket but unsupported by
			 * the remote: the channel cannot be established.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must accept first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Still authenticating; tell the peer */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration once, and only after a
			 * successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1295
/* Find channel with given scid and source/destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 bdaddr_t *src,
1301 bdaddr_t *dst)
1302 {
1303 struct l2cap_chan *c, *c1 = NULL;
1304
1305 read_lock(&chan_list_lock);
1306
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1309
1310 if (state && c->state != state)
1311 continue;
1312
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1316
1317 /* Exact match. */
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1322 return c;
1323 }
1324
1325 /* Closest match */
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1330 c1 = c;
1331 }
1332 }
1333
1334 read_unlock(&chan_list_lock);
1335
1336 return c1;
1337 }
1338
/* Handle a newly established incoming LE link: if a socket is
 * listening on the LE data CID, spawn a child channel for it and mark
 * the channel ready (LE has no configuration stage).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL link alive while the channel exists */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1375
/* The underlying link came up: bring every channel on @conn forward.
 *
 * Incoming LE links may spawn a child channel; outgoing LE links start
 * their pending security procedure.  Each existing channel is then
 * advanced according to its type and state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Ready only once security is satisfied */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no signalling */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1420
/* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 struct l2cap_chan *chan;
1425
1426 BT_DBG("conn %p", conn);
1427
1428 mutex_lock(&conn->chan_lock);
1429
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1433 }
1434
1435 mutex_unlock(&conn->chan_lock);
1436 }
1437
/* The information request went unanswered: mark the feature-mask
 * exchange as done anyway so waiting channels are not stuck, then
 * retry channel setup with the (empty) feature mask.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1448
/* Tear down the L2CAP connection on @hcon, closing every channel with
 * error @err and releasing all connection resources.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the chan survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Timers were only armed if the matching procedure started */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1491
/* SMP pairing timed out: destroy the SMP context and tear the
 * connection down.  The test_and_clear of HCI_CONN_LE_SMP_PEND also
 * prevents l2cap_conn_del() from destroying the SMP context twice.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1504
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1506 {
1507 struct l2cap_conn *conn = hcon->l2cap_data;
1508 struct hci_chan *hchan;
1509
1510 if (conn || status)
1511 return conn;
1512
1513 hchan = hci_chan_create(hcon);
1514 if (!hchan)
1515 return NULL;
1516
1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 if (!conn) {
1519 hci_chan_del(hchan);
1520 return NULL;
1521 }
1522
1523 hcon->l2cap_data = conn;
1524 conn->hcon = hcon;
1525 conn->hchan = hchan;
1526
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528
1529 switch (hcon->type) {
1530 case AMP_LINK:
1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1533
1534 case LE_LINK:
1535 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu;
1537 break;
1538 }
1539 /* fall through */
1540
1541 default:
1542 conn->mtu = hcon->hdev->acl_mtu;
1543 break;
1544 }
1545
1546 conn->src = &hcon->hdev->bdaddr;
1547 conn->dst = &hcon->dst;
1548
1549 conn->feat_mask = 0;
1550
1551 spin_lock_init(&conn->lock);
1552 mutex_init(&conn->chan_lock);
1553
1554 INIT_LIST_HEAD(&conn->chan_l);
1555
1556 if (hcon->type == LE_LINK)
1557 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1558 else
1559 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1560
1561 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1562
1563 return conn;
1564 }
1565
1566 /* ---- Socket interface ---- */
1567
/* Find channel with given psm and source / destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1572 bdaddr_t *src,
1573 bdaddr_t *dst)
1574 {
1575 struct l2cap_chan *c, *c1 = NULL;
1576
1577 read_lock(&chan_list_lock);
1578
1579 list_for_each_entry(c, &chan_list, global_l) {
1580 struct sock *sk = c->sk;
1581
1582 if (state && c->state != state)
1583 continue;
1584
1585 if (c->psm == psm) {
1586 int src_match, dst_match;
1587 int src_any, dst_any;
1588
1589 /* Exact match. */
1590 src_match = !bacmp(&bt_sk(sk)->src, src);
1591 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 if (src_match && dst_match) {
1593 read_unlock(&chan_list_lock);
1594 return c;
1595 }
1596
1597 /* Closest match */
1598 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 if ((src_match && dst_any) || (src_any && dst_match) ||
1601 (src_any && dst_any))
1602 c1 = c;
1603 }
1604 }
1605
1606 read_unlock(&chan_list_lock);
1607
1608 return c1;
1609 }
1610
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * Validates the PSM/CID and channel mode/state, resolves a route to
 * @dst, creates (or reuses) the HCI link and the L2CAP connection
 * object, attaches the channel and starts the connect procedure.
 * Returns 0 on success (including "already connecting") or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are only permitted when not globally disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects the transport to connect over */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel may use the LE data CID */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which must be
	 * acquired before the channel lock — hence the unlock/relock.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1749
/* Block (interruptibly) until the peer has acknowledged all
 * outstanding ERTM frames, the channel goes away, a signal arrives or
 * a socket error is raised.  Called with the socket locked; the lock
 * is dropped around each sleep.  Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after each expiry */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so RX can make progress */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1781
/* ERTM monitor timer expired: feed the event into the TX state
 * machine.  The timer held a channel reference; drop it on exit.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection; nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1802
/* ERTM retransmission timer expired: feed the event into the TX state
 * machine.  The timer held a channel reference; drop it on exit.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection; nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1822
/* Transmit all PDUs in @skbs in streaming mode: frames are numbered
 * and sent immediately, with no retransmission or acknowledgement.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel moves between controllers */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1861
/* Transmit as many queued ERTM I-frames as the remote TX window
 * allows.  Returns the number of frames sent, or -ENOTCONN if the
 * channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer sent RNR: hold transmission until it reports ready */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Hold off while the channel moves between controllers */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges frames received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1931
/* Retransmit every frame currently queued on chan->retrans_list,
 * honoring the max_tx retry limit and the remote-busy condition.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Hold off while the channel moves between controllers */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up on the channel if the retry limit is hit */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit in the local control copy */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Control fields changed, so the FCS must be recomputed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2010
/* Retransmit the single frame the peer requested (control->reqseq),
 * e.g. in response to an SREJ.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2019
/* Retransmit every unacked frame starting at control->reqseq,
 * e.g. in response to an REJ or an expired poll.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll must be answered with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the first never-sent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2053
/* Acknowledge received I-frames, choosing the cheapest mechanism:
 * an RNR when locally busy, a piggybacked ack on outgoing I-frames,
 * an explicit RR once ~3/4 of the ack window is consumed, or a
 * deferred ack via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still below threshold: defer the ack via the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2103
/* Copy @len bytes of user data from @msg into @skb, allocating
 * continuation fragments (chained on frag_list) whenever a piece
 * exceeds the HCI MTU.  @count is the capacity already reserved in
 * @skb.  Returns the number of bytes copied or a negative errno; on
 * error any fragments already linked are freed with @skb by the
 * caller.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the fragment is freed with skb */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with its frags */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2148
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed
 * by the PSM, then the user payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* First fragment must also carry the header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2182
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 struct msghdr *msg, size_t len,
2185 u32 priority)
2186 {
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff *skb;
2189 int err, count;
2190 struct l2cap_hdr *lh;
2191
2192 BT_DBG("chan %p len %zu", chan, len);
2193
2194 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2195
2196 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 msg->msg_flags & MSG_DONTWAIT);
2198 if (IS_ERR(skb))
2199 return skb;
2200
2201 skb->priority = priority;
2202
2203 /* Create L2CAP header */
2204 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 lh->cid = cpu_to_le16(chan->dcid);
2206 lh->len = cpu_to_le16(len);
2207
2208 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 if (unlikely(err < 0)) {
2210 kfree_skb(skb);
2211 return ERR_PTR(err);
2212 }
2213 return skb;
2214 }
2215
/* Build an ERTM/streaming I-frame PDU.  @sdulen, when non-zero, marks
 * this as a start-of-SDU segment carrying the SDU length field.  The
 * control field is zero-filled here and populated at transmit time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve room for the trailing checksum if FCS is in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2269
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue,
 * applying the Start/Continue/End SAR marking.  Returns 0 on success
 * or a negative errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SAR needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments drop the SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2339
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2341 u32 priority)
2342 {
2343 struct sk_buff *skb;
2344 int err;
2345 struct sk_buff_head seg_queue;
2346
2347 /* Connectionless channel */
2348 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2350 if (IS_ERR(skb))
2351 return PTR_ERR(skb);
2352
2353 l2cap_do_send(chan, skb);
2354 return len;
2355 }
2356
2357 switch (chan->mode) {
2358 case L2CAP_MODE_BASIC:
2359 /* Check outgoing MTU */
2360 if (len > chan->omtu)
2361 return -EMSGSIZE;
2362
2363 /* Create a basic PDU */
2364 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2365 if (IS_ERR(skb))
2366 return PTR_ERR(skb);
2367
2368 l2cap_do_send(chan, skb);
2369 err = len;
2370 break;
2371
2372 case L2CAP_MODE_ERTM:
2373 case L2CAP_MODE_STREAMING:
2374 /* Check outgoing MTU */
2375 if (len > chan->omtu) {
2376 err = -EMSGSIZE;
2377 break;
2378 }
2379
2380 __skb_queue_head_init(&seg_queue);
2381
2382 /* Do segmentation before calling in to the state machine,
2383 * since it's possible to block while waiting for memory
2384 * allocation.
2385 */
2386 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2387
2388 /* The channel could have been closed while segmenting,
2389 * check that it is still connected.
2390 */
2391 if (chan->state != BT_CONNECTED) {
2392 __skb_queue_purge(&seg_queue);
2393 err = -ENOTCONN;
2394 }
2395
2396 if (err)
2397 break;
2398
2399 if (chan->mode == L2CAP_MODE_ERTM)
2400 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2401 else
2402 l2cap_streaming_send(chan, &seg_queue);
2403
2404 err = len;
2405
2406 /* If the skbs were not queued for sending, they'll still be in
2407 * seg_queue and need to be purged.
2408 */
2409 __skb_queue_purge(&seg_queue);
2410 break;
2411
2412 default:
2413 BT_DBG("bad state %1.1x", chan->mode);
2414 err = -EBADFD;
2415 }
2416
2417 return err;
2418 }
2419
/* Send an SREJ for every missing frame between the expected sequence
 * number and @txseq (exclusive), tracking each request on srej_list,
 * then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames not already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2442
/* Re-send an SREJ for the most recently requested missing frame
 * (the tail of srej_list), e.g. when a poll response is needed.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding to request */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2458
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2460 {
2461 struct l2cap_ctrl control;
2462 u16 initial_head;
2463 u16 seq;
2464
2465 BT_DBG("chan %p, txseq %u", chan, txseq);
2466
2467 memset(&control, 0, sizeof(control));
2468 control.sframe = 1;
2469 control.super = L2CAP_SUPER_SREJ;
2470
2471 /* Capture initial list head to allow only one pass through the list. */
2472 initial_head = chan->srej_list.head;
2473
2474 do {
2475 seq = l2cap_seq_list_pop(&chan->srej_list);
2476 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2477 break;
2478
2479 control.reqseq = seq;
2480 l2cap_send_sframe(chan, &control);
2481 l2cap_seq_list_append(&chan->srej_list, seq);
2482 } while (chan->srej_list.head != initial_head);
2483 }
2484
/* Process the acknowledgment (reqseq) carried by a received frame:
 * free every transmitted I-frame that reqseq acknowledges, advance
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing in flight, or this ack adds no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may no longer be in tx_q, so look it up
		 * before unlinking.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acknowledged: no retransmission to time anymore. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2516
/* Abandon the SREJ_SENT receive state: rewind the expected sequence
 * number to the receive-buffer position, drop all SREJ bookkeeping
 * and buffered out-of-order frames, and return the receiver to the
 * normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2526
/* TX state machine handler for the XMIT state (normal transmission).
 *
 * Handles new data requests, local-busy entry/exit, incoming
 * acknowledgments, and explicit-poll/retransmission-timeout events
 * while the channel is allowed to transmit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append new frames to the transmit queue and send as
		 * many as the window currently allows.
		 */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy: poll the peer with
			 * an RR (P=1) and wait for the F-bit reply.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer (P=1) to learn
		 * its receive state before retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2598
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 struct l2cap_ctrl *control,
2601 struct sk_buff_head *skbs, u8 event)
2602 {
2603 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2604 event);
2605
2606 switch (event) {
2607 case L2CAP_EV_DATA_REQUEST:
2608 if (chan->tx_send_head == NULL)
2609 chan->tx_send_head = skb_peek(skbs);
2610 /* Queue data, but don't send. */
2611 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2612 break;
2613 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 BT_DBG("Enter LOCAL_BUSY");
2615 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2616
2617 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 /* The SREJ_SENT state must be aborted if we are to
2619 * enter the LOCAL_BUSY state.
2620 */
2621 l2cap_abort_rx_srej_sent(chan);
2622 }
2623
2624 l2cap_send_ack(chan);
2625
2626 break;
2627 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 BT_DBG("Exit LOCAL_BUSY");
2629 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2630
2631 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 struct l2cap_ctrl local_control;
2633 memset(&local_control, 0, sizeof(local_control));
2634 local_control.sframe = 1;
2635 local_control.super = L2CAP_SUPER_RR;
2636 local_control.poll = 1;
2637 local_control.reqseq = chan->buffer_seq;
2638 l2cap_send_sframe(chan, &local_control);
2639
2640 chan->retry_count = 1;
2641 __set_monitor_timer(chan);
2642 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2643 }
2644 break;
2645 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 l2cap_process_reqseq(chan, control->reqseq);
2647
2648 /* Fall through */
2649
2650 case L2CAP_EV_RECV_FBIT:
2651 if (control && control->final) {
2652 __clear_monitor_timer(chan);
2653 if (chan->unacked_frames > 0)
2654 __set_retrans_timer(chan);
2655 chan->retry_count = 0;
2656 chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2658 }
2659 break;
2660 case L2CAP_EV_EXPLICIT_POLL:
2661 /* Ignore */
2662 break;
2663 case L2CAP_EV_MONITOR_TO:
2664 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 l2cap_send_rr_or_rnr(chan, 1);
2666 __set_monitor_timer(chan);
2667 chan->retry_count++;
2668 } else {
2669 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2670 }
2671 break;
2672 default:
2673 break;
2674 }
2675 }
2676
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 struct sk_buff_head *skbs, u8 event)
2679 {
2680 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 chan, control, skbs, event, chan->tx_state);
2682
2683 switch (chan->tx_state) {
2684 case L2CAP_TX_STATE_XMIT:
2685 l2cap_tx_state_xmit(chan, control, skbs, event);
2686 break;
2687 case L2CAP_TX_STATE_WAIT_F:
2688 l2cap_tx_state_wait_f(chan, control, skbs, event);
2689 break;
2690 default:
2691 /* Ignore event */
2692 break;
2693 }
2694 }
2695
/* Feed the reqseq/F-bit of a received frame into the TX state machine
 * so acknowledged frames can be released (no data is queued).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2702
/* Feed only the F-bit of a received frame into the TX state machine
 * (the reqseq field is not processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2709
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the connection's channel list during the
	 * walk below.
	 */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw channel receives its own clone; skipping a
		 * channel on allocation failure is best-effort by design.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;

		/* A non-zero recv() return means the channel did not
		 * take ownership of the clone, so free it here.
		 */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2738
2739 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU: L2CAP header + command
 * header + dlen bytes of payload copied from data.
 *
 * The PDU is addressed to the fixed signalling CID (LE or BR/EDR
 * depending on the link type). Payload that does not fit within the
 * connection MTU is split into continuation fragments chained on the
 * skb's frag_list.
 *
 * Returns the skb on success, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* The first fragment carries whatever payload fits
		 * after the two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Remaining payload bytes still to be copied. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb together with any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
2802
/* Decode a single configuration option at *ptr and advance *ptr past
 * it.
 *
 * *type and *olen receive the option header fields. For 1-, 2- and
 * 4-byte options, *val receives the little-endian-decoded value; for
 * any other length, *val receives a pointer to the raw option bytes.
 *
 * Returns the total encoded size (header + payload) that was consumed.
 *
 * NOTE(review): *ptr is advanced by opt->len with no check against
 * the remaining buffer, so a crafted option length can move the
 * cursor beyond the received data; callers only track a signed
 * remaining-length counter. Worth confirming against the bounds
 * checking later added upstream to this parser.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-scalar option: hand back a pointer to the bytes. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2836
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.
 *
 * For len of 1, 2 or 4, val itself is stored little-endian; any other
 * length treats val as a pointer to raw bytes to copy.
 *
 * NOTE(review): there is no output-buffer bound here -- the caller
 * must guarantee that L2CAP_CONF_OPT_SIZE + len bytes remain at *ptr.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2866
2867 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2868 {
2869 struct l2cap_conf_efs efs;
2870
2871 switch (chan->mode) {
2872 case L2CAP_MODE_ERTM:
2873 efs.id = chan->local_id;
2874 efs.stype = chan->local_stype;
2875 efs.msdu = cpu_to_le16(chan->local_msdu);
2876 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2877 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2878 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2879 break;
2880
2881 case L2CAP_MODE_STREAMING:
2882 efs.id = 1;
2883 efs.stype = L2CAP_SERV_BESTEFFORT;
2884 efs.msdu = cpu_to_le16(chan->local_msdu);
2885 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2886 efs.acc_lat = 0;
2887 efs.flush_to = 0;
2888 break;
2889
2890 default:
2891 return;
2892 }
2893
2894 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2895 (unsigned long) &efs);
2896 }
2897
/* Ack-timer work item: if any received I-frames are still
 * unacknowledged when the timer fires, send an RR/RNR so the peer is
 * not left waiting. Drops the channel reference that was taken when
 * the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2917
/* Reset all ERTM/streaming sequence state for a channel and, for ERTM
 * mode only, initialize the timers, queues and sequence lists.
 *
 * Returns 0 on success or a negative error from the sequence-list
 * allocations (the first list is freed if the second one fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channel-move (AMP) state starts out stable on controller 0. */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming/basic channels need none of the ERTM machinery. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2962
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2964 {
2965 switch (mode) {
2966 case L2CAP_MODE_STREAMING:
2967 case L2CAP_MODE_ERTM:
2968 if (l2cap_mode_supported(mode, remote_feat_mask))
2969 return mode;
2970 /* fall through */
2971 default:
2972 return L2CAP_MODE_BASIC;
2973 }
2974 }
2975
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2977 {
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2979 }
2980
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2982 {
2983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2984 }
2985
/* Fill in the RFC option's retransmission and monitor timeouts.
 *
 * On an AMP (high-speed) link the timeouts are derived from the
 * controller's best-effort flush timeout; otherwise the default
 * constants are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC timeout fields. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3023
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3025 {
3026 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 __l2cap_ews_supported(chan)) {
3028 /* use extended control field */
3029 set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3031 } else {
3032 chan->tx_win = min_t(u16, chan->tx_win,
3033 L2CAP_DEFAULT_TX_WINDOW);
3034 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3035 }
3036 chan->ack_win = chan->tx_win;
3037 }
3038
/* Build a Configure Request for the channel into data and return its
 * length.
 *
 * On the first configuration exchange the channel mode may first be
 * downgraded via l2cap_select_mode() based on the remote feature
 * mask. MTU, RFC, FCS, EFS and extended-window options are then
 * appended as appropriate for the resulting mode.
 *
 * NOTE(review): options are written into the caller's buffer without
 * an explicit size bound -- callers pass fixed-size stack buffers and
 * rely on the option set staying small enough. Confirm against the
 * buffer-size hardening later applied upstream.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit Basic-mode RFC option is only needed when
		 * the remote supports ERTM or Streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Clamp the PDU size so a maximal frame still fits in
		 * the connection MTU including all ERTM overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Window sizes above the default need the extended
		 * window option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3158
/* Parse a received Configure Request (buffered in chan->conf_req) and
 * build the Configure Response into data.
 *
 * Unknown non-hint options are collected and rejected with
 * L2CAP_CONF_UNKNOWN. Mode, MTU, FCS and EFS settings are validated
 * and the negotiated values are written back into the response.
 *
 * Returns the response length, or -ECONNREFUSED when the request is
 * unacceptable.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		/* Ignore any option whose encoded length does not match
		 * its type: acting on a mis-sized option would consume
		 * stale or unrelated bytes (CVE-2019-3459/3460 class).
		 */
		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only mark EFS as received when the payload is
			 * complete, so efs is never read half-filled.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			/* Only use EFS data the remote actually sent;
			 * without the remote_efs check, efs here would
			 * be uninitialized stack memory when only the
			 * local side enabled EFS (CVE-2022-42895).
			 */
			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3372
3373 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3374 void *data, u16 *result)
3375 {
3376 struct l2cap_conf_req *req = data;
3377 void *ptr = req->data;
3378 int type, olen;
3379 unsigned long val;
3380 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3381 struct l2cap_conf_efs efs;
3382
3383 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3384
3385 while (len >= L2CAP_CONF_OPT_SIZE) {
3386 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3387
3388 switch (type) {
3389 case L2CAP_CONF_MTU:
3390 if (val < L2CAP_DEFAULT_MIN_MTU) {
3391 *result = L2CAP_CONF_UNACCEPT;
3392 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3393 } else
3394 chan->imtu = val;
3395 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3396 break;
3397
3398 case L2CAP_CONF_FLUSH_TO:
3399 chan->flush_to = val;
3400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3401 2, chan->flush_to);
3402 break;
3403
3404 case L2CAP_CONF_RFC:
3405 if (olen == sizeof(rfc))
3406 memcpy(&rfc, (void *)val, olen);
3407
3408 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3409 rfc.mode != chan->mode)
3410 return -ECONNREFUSED;
3411
3412 chan->fcs = 0;
3413
3414 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3415 sizeof(rfc), (unsigned long) &rfc);
3416 break;
3417
3418 case L2CAP_CONF_EWS:
3419 chan->ack_win = min_t(u16, val, chan->ack_win);
3420 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3421 chan->tx_win);
3422 break;
3423
3424 case L2CAP_CONF_EFS:
3425 if (olen == sizeof(efs))
3426 memcpy(&efs, (void *)val, olen);
3427
3428 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3429 efs.stype != L2CAP_SERV_NOTRAFIC &&
3430 efs.stype != chan->local_stype)
3431 return -ECONNREFUSED;
3432
3433 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3434 (unsigned long) &efs);
3435 break;
3436 }
3437 }
3438
3439 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3440 return -ECONNREFUSED;
3441
3442 chan->mode = rfc.mode;
3443
3444 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3445 switch (rfc.mode) {
3446 case L2CAP_MODE_ERTM:
3447 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3448 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3449 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3450 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3451 chan->ack_win = min_t(u16, chan->ack_win,
3452 rfc.txwin_size);
3453
3454 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3455 chan->local_msdu = le16_to_cpu(efs.msdu);
3456 chan->local_sdu_itime =
3457 le32_to_cpu(efs.sdu_itime);
3458 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3459 chan->local_flush_to =
3460 le32_to_cpu(efs.flush_to);
3461 }
3462 break;
3463
3464 case L2CAP_MODE_STREAMING:
3465 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3466 }
3467 }
3468
3469 req->dcid = cpu_to_le16(chan->dcid);
3470 req->flags = __constant_cpu_to_le16(0);
3471
3472 return ptr - data;
3473 }
3474
/* Write a minimal Configure Response header (scid, result, flags, no
 * options) into data and return its length.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	/* The response reports our dcid, i.e. the peer's source CID. */
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
3489
/* Send the deferred success response for a channel whose acceptance
 * was delayed (Connect Response, or Create Channel Response on an AMP
 * link), then kick off configuration by sending the first Configure
 * Request exactly once.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	/* NOTE(review): fixed-size buffer for the built config request;
	 * l2cap_build_conf_req() does not take a bound -- confirm 128
	 * bytes always suffices for the options emitted.
	 */
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels were opened with Create Channel, so answer with
	 * the matching response code.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* CONF_REQ_SENT guarantees the config request goes out once. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3518
3519 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3520 {
3521 int type, olen;
3522 unsigned long val;
3523 /* Use sane default values in case a misbehaving remote device
3524 * did not send an RFC or extended window size option.
3525 */
3526 u16 txwin_ext = chan->ack_win;
3527 struct l2cap_conf_rfc rfc = {
3528 .mode = chan->mode,
3529 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3530 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3531 .max_pdu_size = cpu_to_le16(chan->imtu),
3532 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3533 };
3534
3535 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3536
3537 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3538 return;
3539
3540 while (len >= L2CAP_CONF_OPT_SIZE) {
3541 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3542
3543 switch (type) {
3544 case L2CAP_CONF_RFC:
3545 if (olen == sizeof(rfc))
3546 memcpy(&rfc, (void *)val, olen);
3547 break;
3548 case L2CAP_CONF_EWS:
3549 txwin_ext = val;
3550 break;
3551 }
3552 }
3553
3554 switch (rfc.mode) {
3555 case L2CAP_MODE_ERTM:
3556 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3557 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3558 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3559 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3560 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3561 else
3562 chan->ack_win = min_t(u16, chan->ack_win,
3563 rfc.txwin_size);
3564 break;
3565 case L2CAP_MODE_STREAMING:
3566 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3567 }
3568 }
3569
3570 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3571 struct l2cap_cmd_hdr *cmd, u8 *data)
3572 {
3573 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3574
3575 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3576 return 0;
3577
3578 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3579 cmd->ident == conn->info_ident) {
3580 cancel_delayed_work(&conn->info_timer);
3581
3582 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3583 conn->info_ident = 0;
3584
3585 l2cap_conn_start(conn);
3586 }
3587
3588 return 0;
3589 }
3590
/* Handle an incoming connection request (CONN_REQ or CREATE_CHAN_REQ).
 *
 * Looks up a channel listening on the requested PSM, creates a child
 * channel for the new connection and always sends back a response of
 * type @rsp_code.  @amp_id selects the local controller the channel is
 * created on (0 = BR/EDR).
 *
 * Returns the newly created channel, or NULL when the request was
 * refused (bad PSM, security block, duplicate scid, out of memory).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock first, then the parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid
	 * (the remote's scid becomes our dcid)
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL connection alive while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our scid is reported back to the remote as its dcid */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize the connection */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange if it was never begun */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, begin configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3721
/* Handle L2CAP_CONN_REQ: a plain BR/EDR connect (amp_id 0), answered
 * with an ordinary L2CAP_CONN_RSP.  The created channel (if any) is
 * tracked via the connection, so the return value is not needed here.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3728
/* Handle L2CAP_CONN_RSP / L2CAP_CREATE_CHAN_RSP from the remote side.
 *
 * A non-zero scid identifies our channel directly; a zero scid (no
 * channel allocated remotely yet) matches by the ident of the request
 * we sent.  On success, configuration is started; on any result other
 * than success/pending the channel is torn down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first configure request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Connection refused or failed: drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3797
3798 static inline void set_default_fcs(struct l2cap_chan *chan)
3799 {
3800 /* FCS is enabled only in ERTM or streaming mode, if one or both
3801 * sides request it.
3802 */
3803 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3804 chan->fcs = L2CAP_FCS_NONE;
3805 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3806 chan->fcs = L2CAP_FCS_CRC16;
3807 }
3808
3809 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3810 u8 ident, u16 flags)
3811 {
3812 struct l2cap_conn *conn = chan->conn;
3813
3814 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3815 flags);
3816
3817 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3818 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3819
3820 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3821 l2cap_build_conf_rsp(chan, data,
3822 L2CAP_CONF_SUCCESS, flags), data);
3823 }
3824
/* Handle L2CAP_CONF_REQ from the remote side.
 *
 * Configuration options may arrive split across several requests
 * (continuation flag); partial data is accumulated in chan->conf_req
 * until the final fragment, then parsed and answered in one response.
 * When both directions are configured the channel is brought up (with
 * ERTM initialization where required).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configure is only valid while the channel is being set up */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable or unacceptable options: tear down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own configure request if we have not yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3934
/* Handle L2CAP_CONF_RSP from the remote side.
 *
 * SUCCESS extracts the agreed RFC parameters; PENDING defers; UNACCEPT
 * retries with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times.
 * Anything else (or too many unacceptable responses) disconnects the
 * channel.  When both directions are configured the channel is brought
 * up.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP waits for the logical link */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retry limit exceeded: fall through and disconnect */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4042
/* Handle L2CAP_DISCONN_REQ: acknowledge with a disconnect response and
 * tear the channel down.
 *
 * A reference is taken (l2cap_chan_hold) before l2cap_chan_del so the
 * channel stays valid for ops->close() after being unlinked; the
 * channel lock is dropped before close() to respect lock ordering.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4089
/* Handle L2CAP_DISCONN_RSP: the remote confirmed our disconnect, so
 * finish tearing down the channel.
 *
 * Same hold/del/unlock/close/put sequence as l2cap_disconnect_req to
 * keep the channel alive across ops->close().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4124
/* Handle L2CAP_INFO_REQ: report our feature mask or fixed channel map,
 * or NOTSUPP for any other info type.
 *
 * NOTE(review): the A2MP bit is toggled in the file-scope
 * l2cap_fixed_chan[] array, i.e. shared across all connections; this
 * mirrors the global enable_hs setting.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* buf = 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* buf = 4-byte rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4175
/* Handle L2CAP_INFO_RSP: record the remote's feature mask and, when
 * the fixed-channel feature is present, chain a second request for the
 * fixed channel map.  Discovery ends (and pending connections start)
 * on error, on the fixed-channel reply, or when no follow-up request
 * is needed.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused: finish discovery with defaults */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by asking for the fixed channel map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4234
/* Handle L2CAP_CREATE_CHAN_REQ (high-speed AMP channel creation).
 *
 * amp_id == HCI_BREDR_ID degenerates to a normal BR/EDR connect; any
 * other id must name an AMP controller that is up.  On success the
 * new channel is attached to the AMP manager and the connection MTU is
 * switched to the AMP controller's block MTU.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			/* NOTE(review): at this point l2cap_connect() has
			 * already answered the peer; -EFAULT only reports
			 * the missing AMP link to our caller.
			 */
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		chan->local_amp_id = req->amp_id;
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Unknown or unusable AMP controller: reject the request */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4308
4309 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4310 {
4311 struct l2cap_move_chan_req req;
4312 u8 ident;
4313
4314 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4315
4316 ident = l2cap_get_ident(chan->conn);
4317 chan->ident = ident;
4318
4319 req.icid = cpu_to_le16(chan->scid);
4320 req.dest_amp_id = dest_amp_id;
4321
4322 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4323 &req);
4324
4325 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4326 }
4327
4328 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4329 {
4330 struct l2cap_move_chan_rsp rsp;
4331
4332 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4333
4334 rsp.icid = cpu_to_le16(chan->dcid);
4335 rsp.result = cpu_to_le16(result);
4336
4337 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4338 sizeof(rsp), &rsp);
4339 }
4340
4341 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4342 {
4343 struct l2cap_move_chan_cfm cfm;
4344
4345 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4346
4347 chan->ident = l2cap_get_ident(chan->conn);
4348
4349 cfm.icid = cpu_to_le16(chan->scid);
4350 cfm.result = cpu_to_le16(result);
4351
4352 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4353 sizeof(cfm), &cfm);
4354
4355 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4356 }
4357
4358 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4359 {
4360 struct l2cap_move_chan_cfm cfm;
4361
4362 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4363
4364 cfm.icid = cpu_to_le16(icid);
4365 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4366
4367 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4368 sizeof(cfm), &cfm);
4369 }
4370
4371 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4372 u16 icid)
4373 {
4374 struct l2cap_move_chan_cfm_rsp rsp;
4375
4376 BT_DBG("icid 0x%4.4x", icid);
4377
4378 rsp.icid = cpu_to_le16(icid);
4379 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4380 }
4381
4382 static void __release_logical_link(struct l2cap_chan *chan)
4383 {
4384 chan->hs_hchan = NULL;
4385 chan->hs_hcon = NULL;
4386
4387 /* Placeholder - release the logical link */
4388 }
4389
/* React to a failed AMP logical link setup.
 *
 * If the channel never reached BT_CONNECTED the link was for channel
 * creation, so the channel is disconnected.  Otherwise the failure
 * aborts an in-progress move, and the appropriate move signal is sent
 * depending on our role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4420
/* Finish channel creation once the AMP logical link is up: attach the
 * link, send the deferred configure response (using the ident saved in
 * chan->ident), and bring the channel up if configuration in the other
 * direction already completed.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer for building the configure response */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4443
/* Advance the channel-move state machine once the AMP logical link is
 * up.  The next step depends on whether the remote's move response has
 * already been seen and on our role in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4477
/* Call with chan locked */

/* AMP logical link completion callback.
 *
 * @status non-zero means setup failed.  Otherwise the link completes
 * either a channel creation (channel not yet BT_CONNECTED, AMP-side
 * only) or an in-progress channel move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4498
/* Begin moving the channel to the other controller type.
 *
 * On BR/EDR, a move to AMP is only started when the channel policy
 * prefers AMP; the physical link must be brought up first.  On AMP,
 * the move back to BR/EDR (id 0) can be requested immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == HCI_BREDR_ID) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4517
/* Continue channel creation after the AMP physical link attempt.
 *
 * For an incoming channel (no CONF_CONNECT_PEND) the deferred create
 * response is now sent, followed by a configure request on success.
 * For an outgoing channel, success proceeds with a create-channel
 * request on the AMP; failure falls back to a BR/EDR connect.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	if (!test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		/* Incoming channel on AMP */
		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	} else {
		/* Outgoing channel on AMP */
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}
	}
}
4560
4561 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4562 u8 remote_amp_id)
4563 {
4564 l2cap_move_setup(chan);
4565 chan->move_id = local_amp_id;
4566 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4567
4568 l2cap_send_move_chan_req(chan, remote_amp_id);
4569 }
4570
/* Respond to a move request after the physical link attempt.
 *
 * NOTE(review): hchan is a placeholder that is never assigned here, so
 * only the "not allowed" branch can currently execute; the other
 * branches become reachable once logical link lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4595
4596 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4597 {
4598 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4599 u8 rsp_result;
4600 if (result == -EINVAL)
4601 rsp_result = L2CAP_MR_BAD_ID;
4602 else
4603 rsp_result = L2CAP_MR_NOT_ALLOWED;
4604
4605 l2cap_send_move_chan_rsp(chan, rsp_result);
4606 }
4607
4608 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4609 chan->move_state = L2CAP_MOVE_STABLE;
4610
4611 /* Restart data transmission */
4612 l2cap_ertm_send(chan);
4613 }
4614
/* AMP physical link completion callback.
 *
 * Dispatches on channel state: a channel not yet connected continues
 * creation; a connected channel continues (or cancels) a move,
 * according to our role and @result.  Channels already closing are
 * ignored.
 */
void l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	l2cap_chan_lock(chan);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}

	l2cap_chan_unlock(chan);
}
4651
/* Handle L2CAP_MOVE_CHAN_REQ from the remote side.
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming
 * mode, policy allows AMP), that the destination controller differs
 * and exists, and resolves move collisions by bd_addr comparison.
 * Always replies with a Move Channel Response.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Save ident so the response reuses it */
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		/* Destination must be a usable AMP controller */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4749
/* Handle a success or pending Move Channel Response on the initiator
 * side: advance the channel's move state machine and, where the move
 * can proceed, send a Move Channel Confirmation.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel is gone; confirm by icid alone so the peer
		 * does not keep waiting on us.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer needs more time; rearm with the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link.
		 * NOTE: hchan is always NULL here until the AMP logical
		 * link lookup is implemented, so this branch currently
		 * always confirms the move as unconfirmed.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	/* Pairs with the lock taken by l2cap_get_chan_by_scid() above */
	l2cap_chan_unlock(chan);
}
4839
4840 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4841 u16 result)
4842 {
4843 struct l2cap_chan *chan;
4844
4845 chan = l2cap_get_chan_by_ident(conn, ident);
4846 if (!chan) {
4847 /* Could not locate channel, icid is best guess */
4848 l2cap_send_move_chan_cfm_icid(conn, icid);
4849 return;
4850 }
4851
4852 __clear_chan_timer(chan);
4853
4854 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4855 if (result == L2CAP_MR_COLLISION) {
4856 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4857 } else {
4858 /* Cleanup - cancel move */
4859 chan->move_id = chan->local_amp_id;
4860 l2cap_move_done(chan);
4861 }
4862 }
4863
4864 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4865
4866 l2cap_chan_unlock(chan);
4867 }
4868
4869 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4870 struct l2cap_cmd_hdr *cmd,
4871 u16 cmd_len, void *data)
4872 {
4873 struct l2cap_move_chan_rsp *rsp = data;
4874 u16 icid, result;
4875
4876 if (cmd_len != sizeof(*rsp))
4877 return -EPROTO;
4878
4879 icid = le16_to_cpu(rsp->icid);
4880 result = le16_to_cpu(rsp->result);
4881
4882 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4883
4884 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4885 l2cap_move_continue(conn, icid, result);
4886 else
4887 l2cap_move_fail(conn, cmd->ident, icid, result);
4888
4889 return 0;
4890 }
4891
/* Handle a Move Channel Confirmation from the peer.
 *
 * If this side was waiting for the confirmation, commit (or roll back)
 * the move accordingly.  A confirmation response is always sent, even
 * when the icid is unknown or the channel is not in a waiting state.
 * Returns -EPROTO on a malformed PDU, else 0.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; id 0 is BR/EDR,
			 * so any AMP logical link can be released.
			 */
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed - revert to the old controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	/* Pairs with the lock taken by l2cap_get_chan_by_dcid() */
	l2cap_chan_unlock(chan);

	return 0;
}
4933
/* Handle a Move Channel Confirmation Response.
 *
 * This completes the move procedure on the side that sent the
 * confirmation: the channel is committed to move_id, and when the
 * destination is BR/EDR (id 0) any AMP logical link is released.
 * Returns -EPROTO on a malformed PDU, else 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Destination 0 is BR/EDR; drop the AMP logical link */
		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	/* Pairs with the lock taken by l2cap_get_chan_by_scid() */
	l2cap_chan_unlock(chan);

	return 0;
}
4968
4969 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4970 u16 to_multiplier)
4971 {
4972 u16 max_latency;
4973
4974 if (min > max || min < 6 || max > 3200)
4975 return -EINVAL;
4976
4977 if (to_multiplier < 10 || to_multiplier > 3200)
4978 return -EINVAL;
4979
4980 if (max >= to_multiplier * 8)
4981 return -EINVAL;
4982
4983 max_latency = (to_multiplier * 8 / max) - 1;
4984 if (latency > 499 || latency > max_latency)
4985 return -EINVAL;
4986
4987 return 0;
4988 }
4989
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local side is master of the link.  The request
 * is validated with l2cap_check_conn_param(); a response carrying
 * accepted/rejected is always sent, and on acceptance the controller
 * is asked to update the connection via hci_le_conn_update().
 *
 * Returns -EINVAL if we are not master, -EPROTO on a malformed PDU,
 * else 0 (including the rejected case, which is a valid exchange).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only touch the controller once the peer has been answered */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5032
/* Dispatch one BR/EDR L2CAP signaling command to its handler.
 *
 * A non-zero return causes the caller (l2cap_sig_channel) to send a
 * Command Reject for this command.  Echo requests are answered
 * in-line; echo responses and command rejects need no further action.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and create-channel responses share a handler */
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5112
5113 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5114 struct l2cap_cmd_hdr *cmd, u8 *data)
5115 {
5116 switch (cmd->code) {
5117 case L2CAP_COMMAND_REJ:
5118 return 0;
5119
5120 case L2CAP_CONN_PARAM_UPDATE_REQ:
5121 return l2cap_conn_param_update_req(conn, cmd, data);
5122
5123 case L2CAP_CONN_PARAM_UPDATE_RSP:
5124 return 0;
5125
5126 default:
5127 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5128 return -EINVAL;
5129 }
5130 }
5131
/* Process an incoming skb on the signaling channel.
 *
 * The skb may contain several concatenated signaling commands; each
 * is copied into a local header, validated, and dispatched to the
 * LE or BR/EDR handler depending on the link type.  Commands that a
 * handler rejects are answered with a Command Reject.  The skb is
 * consumed (freed) before returning.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A payload longer than what remains, or an ident of 0,
		 * means the stream is corrupt; stop parsing entirely.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5180
/* Verify the FCS of a received ERTM/streaming frame.
 *
 * When the channel uses CRC16, the last L2CAP_FCS_SIZE bytes of the
 * skb carry the received FCS.  The skb is trimmed so skb->len covers
 * only the protected payload; the FCS is then read from just past the
 * trimmed end (the bytes remain in the buffer after skb_trim) and
 * compared against a CRC computed over the header - which sits
 * hdr_size bytes before skb->data - plus the payload.
 *
 * Returns 0 on a match or when no FCS is in use, -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5201
/* Send a frame with the F-bit set, choosing the best carrier.
 *
 * If locally busy, an RNR s-frame is sent immediately.  Otherwise
 * pending i-frames are flushed first - l2cap_ertm_send() may carry the
 * F-bit in one of them (clearing CONN_SEND_FBIT) - and an RR s-frame
 * is only sent if the F-bit still has not gone out.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with unacked frames: restart retransmit */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5235
/* Append new_frag to skb's fragment list and update byte accounting.
 *
 * *last_frag tracks the tail of the chain (it is the head skb itself
 * when no fragments have been added yet, per the callers in
 * l2cap_reassemble_sdu), so the new fragment is always linked via the
 * previous element's ->next.  Ownership of new_frag passes to skb.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5254
/* Reassemble an SDU from segmented frames according to the frame's
 * SAR (segmentation and reassembly) field, delivering complete SDUs
 * via chan->ops->recv().
 *
 * Ownership: when a frame is kept for reassembly (or delivered), the
 * local skb pointer is set to NULL so the error path below does not
 * free it; kfree_skb(NULL) is a no-op.  On any error both the frame
 * and any partial SDU are discarded and reassembly state is reset.
 *
 * Returns 0 on success, -EINVAL for a SAR sequence violation,
 * -EMSGSIZE when the announced SDU length exceeds the channel MTU,
 * or the error from chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU in one frame; must not interrupt an
		 * in-progress reassembly.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes of a START frame carry the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame must not already contain the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE frame must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END frame must land exactly on the announced length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5336
/* Re-segment queued outgoing data after an AMP move changes the MTU.
 *
 * Placeholder: currently a no-op that reports success; callers
 * (l2cap_finish_move, l2cap_rx_state_wait_f) already check the return
 * value so a real implementation can fail.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5342
5343 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5344 {
5345 u8 event;
5346
5347 if (chan->mode != L2CAP_MODE_ERTM)
5348 return;
5349
5350 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5351 l2cap_tx(chan, NULL, NULL, event);
5352 }
5353
/* Drain the SREJ hold queue into SDU reassembly.
 *
 * Consecutively-sequenced frames (starting at buffer_seq) are removed
 * from srej_q and passed to l2cap_reassemble_sdu() until a sequence
 * gap is hit, local busy is raised, or reassembly fails.  Once the
 * queue is fully drained the channel returns to the normal receive
 * state and acknowledges.  Returns the last reassembly error, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5387
/* Handle a received SREJ s-frame: retransmit the single frame the
 * peer asked for, enforcing the reqseq validity and max_tx limits.
 * An invalid reqseq or an exhausted retry budget disconnects the
 * channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next (not yet sent) sequence number is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: answer with the F-bit on the retransmission */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final frame answers
			 * an SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5445
/* Handle a received REJ s-frame: retransmit all unacked frames from
 * the peer's reqseq onward, enforcing reqseq validity and the max_tx
 * retry budget.  Violations disconnect the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next (not yet sent) sequence number is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this final frame answers a REJ
		 * we already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5482
/* Classify an incoming I-frame's sequence number relative to the
 * current receive state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications (expected,
 * duplicate, unexpected gap, the SREJ-specific variants while in
 * SREJ_SENT state, or invalid/ignorable) that drive the rx state
 * machines in l2cap_rx_state_recv() and l2cap_rx_state_srej_sent().
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5568
5569 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5570 struct l2cap_ctrl *control,
5571 struct sk_buff *skb, u8 event)
5572 {
5573 int err = 0;
5574 bool skb_in_use = 0;
5575
5576 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5577 event);
5578
5579 switch (event) {
5580 case L2CAP_EV_RECV_IFRAME:
5581 switch (l2cap_classify_txseq(chan, control->txseq)) {
5582 case L2CAP_TXSEQ_EXPECTED:
5583 l2cap_pass_to_tx(chan, control);
5584
5585 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5586 BT_DBG("Busy, discarding expected seq %d",
5587 control->txseq);
5588 break;
5589 }
5590
5591 chan->expected_tx_seq = __next_seq(chan,
5592 control->txseq);
5593
5594 chan->buffer_seq = chan->expected_tx_seq;
5595 skb_in_use = 1;
5596
5597 err = l2cap_reassemble_sdu(chan, skb, control);
5598 if (err)
5599 break;
5600
5601 if (control->final) {
5602 if (!test_and_clear_bit(CONN_REJ_ACT,
5603 &chan->conn_state)) {
5604 control->final = 0;
5605 l2cap_retransmit_all(chan, control);
5606 l2cap_ertm_send(chan);
5607 }
5608 }
5609
5610 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5611 l2cap_send_ack(chan);
5612 break;
5613 case L2CAP_TXSEQ_UNEXPECTED:
5614 l2cap_pass_to_tx(chan, control);
5615
5616 /* Can't issue SREJ frames in the local busy state.
5617 * Drop this frame, it will be seen as missing
5618 * when local busy is exited.
5619 */
5620 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5621 BT_DBG("Busy, discarding unexpected seq %d",
5622 control->txseq);
5623 break;
5624 }
5625
5626 /* There was a gap in the sequence, so an SREJ
5627 * must be sent for each missing frame. The
5628 * current frame is stored for later use.
5629 */
5630 skb_queue_tail(&chan->srej_q, skb);
5631 skb_in_use = 1;
5632 BT_DBG("Queued %p (queue len %d)", skb,
5633 skb_queue_len(&chan->srej_q));
5634
5635 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5636 l2cap_seq_list_clear(&chan->srej_list);
5637 l2cap_send_srej(chan, control->txseq);
5638
5639 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5640 break;
5641 case L2CAP_TXSEQ_DUPLICATE:
5642 l2cap_pass_to_tx(chan, control);
5643 break;
5644 case L2CAP_TXSEQ_INVALID_IGNORE:
5645 break;
5646 case L2CAP_TXSEQ_INVALID:
5647 default:
5648 l2cap_send_disconn_req(chan->conn, chan,
5649 ECONNRESET);
5650 break;
5651 }
5652 break;
5653 case L2CAP_EV_RECV_RR:
5654 l2cap_pass_to_tx(chan, control);
5655 if (control->final) {
5656 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5657
5658 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5659 !__chan_is_moving(chan)) {
5660 control->final = 0;
5661 l2cap_retransmit_all(chan, control);
5662 }
5663
5664 l2cap_ertm_send(chan);
5665 } else if (control->poll) {
5666 l2cap_send_i_or_rr_or_rnr(chan);
5667 } else {
5668 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5669 &chan->conn_state) &&
5670 chan->unacked_frames)
5671 __set_retrans_timer(chan);
5672
5673 l2cap_ertm_send(chan);
5674 }
5675 break;
5676 case L2CAP_EV_RECV_RNR:
5677 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5678 l2cap_pass_to_tx(chan, control);
5679 if (control && control->poll) {
5680 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5681 l2cap_send_rr_or_rnr(chan, 0);
5682 }
5683 __clear_retrans_timer(chan);
5684 l2cap_seq_list_clear(&chan->retrans_list);
5685 break;
5686 case L2CAP_EV_RECV_REJ:
5687 l2cap_handle_rej(chan, control);
5688 break;
5689 case L2CAP_EV_RECV_SREJ:
5690 l2cap_handle_srej(chan, control);
5691 break;
5692 default:
5693 break;
5694 }
5695
5696 if (skb && !skb_in_use) {
5697 BT_DBG("Freeing %p", skb);
5698 kfree_skb(skb);
5699 }
5700
5701 return err;
5702 }
5703
5704 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5705 struct l2cap_ctrl *control,
5706 struct sk_buff *skb, u8 event)
5707 {
5708 int err = 0;
5709 u16 txseq = control->txseq;
5710 bool skb_in_use = 0;
5711
5712 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5713 event);
5714
5715 switch (event) {
5716 case L2CAP_EV_RECV_IFRAME:
5717 switch (l2cap_classify_txseq(chan, txseq)) {
5718 case L2CAP_TXSEQ_EXPECTED:
5719 /* Keep frame for reassembly later */
5720 l2cap_pass_to_tx(chan, control);
5721 skb_queue_tail(&chan->srej_q, skb);
5722 skb_in_use = 1;
5723 BT_DBG("Queued %p (queue len %d)", skb,
5724 skb_queue_len(&chan->srej_q));
5725
5726 chan->expected_tx_seq = __next_seq(chan, txseq);
5727 break;
5728 case L2CAP_TXSEQ_EXPECTED_SREJ:
5729 l2cap_seq_list_pop(&chan->srej_list);
5730
5731 l2cap_pass_to_tx(chan, control);
5732 skb_queue_tail(&chan->srej_q, skb);
5733 skb_in_use = 1;
5734 BT_DBG("Queued %p (queue len %d)", skb,
5735 skb_queue_len(&chan->srej_q));
5736
5737 err = l2cap_rx_queued_iframes(chan);
5738 if (err)
5739 break;
5740
5741 break;
5742 case L2CAP_TXSEQ_UNEXPECTED:
5743 /* Got a frame that can't be reassembled yet.
5744 * Save it for later, and send SREJs to cover
5745 * the missing frames.
5746 */
5747 skb_queue_tail(&chan->srej_q, skb);
5748 skb_in_use = 1;
5749 BT_DBG("Queued %p (queue len %d)", skb,
5750 skb_queue_len(&chan->srej_q));
5751
5752 l2cap_pass_to_tx(chan, control);
5753 l2cap_send_srej(chan, control->txseq);
5754 break;
5755 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5756 /* This frame was requested with an SREJ, but
5757 * some expected retransmitted frames are
5758 * missing. Request retransmission of missing
5759 * SREJ'd frames.
5760 */
5761 skb_queue_tail(&chan->srej_q, skb);
5762 skb_in_use = 1;
5763 BT_DBG("Queued %p (queue len %d)", skb,
5764 skb_queue_len(&chan->srej_q));
5765
5766 l2cap_pass_to_tx(chan, control);
5767 l2cap_send_srej_list(chan, control->txseq);
5768 break;
5769 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5770 /* We've already queued this frame. Drop this copy. */
5771 l2cap_pass_to_tx(chan, control);
5772 break;
5773 case L2CAP_TXSEQ_DUPLICATE:
5774 /* Expecting a later sequence number, so this frame
5775 * was already received. Ignore it completely.
5776 */
5777 break;
5778 case L2CAP_TXSEQ_INVALID_IGNORE:
5779 break;
5780 case L2CAP_TXSEQ_INVALID:
5781 default:
5782 l2cap_send_disconn_req(chan->conn, chan,
5783 ECONNRESET);
5784 break;
5785 }
5786 break;
5787 case L2CAP_EV_RECV_RR:
5788 l2cap_pass_to_tx(chan, control);
5789 if (control->final) {
5790 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5791
5792 if (!test_and_clear_bit(CONN_REJ_ACT,
5793 &chan->conn_state)) {
5794 control->final = 0;
5795 l2cap_retransmit_all(chan, control);
5796 }
5797
5798 l2cap_ertm_send(chan);
5799 } else if (control->poll) {
5800 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5801 &chan->conn_state) &&
5802 chan->unacked_frames) {
5803 __set_retrans_timer(chan);
5804 }
5805
5806 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5807 l2cap_send_srej_tail(chan);
5808 } else {
5809 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5810 &chan->conn_state) &&
5811 chan->unacked_frames)
5812 __set_retrans_timer(chan);
5813
5814 l2cap_send_ack(chan);
5815 }
5816 break;
5817 case L2CAP_EV_RECV_RNR:
5818 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5819 l2cap_pass_to_tx(chan, control);
5820 if (control->poll) {
5821 l2cap_send_srej_tail(chan);
5822 } else {
5823 struct l2cap_ctrl rr_control;
5824 memset(&rr_control, 0, sizeof(rr_control));
5825 rr_control.sframe = 1;
5826 rr_control.super = L2CAP_SUPER_RR;
5827 rr_control.reqseq = chan->buffer_seq;
5828 l2cap_send_sframe(chan, &rr_control);
5829 }
5830
5831 break;
5832 case L2CAP_EV_RECV_REJ:
5833 l2cap_handle_rej(chan, control);
5834 break;
5835 case L2CAP_EV_RECV_SREJ:
5836 l2cap_handle_srej(chan, control);
5837 break;
5838 }
5839
5840 if (skb && !skb_in_use) {
5841 BT_DBG("Freeing %p", skb);
5842 kfree_skb(skb);
5843 }
5844
5845 return err;
5846 }
5847
5848 static int l2cap_finish_move(struct l2cap_chan *chan)
5849 {
5850 BT_DBG("chan %p", chan);
5851
5852 chan->rx_state = L2CAP_RX_STATE_RECV;
5853
5854 if (chan->hs_hcon)
5855 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5856 else
5857 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5858
5859 return l2cap_resegment(chan);
5860 }
5861
/* ERTM receive state machine: WAIT_P state (after a channel move,
 * waiting for the peer's poll).
 *
 * Only a frame with the P-bit set is acceptable here.  On receiving
 * it, acked frames are released, the transmit queue is rewound so
 * retransmission restarts at the peer's reqseq, the move is finished
 * (MTU switch + resegment), and a frame with the F-bit is sent in
 * reply.  Remaining s-frame handling is delegated to the RECV-state
 * handler; an I-frame here is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
5899
/* ERTM receive state machine: WAIT_F state (after a channel move,
 * waiting for the peer's final response to our poll).
 *
 * Only a frame with the F-bit set is acceptable here.  On receiving
 * it, the channel returns to the normal receive state, the transmit
 * queue is rewound to the peer's reqseq, the MTU is switched to the
 * new controller's, pending data is re-segmented, and the frame is
 * then processed by the RECV-state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the new controller's MTU: block MTU on AMP, else ACL MTU */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
5937
5938 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5939 {
5940 /* Make sure reqseq is for a packet that has been sent but not acked */
5941 u16 unacked;
5942
5943 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5944 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5945 }
5946
/* Top-level ERTM receive entry point: validate the frame's reqseq and
 * dispatch to the handler for the channel's current rx_state.
 *
 * An invalid reqseq (acknowledging frames never sent) disconnects the
 * channel.  Returns 0 or the error from the state handler.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return err;
}
5983
5984 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5985 struct sk_buff *skb)
5986 {
5987 int err = 0;
5988
5989 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5990 chan->rx_state);
5991
5992 if (l2cap_classify_txseq(chan, control->txseq) ==
5993 L2CAP_TXSEQ_EXPECTED) {
5994 l2cap_pass_to_tx(chan, control);
5995
5996 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5997 __next_seq(chan, chan->buffer_seq));
5998
5999 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6000
6001 l2cap_reassemble_sdu(chan, skb, control);
6002 } else {
6003 if (chan->sdu) {
6004 kfree_skb(chan->sdu);
6005 chan->sdu = NULL;
6006 }
6007 chan->sdu_last_frag = NULL;
6008 chan->sdu_len = 0;
6009
6010 if (skb) {
6011 BT_DBG("Freeing %p", skb);
6012 kfree_skb(skb);
6013 }
6014 }
6015
6016 chan->last_acked_seq = control->txseq;
6017 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6018
6019 return err;
6020 }
6021
6022 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6023 {
6024 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6025 u16 len;
6026 u8 event;
6027
6028 __unpack_control(chan, skb);
6029
6030 len = skb->len;
6031
6032 /*
6033 * We can just drop the corrupted I-frame here.
6034 * Receiver will miss it and start proper recovery
6035 * procedures and ask for retransmission.
6036 */
6037 if (l2cap_check_fcs(chan, skb))
6038 goto drop;
6039
6040 if (!control->sframe && control->sar == L2CAP_SAR_START)
6041 len -= L2CAP_SDULEN_SIZE;
6042
6043 if (chan->fcs == L2CAP_FCS_CRC16)
6044 len -= L2CAP_FCS_SIZE;
6045
6046 if (len > chan->mps) {
6047 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
6048 goto drop;
6049 }
6050
6051 if (!control->sframe) {
6052 int err;
6053
6054 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6055 control->sar, control->reqseq, control->final,
6056 control->txseq);
6057
6058 /* Validate F-bit - F=0 always valid, F=1 only
6059 * valid in TX WAIT_F
6060 */
6061 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6062 goto drop;
6063
6064 if (chan->mode != L2CAP_MODE_STREAMING) {
6065 event = L2CAP_EV_RECV_IFRAME;
6066 err = l2cap_rx(chan, control, skb, event);
6067 } else {
6068 err = l2cap_stream_rx(chan, control, skb);
6069 }
6070
6071 if (err)
6072 l2cap_send_disconn_req(chan->conn, chan,
6073 ECONNRESET);
6074 } else {
6075 const u8 rx_func_to_event[4] = {
6076 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6077 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6078 };
6079
6080 /* Only I-frames are expected in streaming mode */
6081 if (chan->mode == L2CAP_MODE_STREAMING)
6082 goto drop;
6083
6084 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6085 control->reqseq, control->final, control->poll,
6086 control->super);
6087
6088 if (len != 0) {
6089 BT_ERR("%d", len);
6090 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
6091 goto drop;
6092 }
6093
6094 /* Validate F and P bits */
6095 if (control->final && (control->poll ||
6096 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6097 goto drop;
6098
6099 event = rx_func_to_event[control->super];
6100 if (l2cap_rx(chan, control, skb, event))
6101 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
6102 }
6103
6104 return 0;
6105
6106 drop:
6107 kfree_skb(skb);
6108 return 0;
6109 }
6110
/* Deliver an incoming data frame on a connection-oriented (or A2MP)
 * channel identified by its source CID.  The skb is consumed on every
 * path: either the channel's recv callback takes ownership or the
 * frame is freed here.
 *
 * NOTE(review): the unlock at "done" has no visible matching lock for
 * the found-channel path, so l2cap_get_chan_by_scid presumably returns
 * the channel locked -- confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* First frame on the A2MP fixed channel creates
			 * the channel on demand.
			 */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv returns 0 when it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming frames carry control fields of their
		 * own; l2cap_data_rcv handles validation and disposal.
		 */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6169
6170 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6171 struct sk_buff *skb)
6172 {
6173 struct l2cap_chan *chan;
6174
6175 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6176 if (!chan)
6177 goto drop;
6178
6179 BT_DBG("chan %p, len %d", chan, skb->len);
6180
6181 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6182 goto drop;
6183
6184 if (chan->imtu < skb->len)
6185 goto drop;
6186
6187 if (!chan->ops->recv(chan, skb))
6188 return;
6189
6190 drop:
6191 kfree_skb(skb);
6192 }
6193
6194 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6195 struct sk_buff *skb)
6196 {
6197 struct l2cap_chan *chan;
6198
6199 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6200 if (!chan)
6201 goto drop;
6202
6203 BT_DBG("chan %p, len %d", chan, skb->len);
6204
6205 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6206 goto drop;
6207
6208 if (chan->imtu < skb->len)
6209 goto drop;
6210
6211 if (!chan->ops->recv(chan, skb))
6212 return;
6213
6214 drop:
6215 kfree_skb(skb);
6216 }
6217
6218 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6219 {
6220 struct l2cap_hdr *lh = (void *) skb->data;
6221 u16 cid, len;
6222 __le16 psm;
6223
6224 skb_pull(skb, L2CAP_HDR_SIZE);
6225 cid = __le16_to_cpu(lh->cid);
6226 len = __le16_to_cpu(lh->len);
6227
6228 if (len != skb->len) {
6229 kfree_skb(skb);
6230 return;
6231 }
6232
6233 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6234
6235 switch (cid) {
6236 case L2CAP_CID_LE_SIGNALING:
6237 case L2CAP_CID_SIGNALING:
6238 l2cap_sig_channel(conn, skb);
6239 break;
6240
6241 case L2CAP_CID_CONN_LESS:
6242 psm = get_unaligned((__le16 *) skb->data);
6243 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6244 l2cap_conless_channel(conn, psm, skb);
6245 break;
6246
6247 case L2CAP_CID_LE_DATA:
6248 l2cap_att_channel(conn, cid, skb);
6249 break;
6250
6251 case L2CAP_CID_SMP:
6252 if (smp_sig_channel(conn, skb))
6253 l2cap_conn_del(conn->hcon, EACCES);
6254 break;
6255
6256 default:
6257 l2cap_data_channel(conn, cid, skb);
6258 break;
6259 }
6260 }
6261
6262 /* ---- L2CAP interface with lower layer (HCI) ---- */
6263
6264 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6265 {
6266 int exact = 0, lm1 = 0, lm2 = 0;
6267 struct l2cap_chan *c;
6268
6269 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6270
6271 /* Find listening sockets and check their link_mode */
6272 read_lock(&chan_list_lock);
6273 list_for_each_entry(c, &chan_list, global_l) {
6274 struct sock *sk = c->sk;
6275
6276 if (c->state != BT_LISTEN)
6277 continue;
6278
6279 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6280 lm1 |= HCI_LM_ACCEPT;
6281 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6282 lm1 |= HCI_LM_MASTER;
6283 exact++;
6284 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6285 lm2 |= HCI_LM_ACCEPT;
6286 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6287 lm2 |= HCI_LM_MASTER;
6288 }
6289 }
6290 read_unlock(&chan_list_lock);
6291
6292 return exact ? lm1 : lm2;
6293 }
6294
6295 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6296 {
6297 struct l2cap_conn *conn;
6298
6299 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6300
6301 if (!status) {
6302 conn = l2cap_conn_add(hcon, status);
6303 if (conn)
6304 l2cap_conn_ready(conn);
6305 } else {
6306 l2cap_conn_del(hcon, bt_to_errno(status));
6307 }
6308 }
6309
6310 int l2cap_disconn_ind(struct hci_conn *hcon)
6311 {
6312 struct l2cap_conn *conn = hcon->l2cap_data;
6313
6314 BT_DBG("hcon %p", hcon);
6315
6316 if (!conn)
6317 return HCI_ERROR_REMOTE_USER_TERM;
6318 return conn->disc_reason;
6319 }
6320
/* HCI callback: the ACL link went down.  Tear down the L2CAP
 * connection and all of its channels, mapping the HCI reason code to
 * an errno for the sockets.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6327
6328 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6329 {
6330 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6331 return;
6332
6333 if (encrypt == 0x00) {
6334 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6335 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6336 } else if (chan->sec_level == BT_SECURITY_HIGH)
6337 l2cap_chan_close(chan, ECONNREFUSED);
6338 } else {
6339 if (chan->sec_level == BT_SECURITY_MEDIUM)
6340 __clear_chan_timer(chan);
6341 }
6342 }
6343
/* HCI callback: authentication/encryption state changed on the link.
 * Walks every channel on the connection under chan_lock and advances
 * each one's state machine: LE channels become ready, connected
 * channels get their suspend flag cleared, BT_CONNECT channels send
 * the pending connect request, and BT_CONNECT2 channels answer the
 * peer's connect request with success, pending-authorization or
 * security-block.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encryption on LE completes SMP pairing: hand out keys
		 * and stop the security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels have no security handling here */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel is still waiting for its own security
		 * procedure to finish; nothing to do yet.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize before
					 * the connection completes.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, kick off configuration right away
			 * if we have not already sent a request.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6459
/* HCI callback: ACL data arrived on this link.  Reassembles L2CAP
 * frames that span multiple ACL packets using conn->rx_skb and
 * conn->rx_len, then hands each complete frame to l2cap_recv_frame().
 * The incoming skb is consumed on every path.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start packet while reassembly is in progress means
		 * the previous frame was truncated: discard the partial
		 * frame and mark the connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6560
6561 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6562 {
6563 struct l2cap_chan *c;
6564
6565 read_lock(&chan_list_lock);
6566
6567 list_for_each_entry(c, &chan_list, global_l) {
6568 struct sock *sk = c->sk;
6569
6570 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6571 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6572 c->state, __le16_to_cpu(c->psm),
6573 c->scid, c->dcid, c->imtu, c->omtu,
6574 c->sec_level, c->mode);
6575 }
6576
6577 read_unlock(&chan_list_lock);
6578
6579 return 0;
6580 }
6581
/* debugfs open: bind the seq_file show routine for a single-shot read. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6586
/* File operations for the "l2cap" debugfs entry, built on the
 * standard single-open seq_file helpers.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Handle of the debugfs file; NULL if creation failed or debugfs is off */
static struct dentry *l2cap_debugfs;
6595
6596 int __init l2cap_init(void)
6597 {
6598 int err;
6599
6600 err = l2cap_init_sockets();
6601 if (err < 0)
6602 return err;
6603
6604 if (bt_debugfs) {
6605 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6606 NULL, &l2cap_debugfs_fops);
6607 if (!l2cap_debugfs)
6608 BT_ERR("Failed to create L2CAP debug file");
6609 }
6610
6611 return 0;
6612 }
6613
/* Module exit: remove the debugfs entry and unregister the sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6619
/* "disable_ertm" forces basic mode instead of enhanced retransmission;
 * writable at runtime via /sys/module (mode 0644).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");