Bluetooth: Remove unnecessary intermediate function
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
/* Find channel with given SCID.
 * Returns locked channel: on success the channel is returned with its
 * chan lock held and the caller is responsible for l2cap_chan_unlock().
 * conn->chan_lock protects only the list walk itself. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
102
103 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
104 u8 ident)
105 {
106 struct l2cap_chan *c;
107
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->ident == ident)
110 return c;
111 }
112 return NULL;
113 }
114
/* Find a channel in the global list bound to source PSM @psm on local
 * address @src.  Caller must hold chan_list_lock. */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
125
/* Bind @chan to PSM @psm on local address @src.
 *
 * If @psm is non-zero it is used directly, failing with -EADDRINUSE if
 * another channel already claims it on @src.  If @psm is zero a free
 * dynamic PSM is allocated from the 0x1001-0x10ff range (odd values
 * only, hence the += 2 stride); -EINVAL if the range is exhausted.
 * Returns 0 on success.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
158
/* Assign a fixed source CID to @chan.  Always succeeds (returns 0);
 * the chan_list_lock is taken to serialize against global lookups. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
169
/* Allocate the lowest free dynamic source CID on @conn.
 * Caller must hold conn->chan_lock (uses the unlocked SCID lookup).
 * Returns 0 if the whole dynamic range is in use. */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
181
/* Move @chan to @state and notify the channel ops callback.
 * Lock-free variant: caller must already hold the socket lock. */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
190
/* Locked wrapper around __l2cap_state_change(): takes the socket lock
 * for the duration of the state transition. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
199
/* Record @err on the channel's socket.  Caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
206
/* Locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
215
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (monitor supersedes retrans) or no retransmission
 * timeout has been negotiated. */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
224
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first; the two are mutually exclusive. */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
233
/* Linear search of @head for the skb whose ERTM TxSeq equals @seq.
 * Returns NULL if no frame with that sequence number is queued. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
246
247 /* ---- L2CAP sequence number lists ---- */
248
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
255 * allocs or frees.
256 */
257
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): assumes size > 0 (roundup_pow_of_two(0) is undefined);
 * callers presumably pass a negotiated non-zero tx window — confirm. */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask doubles as both index mask and "clear" sentinel support */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
280
/* Release the backing array of a sequence list (kfree(NULL) is safe). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
285
/* O(1) membership test: a slot holding anything other than the CLEAR
 * sentinel means @seq is currently linked into the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
292
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Removing the head is O(1);
 * removing an interior element walks the singly-linked chain to find
 * the predecessor. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
326
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
332
/* Empty the list: reset every slot and both end pointers to the CLEAR
 * sentinel.  No-op if the list is already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
346
/* Append @seq to the tail of the list in O(1).  Duplicate appends are
 * silently ignored (the slot is already occupied). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
364
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the timer's channel
 * reference.  Lock order: conn->chan_lock before the chan lock, and
 * ops->close() is called outside the chan lock but inside chan_lock. */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
394
/* Allocate a new channel, link it into the global channel list and
 * initialize its lock, refcount and timer.  Returns NULL on OOM.
 * The caller owns the initial kref; release with l2cap_chan_put(). */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
422
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() when the last ref drops. */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
435
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
442
/* Drop a reference on the channel; frees it via l2cap_chan_destroy()
 * when the count reaches zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
449
/* Reset a channel's negotiable parameters (FCS, tx window, security
 * level) to the L2CAP defaults and mark it force-active. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
461
/* Attach @chan to @conn: pick source/destination CIDs and default MTUs
 * according to the channel type, take a channel reference and link it
 * into the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()). */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters for the local side */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held for as long as the chan sits on conn->chan_l;
	 * dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
517
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
524
525 void l2cap_chan_del(struct l2cap_chan *chan, int err)
526 {
527 struct l2cap_conn *conn = chan->conn;
528
529 __clear_chan_timer(chan);
530
531 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
532
533 if (conn) {
534 struct amp_mgr *mgr = conn->hcon->amp_mgr;
535 /* Delete from channel list */
536 list_del(&chan->list);
537
538 l2cap_chan_put(chan);
539
540 chan->conn = NULL;
541
542 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
543 hci_conn_put(conn->hcon);
544
545 if (mgr && mgr->bredr_chan == chan)
546 mgr->bredr_chan = NULL;
547 }
548
549 chan->ops->teardown(chan, err);
550
551 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
552 return;
553
554 switch(chan->mode) {
555 case L2CAP_MODE_BASIC:
556 break;
557
558 case L2CAP_MODE_ERTM:
559 __clear_retrans_timer(chan);
560 __clear_monitor_timer(chan);
561 __clear_ack_timer(chan);
562
563 skb_queue_purge(&chan->srej_q);
564
565 l2cap_seq_list_free(&chan->srej_list);
566 l2cap_seq_list_free(&chan->retrans_list);
567
568 /* fall through */
569
570 case L2CAP_MODE_STREAMING:
571 skb_queue_purge(&chan->tx_q);
572 break;
573 }
574
575 return;
576 }
577
/* Close @chan with error @reason, following the L2CAP state machine:
 * connected/configuring ACL channels send a Disconnect Request first,
 * a BT_CONNECT2 channel rejects the pending Connect Request, and the
 * remaining states tear the channel down immediately.
 * Caller holds the chan lock (see l2cap_chan_timeout()). */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Give the peer sk_sndtimeo to answer the
			 * disconnect request before forcing teardown */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u2 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
634
/* Map the channel type, PSM and security level to an HCI authentication
 * requirement.  Raw (signalling) channels use dedicated bonding, SDP
 * never bonds (and is lowered from BT_SECURITY_LOW to SDP security as a
 * side effect), everything else uses general bonding. */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
665
/* Service level security.
 * Request the link security appropriate for this channel; returns the
 * hci_conn_security() result (non-zero when security is satisfied). */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
676
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved 1-128 range. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
698
/* Build and transmit an L2CAP signalling command on @conn's signalling
 * channel at maximum HCI priority.  Silently dropped on allocation
 * failure (best effort; peers retransmit signalling on timeout). */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
720
/* Hand a fully-built L2CAP frame to the HCI layer for @chan, choosing
 * flushable vs non-flushable ACL flags from the channel flags and
 * controller capabilities. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
738
/* Decode a 16-bit enhanced control field into @control.  S-frames carry
 * poll/super; I-frames carry sar/txseq; the unused fields are zeroed so
 * the struct is fully initialized either way. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
762
/* Decode a 32-bit extended control field into @control; same layout
 * rules as __unpack_enhanced_control() but with the wider bitfields
 * used when FLAG_EXT_CTRL is negotiated. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
786
/* Pull the control field off the front of @skb into its control block,
 * using the extended (32-bit) or enhanced (16-bit) layout depending on
 * the channel's FLAG_EXT_CTRL setting. */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
800
801 static u32 __pack_extended_control(struct l2cap_ctrl *control)
802 {
803 u32 packed;
804
805 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
807
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
812 } else {
813 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
815 }
816
817 return packed;
818 }
819
820 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
821 {
822 u16 packed;
823
824 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
825 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
826
827 if (control->sframe) {
828 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
829 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
830 packed |= L2CAP_CTRL_FRAME_TYPE;
831 } else {
832 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
833 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
834 }
835
836 return packed;
837 }
838
/* Write the packed control field into @skb just after the basic L2CAP
 * header, in the width selected by the channel's FLAG_EXT_CTRL. */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
851
852 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
853 {
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 return L2CAP_EXT_HDR_SIZE;
856 else
857 return L2CAP_ENH_HDR_SIZE;
858 }
859
/* Build an ERTM S-frame PDU for @chan carrying the already-packed
 * @control field, appending a CRC16 FCS when negotiated.
 * Returns the skb or ERR_PTR(-ENOMEM). */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, everything before itself */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
892
/* Transmit the supervisory frame described by @control, updating the
 * channel's F-bit, RNR-sent and ack bookkeeping as a side effect.
 * Non-sframe controls are ignored; allocation failures drop the frame. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
930
/* Send a Receiver-Ready (or Receiver-Not-Ready while locally busy)
 * S-frame acknowledging up to buffer_seq, optionally with the P bit. */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
949
/* True when no Connect Request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
954
955 static bool __amp_capable(struct l2cap_chan *chan)
956 {
957 struct l2cap_conn *conn = chan->conn;
958
959 if (enable_hs &&
960 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
961 conn->fixed_chan_mask & L2CAP_FC_A2MP)
962 return true;
963 else
964 return false;
965 }
966
/* Send an L2CAP Connect Request for @chan, remembering the command
 * ident for response matching and marking the connect as pending. */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
981
/* Mark @chan fully connected: drop all configuration state (including
 * CONF_NOT_COMPLETE set at creation), stop the channel timer, and
 * notify the socket layer. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
992
/* Start an outgoing connection: go through AMP discovery first when
 * the channel qualifies, otherwise send the Connect Request directly. */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1002
/* Kick off channel establishment on @chan's connection.  LE links skip
 * L2CAP configuration entirely.  On BR/EDR, the feature mask must be
 * known first: if the Information Request hasn't been sent yet, send it
 * and retry from the response handler; otherwise start the connection
 * once security is satisfied and no connect is already pending. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1033
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * remote feature mask @feat_mask and our local feature mask (which
 * includes ERTM/streaming unless the disable_ertm module flag is set).
 * Basic mode is not handled here and returns 0. */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
1049
/* Send an L2CAP Disconnect Request for @chan, stop any ERTM timers,
 * and move the channel to BT_DISCONN with @err recorded on the socket.
 * A2MP fixed channels only change state (no request on the wire). */
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error are set under one socket lock hold */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1080
1081 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * connection is usable: BT_CONNECT channels start their outgoing
 * connection (or are closed if their mode is unsupported and fixed by
 * CONF_STATE2_DEVICE); BT_CONNECT2 channels answer the pending Connect
 * Request according to security/defer-setup state and, on success,
 * send the first Configure Request.
 * Lock order: conn->chan_lock, then each chan lock, then sock lock. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below may unlink the current entry */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only proceed to config on a success response
			 * and if no Configure Request was sent yet */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1162
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match: an exact src/dst match wins immediately;
 * otherwise the last channel matching with BDADDR_ANY wildcards is
 * returned.  @state of 0 matches any state.
 * NOTE(review): despite the original comment, the returned channel is
 * NOT locked here — callers lock it themselves.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1205
/* An LE link came up on the slave side: if a socket is listening on
 * the LE data CID, spawn a child channel for it and mark it ready.
 * The parent socket lock is held across child creation.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Ask the socket layer to create the accepting child channel. */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL around while the channel exists. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Inherit the link's addresses into the child socket. */
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1242
/* The underlying HCI link is up: walk all channels on the connection
 * and push each one forward according to link type and channel type.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening LE socket, if any. */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP security elevation. */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel manages its own state. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels are ready once security is in
			 * place; there is no L2CAP connect exchange.
			 */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels are connected as
			 * soon as the ACL link itself is.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1287
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that demanded a reliable link get the
		 * error; others tolerate lossy delivery.
		 */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1304
/* Deferred work: the information request (feature mask etc.) timed
 * out. Mark feature discovery as done with whatever we learned and
 * resume the channels that were waiting on it.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1315
/* Tear down the L2CAP state of an HCI link: kill every channel on
 * it, drop the HCI channel and pending timers, then free the
 * connection object itself.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel outlives
		 * l2cap_chan_del() until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() runs without the channel lock held. */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* If SMP pairing was still in flight, cancel its timer and
	 * free its state before the connection goes away.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1358
1359 static void security_timeout(struct work_struct *work)
1360 {
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1363
1364 BT_DBG("conn %p", conn);
1365
1366 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1367 smp_chan_destroy(conn);
1368 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1369 }
1370 }
1371
/* Attach L2CAP connection state to an HCI link. If state is already
 * attached (or a failing HCI status is reported) the existing value
 * of hcon->l2cap_data is returned unchanged; returns NULL when the
 * HCI channel or the connection object cannot be allocated.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the outgoing MTU that matches the transport. */
	switch (hcon->type) {
	case AMP_LINK:
		conn->mtu = hcon->hdev->block_mtu;
		break;

	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */

	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* security_timer and info_timer share storage; only one is
	 * meaningful per link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1432
1433 /* ---- Socket interface ---- */
1434
1435 /* Find socket with psm and source / destination bdaddr.
1436 * Returns closest match.
1437 */
1438 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1439 bdaddr_t *src,
1440 bdaddr_t *dst)
1441 {
1442 struct l2cap_chan *c, *c1 = NULL;
1443
1444 read_lock(&chan_list_lock);
1445
1446 list_for_each_entry(c, &chan_list, global_l) {
1447 struct sock *sk = c->sk;
1448
1449 if (state && c->state != state)
1450 continue;
1451
1452 if (c->psm == psm) {
1453 int src_match, dst_match;
1454 int src_any, dst_any;
1455
1456 /* Exact match. */
1457 src_match = !bacmp(&bt_sk(sk)->src, src);
1458 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1459 if (src_match && dst_match) {
1460 read_unlock(&chan_list_lock);
1461 return c;
1462 }
1463
1464 /* Closest match */
1465 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1466 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1467 if ((src_match && dst_any) || (src_any && dst_match) ||
1468 (src_any && dst_any))
1469 c1 = c;
1470 }
1471 }
1472
1473 read_unlock(&chan_list_lock);
1474
1475 return c1;
1476 }
1477
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm or fixed @cid. Validates the channel's state
 * and mode, creates (or reuses) the HCI link and the L2CAP conn,
 * attaches the channel and kicks off the connect procedure.
 * Returns 0 on success (or if already connecting) or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local adapter that can reach dst. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* LE links carry at most one L2CAP channel here. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* chan_lock must not be held while taking conn->chan_lock
	 * inside l2cap_chan_add(), hence the unlock/lock dance.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1616
/* Wait (interruptibly) until the peer has acked all outstanding
 * ERTM I-frames or the channel loses its connection. Called with
 * the socket locked; the lock is dropped around each sleep so the
 * RX path can process incoming acks.
 * Returns 0 on success, or a negative errno on signal/socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Poll in HZ/5 slices rather than sleeping forever. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Re-arm before re-checking the loop condition. */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1648
1649 static void l2cap_monitor_timeout(struct work_struct *work)
1650 {
1651 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1652 monitor_timer.work);
1653
1654 BT_DBG("chan %p", chan);
1655
1656 l2cap_chan_lock(chan);
1657
1658 if (!chan->conn) {
1659 l2cap_chan_unlock(chan);
1660 l2cap_chan_put(chan);
1661 return;
1662 }
1663
1664 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1665
1666 l2cap_chan_unlock(chan);
1667 l2cap_chan_put(chan);
1668 }
1669
1670 static void l2cap_retrans_timeout(struct work_struct *work)
1671 {
1672 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1673 retrans_timer.work);
1674
1675 BT_DBG("chan %p", chan);
1676
1677 l2cap_chan_lock(chan);
1678
1679 if (!chan->conn) {
1680 l2cap_chan_unlock(chan);
1681 l2cap_chan_put(chan);
1682 return;
1683 }
1684
1685 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1686 l2cap_chan_unlock(chan);
1687 l2cap_chan_put(chan);
1688 }
1689
1690 static void l2cap_streaming_send(struct l2cap_chan *chan,
1691 struct sk_buff_head *skbs)
1692 {
1693 struct sk_buff *skb;
1694 struct l2cap_ctrl *control;
1695
1696 BT_DBG("chan %p, skbs %p", chan, skbs);
1697
1698 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1699
1700 while (!skb_queue_empty(&chan->tx_q)) {
1701
1702 skb = skb_dequeue(&chan->tx_q);
1703
1704 bt_cb(skb)->control.retries = 1;
1705 control = &bt_cb(skb)->control;
1706
1707 control->reqseq = 0;
1708 control->txseq = chan->next_tx_seq;
1709
1710 __pack_control(chan, control, skb);
1711
1712 if (chan->fcs == L2CAP_FCS_CRC16) {
1713 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1714 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1715 }
1716
1717 l2cap_do_send(chan, skb);
1718
1719 BT_DBG("Sent txseq %u", control->txseq);
1720
1721 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1722 chan->frames_sent++;
1723 }
1724 }
1725
/* Transmit as many queued ERTM I-frames as the remote TX window and
 * the TX state machine allow. Frames stay on tx_q (a clone is sent)
 * so they can be retransmitted until acked.
 * Returns the number of frames sent, or a negative errno.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer sent RNR: hold transmission until it clears. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the F-bit if one is owed to the peer. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acks everything up to buffer_seq. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue drained. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1792
/* Retransmit every sequence number queued on retrans_list. Frames
 * past the per-channel retry limit trigger a disconnect; clone (or
 * copy, if already cloned) failures abort the pass.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Peer sent RNR: hold retransmission until it clears. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up on the link if the retry limit is exceeded. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack info and F-bit for this transmission. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1868
/* Retransmit the single frame the peer requested via SREJ: queue
 * its sequence number and drive the retransmission engine.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1877
/* Retransmit all unacked frames starting at the peer's reqseq (REJ
 * recovery). The retrans_list is rebuilt from scratch with every
 * txseq from reqseq up to (not including) tx_send_head.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A P-bit from the peer obliges us to answer with an F-bit. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	/* Peer sent RNR: hold retransmission until it clears. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame to retransmit. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the unsent portion. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1911
/* Acknowledge received I-frames. Sends an RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames, sends an
 * explicit RR once the ack backlog crosses 3/4 of the window, and
 * falls back to the ack timer for smaller backlogs.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer to stop sending while we are busy. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer smaller backlogs to the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1961
/* Copy up to @count bytes of the user iovec into @skb, then chain
 * any remaining bytes as continuation fragments on the skb's
 * frag_list, each capped at the HCI MTU.
 * Returns the total number of bytes consumed, or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account fragment bytes in the head skb's totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2006
2007 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2008 struct msghdr *msg, size_t len,
2009 u32 priority)
2010 {
2011 struct l2cap_conn *conn = chan->conn;
2012 struct sk_buff *skb;
2013 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2014 struct l2cap_hdr *lh;
2015
2016 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2017
2018 count = min_t(unsigned int, (conn->mtu - hlen), len);
2019
2020 skb = chan->ops->alloc_skb(chan, count + hlen,
2021 msg->msg_flags & MSG_DONTWAIT);
2022 if (IS_ERR(skb))
2023 return skb;
2024
2025 skb->priority = priority;
2026
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2031 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2032
2033 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2034 if (unlikely(err < 0)) {
2035 kfree_skb(skb);
2036 return ERR_PTR(err);
2037 }
2038 return skb;
2039 }
2040
2041 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2042 struct msghdr *msg, size_t len,
2043 u32 priority)
2044 {
2045 struct l2cap_conn *conn = chan->conn;
2046 struct sk_buff *skb;
2047 int err, count;
2048 struct l2cap_hdr *lh;
2049
2050 BT_DBG("chan %p len %zu", chan, len);
2051
2052 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2053
2054 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2055 msg->msg_flags & MSG_DONTWAIT);
2056 if (IS_ERR(skb))
2057 return skb;
2058
2059 skb->priority = priority;
2060
2061 /* Create L2CAP header */
2062 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2063 lh->cid = cpu_to_le16(chan->dcid);
2064 lh->len = cpu_to_le16(len);
2065
2066 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2067 if (unlikely(err < 0)) {
2068 kfree_skb(skb);
2069 return ERR_PTR(err);
2070 }
2071 return skb;
2072 }
2073
/* Build one ERTM/streaming I-frame PDU: L2CAP header, a zeroed
 * control field (populated at send time), an optional SDU-length
 * field for the first segment of a segmented SDU, and the payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2127
/* Split one SDU from user space into a queue of I-frame PDUs sized
 * to fit a single HCI fragment, tagging each with the appropriate
 * SAR value (unsegmented, start, continue, end).
 * Returns 0 on success or a negative errno; on failure seg_queue is
 * purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments have no SDU-length field, so
			 * they can carry a bit more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2195
/* Send user data on a channel, dispatching on channel type and mode:
 * connectionless PDU, basic-mode PDU, or segmented ERTM/streaming
 * transmission. Returns the number of bytes accepted or a negative
 * errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* ERTM goes through the TX state machine; streaming
		 * sends immediately.
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2275
2276 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2277 {
2278 struct l2cap_ctrl control;
2279 u16 seq;
2280
2281 BT_DBG("chan %p, txseq %u", chan, txseq);
2282
2283 memset(&control, 0, sizeof(control));
2284 control.sframe = 1;
2285 control.super = L2CAP_SUPER_SREJ;
2286
2287 for (seq = chan->expected_tx_seq; seq != txseq;
2288 seq = __next_seq(chan, seq)) {
2289 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2290 control.reqseq = seq;
2291 l2cap_send_sframe(chan, &control);
2292 l2cap_seq_list_append(&chan->srej_list, seq);
2293 }
2294 }
2295
2296 chan->expected_tx_seq = __next_seq(chan, txseq);
2297 }
2298
/* Re-send an SREJ for the most recently requested missing frame
 * (the tail of srej_list), if any request is still outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding - nothing to re-request. */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2314
/* Re-send SREJs for every outstanding missing frame up to (but not
 * including) @txseq. Each popped sequence number is re-appended, so
 * the initial head is captured to guarantee exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at txseq itself or when the list runs dry. */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the request pending until the frame arrives. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2340
/* Process an incoming ack (reqseq): release every acked frame from
 * the TX queue, advance expected_ack_seq, and stop the retransmit
 * timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acked - nothing to do. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Free the acked frame if it is still queued. */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2372
2373 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2374 {
2375 BT_DBG("chan %p", chan);
2376
2377 chan->expected_tx_seq = chan->buffer_seq;
2378 l2cap_seq_list_clear(&chan->srej_list);
2379 skb_queue_purge(&chan->srej_q);
2380 chan->rx_state = L2CAP_RX_STATE_RECV;
2381 }
2382
/* ERTM TX state machine, XMIT state: handle data requests, local
 * busy transitions, incoming acks, polls and retransmission
 * timeouts while transmission is allowed.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and transmit immediately. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* This sends the RNR while locally busy. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll it with
			 * RR(P=1) and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release acked frames from the TX queue. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2454
2455 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2456 struct l2cap_ctrl *control,
2457 struct sk_buff_head *skbs, u8 event)
2458 {
2459 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2460 event);
2461
2462 switch (event) {
2463 case L2CAP_EV_DATA_REQUEST:
2464 if (chan->tx_send_head == NULL)
2465 chan->tx_send_head = skb_peek(skbs);
2466 /* Queue data, but don't send. */
2467 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2468 break;
2469 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2470 BT_DBG("Enter LOCAL_BUSY");
2471 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2472
2473 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2474 /* The SREJ_SENT state must be aborted if we are to
2475 * enter the LOCAL_BUSY state.
2476 */
2477 l2cap_abort_rx_srej_sent(chan);
2478 }
2479
2480 l2cap_send_ack(chan);
2481
2482 break;
2483 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2484 BT_DBG("Exit LOCAL_BUSY");
2485 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2486
2487 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2488 struct l2cap_ctrl local_control;
2489 memset(&local_control, 0, sizeof(local_control));
2490 local_control.sframe = 1;
2491 local_control.super = L2CAP_SUPER_RR;
2492 local_control.poll = 1;
2493 local_control.reqseq = chan->buffer_seq;
2494 l2cap_send_sframe(chan, &local_control);
2495
2496 chan->retry_count = 1;
2497 __set_monitor_timer(chan);
2498 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2499 }
2500 break;
2501 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2502 l2cap_process_reqseq(chan, control->reqseq);
2503
2504 /* Fall through */
2505
2506 case L2CAP_EV_RECV_FBIT:
2507 if (control && control->final) {
2508 __clear_monitor_timer(chan);
2509 if (chan->unacked_frames > 0)
2510 __set_retrans_timer(chan);
2511 chan->retry_count = 0;
2512 chan->tx_state = L2CAP_TX_STATE_XMIT;
2513 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2514 }
2515 break;
2516 case L2CAP_EV_EXPLICIT_POLL:
2517 /* Ignore */
2518 break;
2519 case L2CAP_EV_MONITOR_TO:
2520 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2521 l2cap_send_rr_or_rnr(chan, 1);
2522 __set_monitor_timer(chan);
2523 chan->retry_count++;
2524 } else {
2525 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2526 }
2527 break;
2528 default:
2529 break;
2530 }
2531 }
2532
2533 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2534 struct sk_buff_head *skbs, u8 event)
2535 {
2536 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2537 chan, control, skbs, event, chan->tx_state);
2538
2539 switch (chan->tx_state) {
2540 case L2CAP_TX_STATE_XMIT:
2541 l2cap_tx_state_xmit(chan, control, skbs, event);
2542 break;
2543 case L2CAP_TX_STATE_WAIT_F:
2544 l2cap_tx_state_wait_f(chan, control, skbs, event);
2545 break;
2546 default:
2547 /* Ignore event */
2548 break;
2549 }
2550 }
2551
/* Feed a received frame's reqseq/F-bit acknowledgement information into
 * the transmit state machine.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2558
/* Feed only the F-bit of a received frame into the transmit state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2565
2566 /* Copy frame to all raw sockets on that connection */
2567 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2568 {
2569 struct sk_buff *nskb;
2570 struct l2cap_chan *chan;
2571
2572 BT_DBG("conn %p", conn);
2573
2574 mutex_lock(&conn->chan_lock);
2575
2576 list_for_each_entry(chan, &conn->chan_l, list) {
2577 struct sock *sk = chan->sk;
2578 if (chan->chan_type != L2CAP_CHAN_RAW)
2579 continue;
2580
2581 /* Don't send frame to the socket it came from */
2582 if (skb->sk == sk)
2583 continue;
2584 nskb = skb_clone(skb, GFP_KERNEL);
2585 if (!nskb)
2586 continue;
2587
2588 if (chan->ops->recv(chan, nskb))
2589 kfree_skb(nskb);
2590 }
2591
2592 mutex_unlock(&conn->chan_lock);
2593 }
2594
2595 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header + @dlen
 * bytes of @data).  If the PDU exceeds the connection MTU, the payload
 * is split into continuation fragments chained on frag_list.
 *
 * Returns the skb on success, or NULL on allocation failure (any
 * partially built skb chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total PDU length; first fragment is capped by the link MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far */
	kfree_skb(skb);
	return NULL;
}
2658
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * @type and @olen receive the option header fields.  @val receives the
 * option value: 1/2/4-byte values are decoded little-endian; any other
 * length is returned as a pointer to the raw option payload.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * against the remaining buffer length here; callers bound their loops
 * with "len >= L2CAP_CONF_OPT_SIZE" — confirm a crafted length cannot
 * make a caller read past the received data when it dereferences @val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2692
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * Values of length 1/2/4 are encoded little-endian; any other length is
 * copied verbatim from the buffer @val points at.
 *
 * NOTE(review): no bound is checked here — the caller must guarantee
 * the destination buffer has room for L2CAP_CONF_OPT_SIZE + @len bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2722
2723 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2724 {
2725 struct l2cap_conf_efs efs;
2726
2727 switch (chan->mode) {
2728 case L2CAP_MODE_ERTM:
2729 efs.id = chan->local_id;
2730 efs.stype = chan->local_stype;
2731 efs.msdu = cpu_to_le16(chan->local_msdu);
2732 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2733 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2734 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2735 break;
2736
2737 case L2CAP_MODE_STREAMING:
2738 efs.id = 1;
2739 efs.stype = L2CAP_SERV_BESTEFFORT;
2740 efs.msdu = cpu_to_le16(chan->local_msdu);
2741 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2742 efs.acc_lat = 0;
2743 efs.flush_to = 0;
2744 break;
2745
2746 default:
2747 return;
2748 }
2749
2750 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2751 (unsigned long) &efs);
2752 }
2753
/* Delayed-work handler for the ERTM acknowledgement timer.
 *
 * If received frames have not yet been acknowledged (buffer_seq has
 * advanced past last_acked_seq), send an RR/RNR S-frame to ack them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgement was sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Presumably drops a reference taken when the timer was armed —
	 * confirm against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
2773
2774 int l2cap_ertm_init(struct l2cap_chan *chan)
2775 {
2776 int err;
2777
2778 chan->next_tx_seq = 0;
2779 chan->expected_tx_seq = 0;
2780 chan->expected_ack_seq = 0;
2781 chan->unacked_frames = 0;
2782 chan->buffer_seq = 0;
2783 chan->frames_sent = 0;
2784 chan->last_acked_seq = 0;
2785 chan->sdu = NULL;
2786 chan->sdu_last_frag = NULL;
2787 chan->sdu_len = 0;
2788
2789 skb_queue_head_init(&chan->tx_q);
2790
2791 chan->local_amp_id = 0;
2792 chan->move_id = 0;
2793 chan->move_state = L2CAP_MOVE_STABLE;
2794 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2795
2796 if (chan->mode != L2CAP_MODE_ERTM)
2797 return 0;
2798
2799 chan->rx_state = L2CAP_RX_STATE_RECV;
2800 chan->tx_state = L2CAP_TX_STATE_XMIT;
2801
2802 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2803 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2804 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2805
2806 skb_queue_head_init(&chan->srej_q);
2807
2808 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2809 if (err < 0)
2810 return err;
2811
2812 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2813 if (err < 0)
2814 l2cap_seq_list_free(&chan->srej_list);
2815
2816 return err;
2817 }
2818
2819 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2820 {
2821 switch (mode) {
2822 case L2CAP_MODE_STREAMING:
2823 case L2CAP_MODE_ERTM:
2824 if (l2cap_mode_supported(mode, remote_feat_mask))
2825 return mode;
2826 /* fall through */
2827 default:
2828 return L2CAP_MODE_BASIC;
2829 }
2830 }
2831
2832 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2833 {
2834 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2835 }
2836
2837 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2838 {
2839 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2840 }
2841
2842 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2843 {
2844 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2845 __l2cap_ews_supported(chan)) {
2846 /* use extended control field */
2847 set_bit(FLAG_EXT_CTRL, &chan->flags);
2848 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2849 } else {
2850 chan->tx_win = min_t(u16, chan->tx_win,
2851 L2CAP_DEFAULT_TX_WINDOW);
2852 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2853 }
2854 chan->ack_win = chan->tx_win;
2855 }
2856
/* Build the payload of an outgoing Configure Request for @chan into
 * @data.  On the first request, the channel mode may be downgraded
 * based on the remote feature mask.  Returns the number of bytes
 * written.  The caller provides a buffer large enough for all options
 * (no bound is checked here).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		/* Downgrade to a mode the remote supports if needed */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit RFC option if the remote could
		 * have proposed something other than basic mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the receiving side */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU must fit in the link MTU with ERTM overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to disable FCS if neither side needs it */
		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window size option if extended control is on */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2976
/* Parse the Configure Request stored in chan->conf_req and build the
 * Configure Response payload into @data.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the requested configuration is irreconcilable (wrong mode after a
 * retry, EFS/EWS not supported, incompatible service type).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored if unknown */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation happens only on the first round */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Flexible devices adapt to the remote's mode */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices insist on their configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) overrides the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* We dictate the timeouts as the receiving side */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3193
/* Parse a Configure Response (@rsp, @len bytes) and build the follow-up
 * Configure Request into @data, adopting the values the remote proposed.
 *
 * @result may be updated (e.g. to L2CAP_CONF_UNACCEPT for a too-small
 * MTU).  Returns the number of request bytes written, or -ECONNREFUSED
 * when the remote's counter-proposal cannot be accepted.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Enforce a floor on the negotiated MTU */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State 2 devices cannot change mode mid-config */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the remote */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			/* Adopt the remote's timeouts and PDU size */
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3295
3296 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3297 u16 result, u16 flags)
3298 {
3299 struct l2cap_conf_rsp *rsp = data;
3300 void *ptr = rsp->data;
3301
3302 BT_DBG("chan %p", chan);
3303
3304 rsp->scid = cpu_to_le16(chan->dcid);
3305 rsp->result = cpu_to_le16(result);
3306 rsp->flags = cpu_to_le16(flags);
3307
3308 return ptr - data;
3309 }
3310
/* Send the deferred success response for a connection that was held in
 * BT_CONNECT2 (e.g. pending authorization), then kick off configuration
 * if a Configure Request has not been sent yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller sends the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3330
/* Extract final RFC (and extended window) parameters from a Configure
 * Response and apply them to the channel.  Only meaningful for ERTM and
 * streaming channels; other modes return immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Only RFC and EWS options are of interest here */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control is
		 * in use, otherwise from the RFC option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3381
3382 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3383 struct l2cap_cmd_hdr *cmd, u8 *data)
3384 {
3385 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3386
3387 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3388 return 0;
3389
3390 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3391 cmd->ident == conn->info_ident) {
3392 cancel_delayed_work(&conn->info_timer);
3393
3394 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3395 conn->info_ident = 0;
3396
3397 l2cap_conn_start(conn);
3398 }
3399
3400 return 0;
3401 }
3402
/* Handle an incoming Connection Request: look up a listening channel
 * for the PSM, create a new child channel, and send a response with
 * code @rsp_code.  @amp_id selects the controller (non-zero forces a
 * pending result until the AMP physical link is up).
 *
 * Returns the new channel, or NULL if the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock, then the parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize before accept */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures must complete first */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if it never happened */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediately begin configuration on a successful connect */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3533
/* Handle an incoming Connection Request on a BR/EDR link; all the real
 * work (channel creation, response) is done by l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3540
/* Handle a Connection Response (or Create Channel Response): on
 * success, move the channel to BT_CONFIG and kick off configuration;
 * on pending, wait; on any error, tear the channel down.
 *
 * Returns 0, or -EFAULT if no matching channel is found.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response carries no scid yet; match by our ident */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3609
3610 static inline void set_default_fcs(struct l2cap_chan *chan)
3611 {
3612 /* FCS is enabled only in ERTM or streaming mode, if one or both
3613 * sides request it.
3614 */
3615 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3616 chan->fcs = L2CAP_FCS_NONE;
3617 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3618 chan->fcs = L2CAP_FCS_CRC16;
3619 }
3620
3621 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3622 u8 ident, u16 flags)
3623 {
3624 struct l2cap_conn *conn = chan->conn;
3625
3626 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3627 flags);
3628
3629 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3630 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3631
3632 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3633 l2cap_build_conf_rsp(chan, data,
3634 L2CAP_CONF_SUCCESS, flags), data);
3635 }
3636
/* Handle an incoming Configuration Request.
 *
 * Option data may be split across several requests (continuation flag),
 * so fragments accumulate in chan->conf_req until the final one, which
 * is then parsed and answered in a single response.  Once both
 * directions of the handshake are complete the channel is brought up,
 * initialising ERTM/streaming state where needed.
 *
 * Returns 0 on success, -ENOENT when dcid matches no channel, or a
 * negative errno from ERTM initialisation.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): paired with l2cap_chan_unlock() at the end, so this
	 * lookup appears to return with the channel locked - confirm against
	 * l2cap_get_chan_by_scid().
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Config is only legal while connecting/configuring */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured - bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own config request has not been sent yet - do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->ctrl_id)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3745
/* Handle a Configuration Response from the peer.
 *
 * Success completes the remote side of the handshake; pending may
 * trigger an immediate EFS response; unaccepted options are renegotiated
 * up to L2CAP_CONF_MAX_CONF_RSP times, after which (or on any other
 * result) the channel is disconnected.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* NOTE(review): paired with l2cap_chan_unlock() at done:, so this
	 * lookup appears to return with the channel locked.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			/* BR/EDR responds now; otherwise remember the ident
			 * so the response can be sent later.
			 */
			if (!chan->ctrl_id)
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			else
				chan->ident = cmd->ident;
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough - renegotiation limit exceeded, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured - bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3851
/* Handle a Disconnection Request from the peer: acknowledge it, shut the
 * socket down and remove the channel.
 *
 * Always returns 0 - an unknown CID simply has no channel left to act on.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* CIDs in the response are from the peer's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Extra reference keeps the channel alive across l2cap_chan_del()
	 * so ops->close() and the final put can still use it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3898
/* Handle a Disconnection Response: the peer acknowledged our disconnect
 * request, so remove the channel cleanly (error 0).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive across l2cap_chan_del()
	 * so ops->close() and the final put can still use it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3933
3934 static inline int l2cap_information_req(struct l2cap_conn *conn,
3935 struct l2cap_cmd_hdr *cmd, u8 *data)
3936 {
3937 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3938 u16 type;
3939
3940 type = __le16_to_cpu(req->type);
3941
3942 BT_DBG("type 0x%4.4x", type);
3943
3944 if (type == L2CAP_IT_FEAT_MASK) {
3945 u8 buf[8];
3946 u32 feat_mask = l2cap_feat_mask;
3947 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3948 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3949 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3950 if (!disable_ertm)
3951 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3952 | L2CAP_FEAT_FCS;
3953 if (enable_hs)
3954 feat_mask |= L2CAP_FEAT_EXT_FLOW
3955 | L2CAP_FEAT_EXT_WINDOW;
3956
3957 put_unaligned_le32(feat_mask, rsp->data);
3958 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3959 buf);
3960 } else if (type == L2CAP_IT_FIXED_CHAN) {
3961 u8 buf[12];
3962 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3963
3964 if (enable_hs)
3965 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3966 else
3967 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3968
3969 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3970 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3971 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3972 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3973 buf);
3974 } else {
3975 struct l2cap_info_rsp rsp;
3976 rsp.type = cpu_to_le16(type);
3977 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3978 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
3979 &rsp);
3980 }
3981
3982 return 0;
3983 }
3984
/* Handle an Information Response during connection setup.
 *
 * A feature-mask response may chain into a fixed-channels request; once
 * the exchange is finished (or failed) pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused - give up on info gathering and proceed */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by asking for the fixed channel map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4043
4044 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4045 struct l2cap_cmd_hdr *cmd,
4046 u16 cmd_len, void *data)
4047 {
4048 struct l2cap_create_chan_req *req = data;
4049 struct l2cap_chan *chan;
4050 u16 psm, scid;
4051
4052 if (cmd_len != sizeof(*req))
4053 return -EPROTO;
4054
4055 if (!enable_hs)
4056 return -EINVAL;
4057
4058 psm = le16_to_cpu(req->psm);
4059 scid = le16_to_cpu(req->scid);
4060
4061 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4062
4063 if (req->amp_id) {
4064 struct hci_dev *hdev;
4065
4066 /* Validate AMP controller id */
4067 hdev = hci_dev_get(req->amp_id);
4068 if (!hdev || hdev->dev_type != HCI_AMP ||
4069 !test_bit(HCI_UP, &hdev->flags)) {
4070 struct l2cap_create_chan_rsp rsp;
4071
4072 rsp.dcid = 0;
4073 rsp.scid = cpu_to_le16(scid);
4074 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4075 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4076
4077 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4078 sizeof(rsp), &rsp);
4079
4080 if (hdev)
4081 hci_dev_put(hdev);
4082
4083 return 0;
4084 }
4085
4086 hci_dev_put(hdev);
4087 }
4088
4089 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4090 req->amp_id);
4091
4092 return 0;
4093 }
4094
4095 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4096 u16 icid, u16 result)
4097 {
4098 struct l2cap_move_chan_rsp rsp;
4099
4100 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4101
4102 rsp.icid = cpu_to_le16(icid);
4103 rsp.result = cpu_to_le16(result);
4104
4105 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4106 }
4107
4108 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4109 struct l2cap_chan *chan,
4110 u16 icid, u16 result)
4111 {
4112 struct l2cap_move_chan_cfm cfm;
4113 u8 ident;
4114
4115 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4116
4117 ident = l2cap_get_ident(conn);
4118 if (chan)
4119 chan->ident = ident;
4120
4121 cfm.icid = cpu_to_le16(icid);
4122 cfm.result = cpu_to_le16(result);
4123
4124 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4125 }
4126
4127 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4128 u16 icid)
4129 {
4130 struct l2cap_move_chan_cfm_rsp rsp;
4131
4132 BT_DBG("icid 0x%4.4x", icid);
4133
4134 rsp.icid = cpu_to_le16(icid);
4135 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4136 }
4137
4138 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4139 struct l2cap_cmd_hdr *cmd,
4140 u16 cmd_len, void *data)
4141 {
4142 struct l2cap_move_chan_req *req = data;
4143 u16 icid = 0;
4144 u16 result = L2CAP_MR_NOT_ALLOWED;
4145
4146 if (cmd_len != sizeof(*req))
4147 return -EPROTO;
4148
4149 icid = le16_to_cpu(req->icid);
4150
4151 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4152
4153 if (!enable_hs)
4154 return -EINVAL;
4155
4156 /* Placeholder: Always refuse */
4157 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4158
4159 return 0;
4160 }
4161
4162 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4163 struct l2cap_cmd_hdr *cmd,
4164 u16 cmd_len, void *data)
4165 {
4166 struct l2cap_move_chan_rsp *rsp = data;
4167 u16 icid, result;
4168
4169 if (cmd_len != sizeof(*rsp))
4170 return -EPROTO;
4171
4172 icid = le16_to_cpu(rsp->icid);
4173 result = le16_to_cpu(rsp->result);
4174
4175 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4176
4177 /* Placeholder: Always unconfirmed */
4178 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4179
4180 return 0;
4181 }
4182
4183 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4184 struct l2cap_cmd_hdr *cmd,
4185 u16 cmd_len, void *data)
4186 {
4187 struct l2cap_move_chan_cfm *cfm = data;
4188 u16 icid, result;
4189
4190 if (cmd_len != sizeof(*cfm))
4191 return -EPROTO;
4192
4193 icid = le16_to_cpu(cfm->icid);
4194 result = le16_to_cpu(cfm->result);
4195
4196 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4197
4198 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4199
4200 return 0;
4201 }
4202
4203 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4204 struct l2cap_cmd_hdr *cmd,
4205 u16 cmd_len, void *data)
4206 {
4207 struct l2cap_move_chan_cfm_rsp *rsp = data;
4208 u16 icid;
4209
4210 if (cmd_len != sizeof(*rsp))
4211 return -EPROTO;
4212
4213 icid = le16_to_cpu(rsp->icid);
4214
4215 BT_DBG("icid 0x%4.4x", icid);
4216
4217 return 0;
4218 }
4219
/* Validate LE connection parameters proposed by the peer.
 *
 * Intervals min/max must satisfy 6 <= min <= max <= 3200 and the
 * supervision timeout must lie in [10, 3200] units; the timeout must
 * also exceed the maximum interval (the factor 8 converts between the
 * two units), and the slave latency may not exceed 499 or the largest
 * value that still fits under the timeout.
 *
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 lat_limit;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	lat_limit = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > lat_limit)
		return -EINVAL;

	return 0;
}
4240
/* Handle an LE Connection Parameter Update Request (sent by the slave).
 *
 * Only legal when we are master of the link.  Parameters are validated
 * with l2cap_check_conn_param(); the verdict is reported back in the
 * response and accepted values are pushed to the controller via
 * hci_le_conn_update().
 *
 * Returns 0 on success, -EINVAL when not master, or -EPROTO on a
 * malformed command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Validate the length before touching the payload */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4283
4284 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4285 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4286 u8 *data)
4287 {
4288 int err = 0;
4289
4290 switch (cmd->code) {
4291 case L2CAP_COMMAND_REJ:
4292 l2cap_command_rej(conn, cmd, data);
4293 break;
4294
4295 case L2CAP_CONN_REQ:
4296 err = l2cap_connect_req(conn, cmd, data);
4297 break;
4298
4299 case L2CAP_CONN_RSP:
4300 case L2CAP_CREATE_CHAN_RSP:
4301 err = l2cap_connect_create_rsp(conn, cmd, data);
4302 break;
4303
4304 case L2CAP_CONF_REQ:
4305 err = l2cap_config_req(conn, cmd, cmd_len, data);
4306 break;
4307
4308 case L2CAP_CONF_RSP:
4309 err = l2cap_config_rsp(conn, cmd, data);
4310 break;
4311
4312 case L2CAP_DISCONN_REQ:
4313 err = l2cap_disconnect_req(conn, cmd, data);
4314 break;
4315
4316 case L2CAP_DISCONN_RSP:
4317 err = l2cap_disconnect_rsp(conn, cmd, data);
4318 break;
4319
4320 case L2CAP_ECHO_REQ:
4321 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4322 break;
4323
4324 case L2CAP_ECHO_RSP:
4325 break;
4326
4327 case L2CAP_INFO_REQ:
4328 err = l2cap_information_req(conn, cmd, data);
4329 break;
4330
4331 case L2CAP_INFO_RSP:
4332 err = l2cap_information_rsp(conn, cmd, data);
4333 break;
4334
4335 case L2CAP_CREATE_CHAN_REQ:
4336 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4337 break;
4338
4339 case L2CAP_MOVE_CHAN_REQ:
4340 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4341 break;
4342
4343 case L2CAP_MOVE_CHAN_RSP:
4344 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4345 break;
4346
4347 case L2CAP_MOVE_CHAN_CFM:
4348 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4349 break;
4350
4351 case L2CAP_MOVE_CHAN_CFM_RSP:
4352 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4353 break;
4354
4355 default:
4356 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4357 err = -EINVAL;
4358 break;
4359 }
4360
4361 return err;
4362 }
4363
4364 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4365 struct l2cap_cmd_hdr *cmd, u8 *data)
4366 {
4367 switch (cmd->code) {
4368 case L2CAP_COMMAND_REJ:
4369 return 0;
4370
4371 case L2CAP_CONN_PARAM_UPDATE_REQ:
4372 return l2cap_conn_param_update_req(conn, cmd, data);
4373
4374 case L2CAP_CONN_PARAM_UPDATE_RSP:
4375 return 0;
4376
4377 default:
4378 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4379 return -EINVAL;
4380 }
4381 }
4382
/* Split a signaling-channel PDU into individual commands and dispatch
 * each to the LE or BR/EDR handler.  Malformed commands terminate the
 * loop; handler errors are answered with a Command Reject.  Consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* A PDU may carry several commands back to back */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Truncated payload or reserved ident 0 - stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the PDU */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4431
4432 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4433 {
4434 u16 our_fcs, rcv_fcs;
4435 int hdr_size;
4436
4437 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4438 hdr_size = L2CAP_EXT_HDR_SIZE;
4439 else
4440 hdr_size = L2CAP_ENH_HDR_SIZE;
4441
4442 if (chan->fcs == L2CAP_FCS_CRC16) {
4443 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4444 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4445 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4446
4447 if (our_fcs != rcv_fcs)
4448 return -EBADMSG;
4449 }
4450 return 0;
4451 }
4452
/* Answer a poll by delivering the Final bit in whatever frame fits:
 * an RNR when locally busy, otherwise pending I-frames, and finally a
 * plain RR if no frame has carried the F-bit yet.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy - restart retransmissions */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4486
/* Chain new_frag onto skb's frag_list using *last_frag as a tail
 * pointer (so appends stay O(1)) and keep skb's length accounting in
 * sync with the added fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4505
/* Reassemble an SDU from segmented I-frames according to the SAR bits.
 *
 * Unsegmented frames go straight to ops->recv().  A start frame carries
 * the total SDU length and opens the reassembly buffer; continue/end
 * frames are appended as fragments, and a completed SDU is delivered.
 * Sequencing violations (start while one is open, continue/end with
 * none, length mismatch) leave err at -EINVAL and drop all state.
 *
 * On success the skb is either consumed or kept in chan->sdu; on error
 * both the skb and any partial SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly already in progress makes this invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes of a start frame are the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start frame claims less data than it carries - invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* More data than announced - invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partially reassembled SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4587
4588 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4589 {
4590 u8 event;
4591
4592 if (chan->mode != L2CAP_MODE_ERTM)
4593 return;
4594
4595 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4596 l2cap_tx(chan, NULL, NULL, event);
4597 }
4598
/* Drain the SREJ queue after retransmissions arrive, delivering frames
 * in sequence until a gap (or local busy) stops progress.  When the
 * queue empties, return to the normal receive state and ack.
 *
 * Returns 0 or the first error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap - the next in-sequence frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4632
/* Process a received SREJ S-frame: retransmit the single requested
 * I-frame, honoring the poll/final bits and the retry limit.  An SREJ
 * for the next (unsent) sequence number, or exceeding max_tx, forces a
 * disconnect.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would request a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll demands a Final bit in our reply */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember the SREJ so a later F-bit copy is not resent */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final answers the
			 * SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4690
/* Process a received REJ S-frame: retransmit everything from reqseq on,
 * subject to the retry limit.  A REJ for the next (unsent) sequence
 * number, or exceeding max_tx, forces a disconnect.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this final answers a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4727
/* Classify a received I-frame's txseq relative to the expected sequence
 * number and the receive window, so the RX state machine can decide
 * whether to accept, drop, SREJ or disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4813
/* ERTM receive state machine: handler for L2CAP_RX_STATE_RECV.
 *
 * Processes one receive event (I-frame or S-frame) while no SREJ
 * recovery is pending.  If @skb is queued or consumed by reassembly,
 * skb_in_use is set so the frame is not freed on exit.
 *
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* The piggybacked ack info is useful to the transmit
			 * side even if the payload is discarded below.
			 */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* Final bit without an outstanding REJ means
				 * the remote answered our poll: retransmit
				 * unacked frames and resume sending.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Payload already seen; only the ack info matters */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			/* Safe to drop silently (see l2cap_classify_txseq) */
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error - tear down */
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote is ready again; restart the
			 * retransmission timer if frames remain unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote can't receive: hold retransmissions until it
		 * reports ready again.
		 */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4948
/* ERTM receive state machine: handler for L2CAP_RX_STATE_SREJ_SENT.
 *
 * Active while one or more SREJ requests are outstanding.  Incoming
 * I-frames are parked on srej_q until the missing frames arrive, at
 * which point l2cap_rx_queued_iframes() flushes the queue in order.
 *
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived; drop it from
			 * the pending-SREJ list and try to flush the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error - tear down */
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR S-frame */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5092
5093 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5094 {
5095 /* Make sure reqseq is for a packet that has been sent but not acked */
5096 u16 unacked;
5097
5098 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5099 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5100 }
5101
5102 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5103 struct sk_buff *skb, u8 event)
5104 {
5105 int err = 0;
5106
5107 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5108 control, skb, event, chan->rx_state);
5109
5110 if (__valid_reqseq(chan, control->reqseq)) {
5111 switch (chan->rx_state) {
5112 case L2CAP_RX_STATE_RECV:
5113 err = l2cap_rx_state_recv(chan, control, skb, event);
5114 break;
5115 case L2CAP_RX_STATE_SREJ_SENT:
5116 err = l2cap_rx_state_srej_sent(chan, control, skb,
5117 event);
5118 break;
5119 default:
5120 /* shut it down */
5121 break;
5122 }
5123 } else {
5124 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5125 control->reqseq, chan->next_tx_seq,
5126 chan->expected_ack_seq);
5127 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5128 }
5129
5130 return err;
5131 }
5132
5133 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5134 struct sk_buff *skb)
5135 {
5136 int err = 0;
5137
5138 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5139 chan->rx_state);
5140
5141 if (l2cap_classify_txseq(chan, control->txseq) ==
5142 L2CAP_TXSEQ_EXPECTED) {
5143 l2cap_pass_to_tx(chan, control);
5144
5145 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5146 __next_seq(chan, chan->buffer_seq));
5147
5148 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5149
5150 l2cap_reassemble_sdu(chan, skb, control);
5151 } else {
5152 if (chan->sdu) {
5153 kfree_skb(chan->sdu);
5154 chan->sdu = NULL;
5155 }
5156 chan->sdu_last_frag = NULL;
5157 chan->sdu_len = 0;
5158
5159 if (skb) {
5160 BT_DBG("Freeing %p", skb);
5161 kfree_skb(skb);
5162 }
5163 }
5164
5165 chan->last_acked_seq = control->txseq;
5166 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5167
5168 return err;
5169 }
5170
5171 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5172 {
5173 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5174 u16 len;
5175 u8 event;
5176
5177 __unpack_control(chan, skb);
5178
5179 len = skb->len;
5180
5181 /*
5182 * We can just drop the corrupted I-frame here.
5183 * Receiver will miss it and start proper recovery
5184 * procedures and ask for retransmission.
5185 */
5186 if (l2cap_check_fcs(chan, skb))
5187 goto drop;
5188
5189 if (!control->sframe && control->sar == L2CAP_SAR_START)
5190 len -= L2CAP_SDULEN_SIZE;
5191
5192 if (chan->fcs == L2CAP_FCS_CRC16)
5193 len -= L2CAP_FCS_SIZE;
5194
5195 if (len > chan->mps) {
5196 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5197 goto drop;
5198 }
5199
5200 if (!control->sframe) {
5201 int err;
5202
5203 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5204 control->sar, control->reqseq, control->final,
5205 control->txseq);
5206
5207 /* Validate F-bit - F=0 always valid, F=1 only
5208 * valid in TX WAIT_F
5209 */
5210 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5211 goto drop;
5212
5213 if (chan->mode != L2CAP_MODE_STREAMING) {
5214 event = L2CAP_EV_RECV_IFRAME;
5215 err = l2cap_rx(chan, control, skb, event);
5216 } else {
5217 err = l2cap_stream_rx(chan, control, skb);
5218 }
5219
5220 if (err)
5221 l2cap_send_disconn_req(chan->conn, chan,
5222 ECONNRESET);
5223 } else {
5224 const u8 rx_func_to_event[4] = {
5225 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5226 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5227 };
5228
5229 /* Only I-frames are expected in streaming mode */
5230 if (chan->mode == L2CAP_MODE_STREAMING)
5231 goto drop;
5232
5233 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5234 control->reqseq, control->final, control->poll,
5235 control->super);
5236
5237 if (len != 0) {
5238 BT_ERR("%d", len);
5239 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5240 goto drop;
5241 }
5242
5243 /* Validate F and P bits */
5244 if (control->final && (control->poll ||
5245 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5246 goto drop;
5247
5248 event = rx_func_to_event[control->super];
5249 if (l2cap_rx(chan, control, skb, event))
5250 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5251 }
5252
5253 return 0;
5254
5255 drop:
5256 kfree_skb(skb);
5257 return 0;
5258 }
5259
/* Route a frame on a dynamically allocated (or A2MP) CID to its channel.
 *
 * Frames for unknown CIDs are dropped, except that an A2MP channel is
 * created on demand for L2CAP_CID_A2MP.  Consumes @skb on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* NOTE(review): l2cap_get_chan_by_scid presumably returns the
	 * channel locked - every exit below goes through
	 * l2cap_chan_unlock().  Confirm against its definition.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5318
5319 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5320 struct sk_buff *skb)
5321 {
5322 struct l2cap_chan *chan;
5323
5324 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5325 if (!chan)
5326 goto drop;
5327
5328 BT_DBG("chan %p, len %d", chan, skb->len);
5329
5330 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5331 goto drop;
5332
5333 if (chan->imtu < skb->len)
5334 goto drop;
5335
5336 if (!chan->ops->recv(chan, skb))
5337 return;
5338
5339 drop:
5340 kfree_skb(skb);
5341 }
5342
5343 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5344 struct sk_buff *skb)
5345 {
5346 struct l2cap_chan *chan;
5347
5348 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5349 if (!chan)
5350 goto drop;
5351
5352 BT_DBG("chan %p, len %d", chan, skb->len);
5353
5354 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5355 goto drop;
5356
5357 if (chan->imtu < skb->len)
5358 goto drop;
5359
5360 if (!chan->ops->recv(chan, skb))
5361 return;
5362
5363 drop:
5364 kfree_skb(skb);
5365 }
5366
5367 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5368 {
5369 struct l2cap_hdr *lh = (void *) skb->data;
5370 u16 cid, len;
5371 __le16 psm;
5372
5373 skb_pull(skb, L2CAP_HDR_SIZE);
5374 cid = __le16_to_cpu(lh->cid);
5375 len = __le16_to_cpu(lh->len);
5376
5377 if (len != skb->len) {
5378 kfree_skb(skb);
5379 return;
5380 }
5381
5382 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5383
5384 switch (cid) {
5385 case L2CAP_CID_LE_SIGNALING:
5386 case L2CAP_CID_SIGNALING:
5387 l2cap_sig_channel(conn, skb);
5388 break;
5389
5390 case L2CAP_CID_CONN_LESS:
5391 psm = get_unaligned((__le16 *) skb->data);
5392 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5393 l2cap_conless_channel(conn, psm, skb);
5394 break;
5395
5396 case L2CAP_CID_LE_DATA:
5397 l2cap_att_channel(conn, cid, skb);
5398 break;
5399
5400 case L2CAP_CID_SMP:
5401 if (smp_sig_channel(conn, skb))
5402 l2cap_conn_del(conn->hcon, EACCES);
5403 break;
5404
5405 default:
5406 l2cap_data_channel(conn, cid, skb);
5407 break;
5408 }
5409 }
5410
5411 /* ---- L2CAP interface with lower layer (HCI) ---- */
5412
5413 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5414 {
5415 int exact = 0, lm1 = 0, lm2 = 0;
5416 struct l2cap_chan *c;
5417
5418 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5419
5420 /* Find listening sockets and check their link_mode */
5421 read_lock(&chan_list_lock);
5422 list_for_each_entry(c, &chan_list, global_l) {
5423 struct sock *sk = c->sk;
5424
5425 if (c->state != BT_LISTEN)
5426 continue;
5427
5428 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5429 lm1 |= HCI_LM_ACCEPT;
5430 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5431 lm1 |= HCI_LM_MASTER;
5432 exact++;
5433 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5434 lm2 |= HCI_LM_ACCEPT;
5435 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5436 lm2 |= HCI_LM_MASTER;
5437 }
5438 }
5439 read_unlock(&chan_list_lock);
5440
5441 return exact ? lm1 : lm2;
5442 }
5443
5444 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5445 {
5446 struct l2cap_conn *conn;
5447
5448 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5449
5450 if (!status) {
5451 conn = l2cap_conn_add(hcon, status);
5452 if (conn)
5453 l2cap_conn_ready(conn);
5454 } else
5455 l2cap_conn_del(hcon, bt_to_errno(status));
5456
5457 }
5458
5459 int l2cap_disconn_ind(struct hci_conn *hcon)
5460 {
5461 struct l2cap_conn *conn = hcon->l2cap_data;
5462
5463 BT_DBG("hcon %p", hcon);
5464
5465 if (!conn)
5466 return HCI_ERROR_REMOTE_USER_TERM;
5467 return conn->disc_reason;
5468 }
5469
/* HCI callback: the ACL link went down.  Tear down the whole L2CAP
 * connection, mapping the HCI reason code to an errno for upper layers.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
5476
5477 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5478 {
5479 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5480 return;
5481
5482 if (encrypt == 0x00) {
5483 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5484 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5485 } else if (chan->sec_level == BT_SECURITY_HIGH)
5486 l2cap_chan_close(chan, ECONNREFUSED);
5487 } else {
5488 if (chan->sec_level == BT_SECURITY_MEDIUM)
5489 __clear_chan_timer(chan);
5490 }
5491 }
5492
/* HCI callback: authentication/encryption state changed for @hcon.
 *
 * Walks every channel on the connection and either advances its
 * connection setup (BT_CONNECT/BT_CONNECT2) or reacts to the security
 * change (connected channels), according to @status and @encrypt.
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Successful encryption on LE triggers SMP key distribution */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels manage their own security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* LE data channels become ready once encryption is up */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Connection already being set up elsewhere; skip */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channel: wake the socket and apply the
		 * security-change policy (timers/close).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: send
			 * the deferred connection response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: kick off configuration immediately */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5608
/* HCI callback: one ACL data fragment arrived for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL packets
 * (ACL_START/ACL_CONT) via conn->rx_skb / conn->rx_len, handing each
 * complete frame to l2cap_recv_frame().  Consumes @skb: complete
 * frames transfer ownership to l2cap_recv_frame(), fragments are
 * copied and then freed at the drop label.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is still in progress
		 * means the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

	/* Intentional fall-through for the break paths above: the
	 * fragment's data has been copied, so the fragment is freed.
	 */
drop:
	kfree_skb(skb);
	return 0;
}
5709
5710 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5711 {
5712 struct l2cap_chan *c;
5713
5714 read_lock(&chan_list_lock);
5715
5716 list_for_each_entry(c, &chan_list, global_l) {
5717 struct sock *sk = c->sk;
5718
5719 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5720 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5721 c->state, __le16_to_cpu(c->psm),
5722 c->scid, c->dcid, c->imtu, c->omtu,
5723 c->sec_level, c->mode);
5724 }
5725
5726 read_unlock(&chan_list_lock);
5727
5728 return 0;
5729 }
5730
/* Bind the channel-list shower to a freshly opened debugfs file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5735
/* seq_file-backed file operations for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it */
static struct dentry *l2cap_debugfs;
5744
5745 int __init l2cap_init(void)
5746 {
5747 int err;
5748
5749 err = l2cap_init_sockets();
5750 if (err < 0)
5751 return err;
5752
5753 if (bt_debugfs) {
5754 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5755 NULL, &l2cap_debugfs_fops);
5756 if (!l2cap_debugfs)
5757 BT_ERR("Failed to create L2CAP debug file");
5758 }
5759
5760 return 0;
5761 }
5762
/* Module teardown: remove the debugfs entry, then the socket layer.
 * debugfs_remove() tolerates a NULL dentry, so no guard is needed.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5768
/* Expose "disable_ertm" as a writable (0644) module parameter so ERTM
 * can be turned off at load time or via sysfs.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");