/*
 * Bluetooth: Channel move request handling
 * Source file: net/bluetooth/l2cap_core.c
 * (from GitHub mirror mt8127/android_kernel_alcatel_ttab)
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
/* Module parameter: when set, Enhanced Retransmission Mode and Streaming
 * Mode are not advertised (see l2cap_mode_supported()). */
bool disable_ertm;

/* Local feature mask reported in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Locally supported fixed channels; signalling channel only by default. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
/* Find channel with given SCID.
 * Returns the channel locked (l2cap_chan_lock), or NULL if not found. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): no l2cap_chan_hold() is taken before chan_lock is
	 * dropped, so the caller depends on the channel staying on the list
	 * while it holds the channel lock — verify against callers. */
	return c;
}
102
/* Find channel with given DCID.
 * Returns the channel locked (l2cap_chan_lock), or NULL if not found.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): as with l2cap_get_chan_by_scid(), no reference is
	 * held on the returned channel — verify callers' lifetime rules. */
	return c;
}
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
132 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
133 {
134 struct l2cap_chan *c;
135
136 list_for_each_entry(c, &chan_list, global_l) {
137 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
138 return c;
139 }
140 return NULL;
141 }
142
143 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 {
145 int err;
146
147 write_lock(&chan_list_lock);
148
149 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
150 err = -EADDRINUSE;
151 goto done;
152 }
153
154 if (psm) {
155 chan->psm = psm;
156 chan->sport = psm;
157 err = 0;
158 } else {
159 u16 p;
160
161 err = -EINVAL;
162 for (p = 0x1001; p < 0x1100; p += 2)
163 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
164 chan->psm = cpu_to_le16(p);
165 chan->sport = cpu_to_le16(p);
166 err = 0;
167 break;
168 }
169 }
170
171 done:
172 write_unlock(&chan_list_lock);
173 return err;
174 }
175
176 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
177 {
178 write_lock(&chan_list_lock);
179
180 chan->scid = scid;
181
182 write_unlock(&chan_list_lock);
183
184 return 0;
185 }
186
187 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
188 {
189 u16 cid = L2CAP_CID_DYN_START;
190
191 for (; cid < L2CAP_CID_DYN_END; cid++) {
192 if (!__l2cap_get_chan_by_scid(conn, cid))
193 return cid;
194 }
195
196 return 0;
197 }
198
/* Set the channel state and notify the channel ops.
 * Caller must hold the socket lock (see l2cap_state_change()). */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
207
/* Locked wrapper around __l2cap_state_change(): takes the socket lock. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
216
/* Record @err on the channel's socket. Caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
223
/* Locked wrapper around __l2cap_chan_set_err(): takes the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
232
/* Arm the ERTM retransmission timer, but only when no monitor timer is
 * already pending (the monitor timer supersedes it) and a retransmission
 * timeout has been configured. */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
241
/* Arm the ERTM monitor timer (if configured), cancelling any pending
 * retransmission timer first — only one of the two runs at a time. */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
250
/* Linear search of @head for the skb whose ERTM TxSeq equals @seq.
 * Returns the skb (still queued) or NULL. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
263
264 /* ---- L2CAP sequence number lists ---- */
265
266 /* For ERTM, ordered lists of sequence numbers must be tracked for
267 * SREJ requests that are received and for frames that are to be
268 * retransmitted. These seq_list functions implement a singly-linked
269 * list in an array, where membership in the list can also be checked
270 * in constant time. Items can also be added to the tail of the list
271 * and removed from the head in constant time, without further memory
272 * allocs or frees.
273 */
274
275 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
276 {
277 size_t alloc_size, i;
278
279 /* Allocated size is a power of 2 to map sequence numbers
280 * (which may be up to 14 bits) in to a smaller array that is
281 * sized for the negotiated ERTM transmit windows.
282 */
283 alloc_size = roundup_pow_of_two(size);
284
285 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
286 if (!seq_list->list)
287 return -ENOMEM;
288
289 seq_list->mask = alloc_size - 1;
290 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
291 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
292 for (i = 0; i < alloc_size; i++)
293 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
294
295 return 0;
296 }
297
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
302
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR iff the
 * corresponding sequence number is not on the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
309
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty or @seq is not found. Removing the head is O(1);
 * removing an interior element walks the singly-linked chain. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
343
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
349
350 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
351 {
352 u16 i;
353
354 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
355 return;
356
357 for (i = 0; i <= seq_list->mask; i++)
358 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
359
360 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
362 }
363
/* Append @seq to the tail of the list in O(1). Duplicate appends are
 * silently ignored (membership is tracked per slot). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: @seq becomes the head too. */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
381
/* Delayed-work handler for chan->chan_timer: the channel spent too long
 * in a transient state, so close it with a state-appropriate error. */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference presumably taken when the timer was armed
	 * (see l2cap_set_timer users) — confirm against timer setup. */
	l2cap_chan_put(chan);
}
411
412 struct l2cap_chan *l2cap_chan_create(void)
413 {
414 struct l2cap_chan *chan;
415
416 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
417 if (!chan)
418 return NULL;
419
420 mutex_init(&chan->lock);
421
422 write_lock(&chan_list_lock);
423 list_add(&chan->global_l, &chan_list);
424 write_unlock(&chan_list_lock);
425
426 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
427
428 chan->state = BT_OPEN;
429
430 kref_init(&chan->kref);
431
432 /* This flag is cleared in l2cap_chan_ready() */
433 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
434
435 BT_DBG("chan %p", chan);
436
437 return chan;
438 }
439
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked from l2cap_chan_put() on the last reference. */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
452
/* Take an additional reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
459
/* Drop a reference; frees the channel via l2cap_chan_destroy() on the
 * last put. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
466
/* Reset a channel's negotiable parameters (FCS, ERTM windows, security)
 * to their specification defaults. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
478
/* Attach @chan to @conn: pick CIDs and default MTUs according to the
 * channel type, initialise the extended-flowspec defaults, take a
 * reference and link the channel onto the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()). */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data channel CIDs. */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP fixed channel with its own default MTUs. */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
534
/* Locked wrapper around __l2cap_chan_add(): takes conn->chan_lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
541
542 void l2cap_chan_del(struct l2cap_chan *chan, int err)
543 {
544 struct l2cap_conn *conn = chan->conn;
545
546 __clear_chan_timer(chan);
547
548 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
549
550 if (conn) {
551 struct amp_mgr *mgr = conn->hcon->amp_mgr;
552 /* Delete from channel list */
553 list_del(&chan->list);
554
555 l2cap_chan_put(chan);
556
557 chan->conn = NULL;
558
559 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
560 hci_conn_put(conn->hcon);
561
562 if (mgr && mgr->bredr_chan == chan)
563 mgr->bredr_chan = NULL;
564 }
565
566 chan->ops->teardown(chan, err);
567
568 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
569 return;
570
571 switch(chan->mode) {
572 case L2CAP_MODE_BASIC:
573 break;
574
575 case L2CAP_MODE_ERTM:
576 __clear_retrans_timer(chan);
577 __clear_monitor_timer(chan);
578 __clear_ack_timer(chan);
579
580 skb_queue_purge(&chan->srej_q);
581
582 l2cap_seq_list_free(&chan->srej_list);
583 l2cap_seq_list_free(&chan->retrans_list);
584
585 /* fall through */
586
587 case L2CAP_MODE_STREAMING:
588 skb_queue_purge(&chan->tx_q);
589 break;
590 }
591
592 return;
593 }
594
/* Close @chan according to its current state: send a Disconnect Request
 * for established ACL channels, reject a deferred incoming connection in
 * BT_CONNECT2, or simply tear the channel down otherwise.
 * Caller holds the channel lock. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
									sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established/being-configured ACL channel: disconnect
		 * cleanly and wait (with a timer) for the response. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending our response: send a
		 * reject before deleting the channel. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
651
652 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
653 {
654 if (chan->chan_type == L2CAP_CHAN_RAW) {
655 switch (chan->sec_level) {
656 case BT_SECURITY_HIGH:
657 return HCI_AT_DEDICATED_BONDING_MITM;
658 case BT_SECURITY_MEDIUM:
659 return HCI_AT_DEDICATED_BONDING;
660 default:
661 return HCI_AT_NO_BONDING;
662 }
663 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
664 if (chan->sec_level == BT_SECURITY_LOW)
665 chan->sec_level = BT_SECURITY_SDP;
666
667 if (chan->sec_level == BT_SECURITY_HIGH)
668 return HCI_AT_NO_BONDING_MITM;
669 else
670 return HCI_AT_NO_BONDING;
671 } else {
672 switch (chan->sec_level) {
673 case BT_SECURITY_HIGH:
674 return HCI_AT_GENERAL_BONDING_MITM;
675 case BT_SECURITY_MEDIUM:
676 return HCI_AT_GENERAL_BONDING;
677 default:
678 return HCI_AT_NO_BONDING;
679 }
680 }
681 }
682
/* Service level security */
/* Request link-level security appropriate for this channel; returns the
 * result of hci_conn_security() (non-zero when already satisfied). */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
693
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved 1..128 range. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
715
/* Build and transmit a signalling command on @conn's HCI channel.
 * Silently drops the command if the skb cannot be built. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
							void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling gets top priority and forces the link active. */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
737
738 static bool __chan_is_moving(struct l2cap_chan *chan)
739 {
740 return chan->move_state != L2CAP_MOVE_STABLE &&
741 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
742 }
743
/* Transmit a data skb on the channel's underlying HCI channel, choosing
 * flushable vs non-flushable ACL flags from channel and controller
 * capabilities. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	/* Non-flushable only when the channel is not flagged flushable
	 * and the controller supports non-flushable packets. */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
761
/* Decode a 16-bit enhanced control field into @control.
 * Fields not valid for the frame type are zeroed. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
785
/* Decode a 32-bit extended control field into @control.
 * Fields not valid for the frame type are zeroed. */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
809
/* Decode and strip the (enhanced or extended, per channel flags) control
 * field from the front of @skb into its control block. */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
823
824 static u32 __pack_extended_control(struct l2cap_ctrl *control)
825 {
826 u32 packed;
827
828 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
830
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
835 } else {
836 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838 }
839
840 return packed;
841 }
842
843 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
844 {
845 u16 packed;
846
847 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
848 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
849
850 if (control->sframe) {
851 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
852 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
853 packed |= L2CAP_CTRL_FRAME_TYPE;
854 } else {
855 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
856 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
857 }
858
859 return packed;
860 }
861
/* Write @control into @skb just after the basic L2CAP header, in the
 * width (enhanced or extended) selected by the channel flags. */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
874
875 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
876 {
877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
878 return L2CAP_EXT_HDR_SIZE;
879 else
880 return L2CAP_ENH_HDR_SIZE;
881 }
882
/* Build a complete S-frame PDU for @chan carrying the pre-packed
 * @control field, appending an FCS when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM). */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length + destination CID. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
915
/* Send an ERTM S-frame described by @control, updating the channel's
 * F-bit, RNR-sent and acknowledgement bookkeeping as a side effect. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Every non-SREJ S-frame acknowledges up to reqseq. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
953
954 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
955 {
956 struct l2cap_ctrl control;
957
958 BT_DBG("chan %p, poll %d", chan, poll);
959
960 memset(&control, 0, sizeof(control));
961 control.sframe = 1;
962 control.poll = poll;
963
964 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
965 control.super = L2CAP_SUPER_RNR;
966 else
967 control.super = L2CAP_SUPER_RR;
968
969 control.reqseq = chan->buffer_seq;
970 l2cap_send_sframe(chan, &control);
971 }
972
/* Non-zero when no Connect Request is currently outstanding. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
977
978 static bool __amp_capable(struct l2cap_chan *chan)
979 {
980 struct l2cap_conn *conn = chan->conn;
981
982 if (enable_hs &&
983 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
984 conn->fixed_chan_mask & L2CAP_FC_A2MP)
985 return true;
986 else
987 return false;
988 }
989
/* Send an L2CAP Connect Request for @chan and mark the request pending;
 * the allocated ident is stored in chan->ident for response matching. */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1004
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset retransmission state and park TX/RX state machines until the
 * move completes. No-op for non-ERTM channels. */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of frames already sent at least once
	 * (non-zero retries); the first unsent frame ends the scan —
	 * presumably sent frames sit at the front of tx_q (verify). */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the remote as busy so no new data goes out mid-move. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1039
/* Transition @chan to BT_CONNECTED: clear all configuration flags and
 * the channel timer, then notify the channel ops. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1050
/* Begin connecting @chan: discover AMP controllers first when the
 * channel prefers AMP, otherwise go straight to a Connect Request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1060
/* Kick off channel establishment: LE channels are ready immediately;
 * BR/EDR channels first need the remote feature mask (requested here if
 * not yet done) and a satisfied security level. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		/* LE uses fixed channels — no connect/config exchange. */
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait for the Information Response to come back. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		/* First channel on this connection: query the remote
		 * feature mask; l2cap_conn_start() resumes us later. */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
							sizeof(req), &req);
	}
}
1091
1092 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1093 {
1094 u32 local_feat_mask = l2cap_feat_mask;
1095 if (!disable_ertm)
1096 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1097
1098 switch (mode) {
1099 case L2CAP_MODE_ERTM:
1100 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1101 case L2CAP_MODE_STREAMING:
1102 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1103 default:
1104 return 0x00;
1105 }
1106 }
1107
/* Send a Disconnect Request for @chan (A2MP fixed channels just change
 * state), stop ERTM timers and move the channel to BT_DISCONN with the
 * given socket error. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP has no dynamic CIDs to disconnect — just change state. */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
							sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1138
/* ---- L2CAP connections ---- */
/* Walk every channel on a connection whose feature exchange finished:
 * start outgoing connections (BT_CONNECT) and answer pending incoming
 * ones (BT_CONNECT2), honouring security and deferred-setup settings. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below can unlink entries. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security not yet settled or a request already
			 * outstanding: try again later. */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices may not fall back when their
			 * required mode is unsupported — close instead. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection awaiting our response. */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace decides; report
					 * authorization pending. */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Only successful, not-yet-configured channels
			 * proceed to the configuration exchange. */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1220
1221 /* Find socket with cid and source/destination bdaddr.
1222 * Returns closest match, locked.
1223 */
1224 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1225 bdaddr_t *src,
1226 bdaddr_t *dst)
1227 {
1228 struct l2cap_chan *c, *c1 = NULL;
1229
1230 read_lock(&chan_list_lock);
1231
1232 list_for_each_entry(c, &chan_list, global_l) {
1233 struct sock *sk = c->sk;
1234
1235 if (state && c->state != state)
1236 continue;
1237
1238 if (c->scid == cid) {
1239 int src_match, dst_match;
1240 int src_any, dst_any;
1241
1242 /* Exact match. */
1243 src_match = !bacmp(&bt_sk(sk)->src, src);
1244 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1245 if (src_match && dst_match) {
1246 read_unlock(&chan_list_lock);
1247 return c;
1248 }
1249
1250 /* Closest match */
1251 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1252 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1253 if ((src_match && dst_any) || (src_any && dst_match) ||
1254 (src_any && dst_any))
1255 c1 = c;
1256 }
1257 }
1258
1259 read_unlock(&chan_list_lock);
1260
1261 return c1;
1262 }
1263
/* Accept an incoming LE link onto a listening LE channel.
 *
 * Looks up a listening channel on the fixed LE data CID, asks it to
 * spawn a child channel, and marks the child ready.  If no listener
 * exists the link is simply left alone.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the underlying ACL alive while the child channel exists */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1300
/* The underlying ACL/LE link is up: move every channel towards ready.
 *
 * LE links trigger security/accept handling; fixed BR/EDR channels
 * become connected immediately; dynamic channels in BT_CONNECT start
 * the L2CAP connect procedure.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: look for a listening LE socket to accept it */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: kick off security at the requested level */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager instead */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Ready only once security is in place */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless: connected with the link */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1345
1346 /* Notify sockets that we cannot guaranty reliability anymore */
1347 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1348 {
1349 struct l2cap_chan *chan;
1350
1351 BT_DBG("conn %p", conn);
1352
1353 mutex_lock(&conn->chan_lock);
1354
1355 list_for_each_entry(chan, &conn->chan_l, list) {
1356 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1357 l2cap_chan_set_err(chan, err);
1358 }
1359
1360 mutex_unlock(&conn->chan_lock);
1361 }
1362
/* Information Request timed out: mark the feature-mask exchange done
 * (with whatever was learned so far) and proceed with channel setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1373
/* Tear down an l2cap_conn when the underlying HCI link goes away.
 *
 * Kills every channel with @err, releases the HCI channel, cancels any
 * pending timers and finally frees the connection itself.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra ref keeps chan alive after l2cap_chan_del() drops
		 * the connection's reference.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() runs without the chan lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE link with SMP pairing pending: stop its timer and free the
	 * pairing context before the conn goes away.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1416
/* SMP pairing timed out: destroy the pairing context and take the
 * whole connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear guards against racing with l2cap_conn_del(),
	 * which clears the same bit before destroying the SMP context.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1429
/* Allocate and attach an l2cap_conn to @hcon (idempotent).
 *
 * Returns the existing connection if one is already attached, or NULL
 * when @status reports an HCI error or allocation fails.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Outgoing MTU depends on the transport of the link */
	switch (hcon->type) {
	case AMP_LINK:
		conn->mtu = hcon->hdev->block_mtu;
		break;

	case LE_LINK:
		/* Fall back to the ACL MTU if the controller did not
		 * report a dedicated LE MTU.
		 */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */

	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE uses the SMP security timer, BR/EDR the info-request timer.
	 * NOTE(review): only one of the two is ever initialized here --
	 * presumably they share storage; confirm in struct l2cap_conn.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1490
1491 /* ---- Socket interface ---- */
1492
1493 /* Find socket with psm and source / destination bdaddr.
1494 * Returns closest match.
1495 */
1496 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1497 bdaddr_t *src,
1498 bdaddr_t *dst)
1499 {
1500 struct l2cap_chan *c, *c1 = NULL;
1501
1502 read_lock(&chan_list_lock);
1503
1504 list_for_each_entry(c, &chan_list, global_l) {
1505 struct sock *sk = c->sk;
1506
1507 if (state && c->state != state)
1508 continue;
1509
1510 if (c->psm == psm) {
1511 int src_match, dst_match;
1512 int src_any, dst_any;
1513
1514 /* Exact match. */
1515 src_match = !bacmp(&bt_sk(sk)->src, src);
1516 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1517 if (src_match && dst_match) {
1518 read_unlock(&chan_list_lock);
1519 return c;
1520 }
1521
1522 /* Closest match */
1523 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1524 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1525 if ((src_match && dst_any) || (src_any && dst_match) ||
1526 (src_any && dst_any))
1527 c1 = c;
1528 }
1529 }
1530
1531 read_unlock(&chan_list_lock);
1532
1533 return c1;
1534 }
1535
/* Initiate an outgoing L2CAP connection for @chan.
 *
 * Validates @psm/@cid against the channel type and mode, creates (or
 * reuses) the ACL/LE link to @dst, attaches the channel to the
 * connection and starts the connect procedure.  Returns 0 on success
 * (or when a connect is already in progress), -errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only when not administratively disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel may ride an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock; the chan lock is
	 * dropped around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels: connected as soon as security
			 * allows, no L2CAP handshake needed.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1674
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acked by the remote, or the channel loses its connection, a signal
 * arrives, or a socket error is raised.
 *
 * Called with the socket lock held; the lock is released around each
 * sleep.  Returns 0 on success or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after each timeout */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can land */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1706
1707 static void l2cap_monitor_timeout(struct work_struct *work)
1708 {
1709 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1710 monitor_timer.work);
1711
1712 BT_DBG("chan %p", chan);
1713
1714 l2cap_chan_lock(chan);
1715
1716 if (!chan->conn) {
1717 l2cap_chan_unlock(chan);
1718 l2cap_chan_put(chan);
1719 return;
1720 }
1721
1722 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1723
1724 l2cap_chan_unlock(chan);
1725 l2cap_chan_put(chan);
1726 }
1727
1728 static void l2cap_retrans_timeout(struct work_struct *work)
1729 {
1730 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1731 retrans_timer.work);
1732
1733 BT_DBG("chan %p", chan);
1734
1735 l2cap_chan_lock(chan);
1736
1737 if (!chan->conn) {
1738 l2cap_chan_unlock(chan);
1739 l2cap_chan_put(chan);
1740 return;
1741 }
1742
1743 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1744 l2cap_chan_unlock(chan);
1745 l2cap_chan_put(chan);
1746 }
1747
1748 static void l2cap_streaming_send(struct l2cap_chan *chan,
1749 struct sk_buff_head *skbs)
1750 {
1751 struct sk_buff *skb;
1752 struct l2cap_ctrl *control;
1753
1754 BT_DBG("chan %p, skbs %p", chan, skbs);
1755
1756 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1757
1758 while (!skb_queue_empty(&chan->tx_q)) {
1759
1760 skb = skb_dequeue(&chan->tx_q);
1761
1762 bt_cb(skb)->control.retries = 1;
1763 control = &bt_cb(skb)->control;
1764
1765 control->reqseq = 0;
1766 control->txseq = chan->next_tx_seq;
1767
1768 __pack_control(chan, control, skb);
1769
1770 if (chan->fcs == L2CAP_FCS_CRC16) {
1771 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1772 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1773 }
1774
1775 l2cap_do_send(chan, skb);
1776
1777 BT_DBG("Sent txseq %u", control->txseq);
1778
1779 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1780 chan->frames_sent++;
1781 }
1782 }
1783
/* Transmit as many queued I-frames as the remote's tx window allows.
 *
 * Returns the number of frames sent, 0 when blocked (remote busy,
 * wrong tx state, or nothing to send), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back the F-bit if one is owed to the remote */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acks all frames received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original skb remains
		 * queued for possible retransmission until acked.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1850
/* Retransmit every frame whose sequence number is on retrans_list.
 *
 * Each frame's control field is rebuilt with the current reqseq (and
 * F-bit, if owed) before resending.  Exceeding max_tx retries for any
 * frame disconnects the channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up on the channel once the retry limit is hit */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* The control field changed, so the FCS must be recomputed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1926
/* Retransmit the single frame requested by an SREJ (control->reqseq) */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1935
/* Queue every sent-but-unacked frame from @control->reqseq up to the
 * send pointer for retransmission (REJ / poll recovery).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll requires an F-bit on the first frame sent in response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the remote has already acked via reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything sent but not yet acked */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1969
/* Acknowledge received I-frames.
 *
 * Sends RNR when the local side is busy; otherwise tries to piggy-back
 * the ack on outgoing I-frames, sends an explicit RR once roughly 3/4
 * of the ack window is consumed, or arms the ack timer to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the remote to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets acked when the timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2019
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes land in @skb itself; the remainder is spilled
 * into a chain of continuation skbs (no L2CAP header) hung off
 * skb's frag_list, each sized to the HCI MTU.  Returns the number of
 * bytes copied or a negative errno.  On failure the caller owns @skb
 * (and its frag chain) and must free it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link into the chain before copying so a partial chain
		 * is still freed along with skb on error.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2064
2065 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2066 struct msghdr *msg, size_t len,
2067 u32 priority)
2068 {
2069 struct l2cap_conn *conn = chan->conn;
2070 struct sk_buff *skb;
2071 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2072 struct l2cap_hdr *lh;
2073
2074 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2075
2076 count = min_t(unsigned int, (conn->mtu - hlen), len);
2077
2078 skb = chan->ops->alloc_skb(chan, count + hlen,
2079 msg->msg_flags & MSG_DONTWAIT);
2080 if (IS_ERR(skb))
2081 return skb;
2082
2083 skb->priority = priority;
2084
2085 /* Create L2CAP header */
2086 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2087 lh->cid = cpu_to_le16(chan->dcid);
2088 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2089 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2090
2091 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2092 if (unlikely(err < 0)) {
2093 kfree_skb(skb);
2094 return ERR_PTR(err);
2095 }
2096 return skb;
2097 }
2098
2099 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2100 struct msghdr *msg, size_t len,
2101 u32 priority)
2102 {
2103 struct l2cap_conn *conn = chan->conn;
2104 struct sk_buff *skb;
2105 int err, count;
2106 struct l2cap_hdr *lh;
2107
2108 BT_DBG("chan %p len %zu", chan, len);
2109
2110 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2111
2112 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2113 msg->msg_flags & MSG_DONTWAIT);
2114 if (IS_ERR(skb))
2115 return skb;
2116
2117 skb->priority = priority;
2118
2119 /* Create L2CAP header */
2120 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2121 lh->cid = cpu_to_le16(chan->dcid);
2122 lh->len = cpu_to_le16(len);
2123
2124 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2125 if (unlikely(err < 0)) {
2126 kfree_skb(skb);
2127 return ERR_PTR(err);
2128 }
2129 return skb;
2130 }
2131
/* Build an ERTM/streaming I-frame PDU.
 *
 * The header consists of the L2CAP header, a control field (zeroed
 * here, populated at send time), an optional SDU-length field for the
 * first segment of a segmented SDU (@sdulen != 0), and room for the
 * FCS when enabled.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced or extended control field size */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2185
/* Segment an SDU of @len bytes from @msg into I-frame PDUs on
 * @seg_queue, applying the ERTM SAR (start/continue/end) markers.
 * Returns 0 on success or a negative errno (queue purged on error).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SAR, no SDU-length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments omit the SDU-length field, so they
			 * can carry L2CAP_SDULEN_SIZE more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2253
/* Entry point for sending @len bytes of user data on @chan.
 *
 * Dispatches on the channel's mode: connectionless and basic modes
 * build and send a single PDU; ERTM and streaming segment the SDU
 * first and hand the segments to the respective tx path.  Returns the
 * number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2333
2334 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2335 {
2336 struct l2cap_ctrl control;
2337 u16 seq;
2338
2339 BT_DBG("chan %p, txseq %u", chan, txseq);
2340
2341 memset(&control, 0, sizeof(control));
2342 control.sframe = 1;
2343 control.super = L2CAP_SUPER_SREJ;
2344
2345 for (seq = chan->expected_tx_seq; seq != txseq;
2346 seq = __next_seq(chan, seq)) {
2347 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2348 control.reqseq = seq;
2349 l2cap_send_sframe(chan, &control);
2350 l2cap_seq_list_append(&chan->srej_list, seq);
2351 }
2352 }
2353
2354 chan->expected_tx_seq = __next_seq(chan, txseq);
2355 }
2356
2357 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2358 {
2359 struct l2cap_ctrl control;
2360
2361 BT_DBG("chan %p", chan);
2362
2363 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2364 return;
2365
2366 memset(&control, 0, sizeof(control));
2367 control.sframe = 1;
2368 control.super = L2CAP_SUPER_SREJ;
2369 control.reqseq = chan->srej_list.tail;
2370 l2cap_send_sframe(chan, &control);
2371 }
2372
/* Re-send SREJs for every sequence number still on srej_list, in one
 * pass, stopping early if @txseq is reached.  Each popped entry is
 * re-appended, so the list contents are preserved (rotated).
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2398
2399 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2400 {
2401 struct sk_buff *acked_skb;
2402 u16 ackseq;
2403
2404 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2405
2406 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2407 return;
2408
2409 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2410 chan->expected_ack_seq, chan->unacked_frames);
2411
2412 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2413 ackseq = __next_seq(chan, ackseq)) {
2414
2415 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2416 if (acked_skb) {
2417 skb_unlink(acked_skb, &chan->tx_q);
2418 kfree_skb(acked_skb);
2419 chan->unacked_frames--;
2420 }
2421 }
2422
2423 chan->expected_ack_seq = reqseq;
2424
2425 if (chan->unacked_frames == 0)
2426 __clear_retrans_timer(chan);
2427
2428 BT_DBG("unacked_frames %u", chan->unacked_frames);
2429 }
2430
/* Abandon SREJ_SENT recovery: rewind the expected txseq to the last
 * in-order point, drop the SREJ bookkeeping and any out-of-order
 * frames held for reassembly, and return to the normal receive state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2440
/* ERTM transmit state machine: XMIT (normal transmission) state.
 *
 * @chan:    channel whose tx state machine receives the event
 * @control: control field of a received frame, or NULL for local events
 * @skbs:    queue of PDUs for L2CAP_EV_DATA_REQUEST, otherwise NULL
 * @event:   one of the L2CAP_EV_* transmit events
 *
 * Data requests are queued and sent immediately; poll events and
 * retransmission timeouts send a P=1 S-frame and move to WAIT_F.
 */
2441 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2442 struct l2cap_ctrl *control,
2443 struct sk_buff_head *skbs, u8 event)
2444 {
2445 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2446 event);
2447
2448 switch (event) {
2449 case L2CAP_EV_DATA_REQUEST:
2450 if (chan->tx_send_head == NULL)
2451 chan->tx_send_head = skb_peek(skbs);
2452
2453 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2454 l2cap_ertm_send(chan);
2455 break;
2456 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2457 BT_DBG("Enter LOCAL_BUSY");
2458 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2459
2460 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2461 /* The SREJ_SENT state must be aborted if we are to
2462 * enter the LOCAL_BUSY state.
2463 */
2464 l2cap_abort_rx_srej_sent(chan);
2465 }
2466
2467 l2cap_send_ack(chan);
2468
2469 break;
2470 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2471 BT_DBG("Exit LOCAL_BUSY");
2472 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2473
/* If an RNR went out while busy, poll the peer with RR(P=1) so it
 * learns we can receive again, then wait for the F-bit reply.
 */
2474 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2475 struct l2cap_ctrl local_control;
2476
2477 memset(&local_control, 0, sizeof(local_control));
2478 local_control.sframe = 1;
2479 local_control.super = L2CAP_SUPER_RR;
2480 local_control.poll = 1;
2481 local_control.reqseq = chan->buffer_seq;
2482 l2cap_send_sframe(chan, &local_control);
2483
2484 chan->retry_count = 1;
2485 __set_monitor_timer(chan);
2486 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2487 }
2488 break;
2489 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2490 l2cap_process_reqseq(chan, control->reqseq);
2491 break;
2492 case L2CAP_EV_EXPLICIT_POLL:
2493 l2cap_send_rr_or_rnr(chan, 1);
2494 chan->retry_count = 1;
2495 __set_monitor_timer(chan);
2496 __clear_ack_timer(chan);
2497 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2498 break;
2499 case L2CAP_EV_RETRANS_TO:
2500 l2cap_send_rr_or_rnr(chan, 1);
2501 chan->retry_count = 1;
2502 __set_monitor_timer(chan);
2503 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2504 break;
2505 case L2CAP_EV_RECV_FBIT:
2506 /* Nothing to process */
2507 break;
2508 default:
2509 break;
2510 }
2511 }
2512
2513 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2514 struct l2cap_ctrl *control,
2515 struct sk_buff_head *skbs, u8 event)
2516 {
2517 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2518 event);
2519
2520 switch (event) {
2521 case L2CAP_EV_DATA_REQUEST:
2522 if (chan->tx_send_head == NULL)
2523 chan->tx_send_head = skb_peek(skbs);
2524 /* Queue data, but don't send. */
2525 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2526 break;
2527 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2528 BT_DBG("Enter LOCAL_BUSY");
2529 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2530
2531 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2532 /* The SREJ_SENT state must be aborted if we are to
2533 * enter the LOCAL_BUSY state.
2534 */
2535 l2cap_abort_rx_srej_sent(chan);
2536 }
2537
2538 l2cap_send_ack(chan);
2539
2540 break;
2541 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2542 BT_DBG("Exit LOCAL_BUSY");
2543 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2544
2545 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2546 struct l2cap_ctrl local_control;
2547 memset(&local_control, 0, sizeof(local_control));
2548 local_control.sframe = 1;
2549 local_control.super = L2CAP_SUPER_RR;
2550 local_control.poll = 1;
2551 local_control.reqseq = chan->buffer_seq;
2552 l2cap_send_sframe(chan, &local_control);
2553
2554 chan->retry_count = 1;
2555 __set_monitor_timer(chan);
2556 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2557 }
2558 break;
2559 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2560 l2cap_process_reqseq(chan, control->reqseq);
2561
2562 /* Fall through */
2563
2564 case L2CAP_EV_RECV_FBIT:
2565 if (control && control->final) {
2566 __clear_monitor_timer(chan);
2567 if (chan->unacked_frames > 0)
2568 __set_retrans_timer(chan);
2569 chan->retry_count = 0;
2570 chan->tx_state = L2CAP_TX_STATE_XMIT;
2571 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2572 }
2573 break;
2574 case L2CAP_EV_EXPLICIT_POLL:
2575 /* Ignore */
2576 break;
2577 case L2CAP_EV_MONITOR_TO:
2578 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2579 l2cap_send_rr_or_rnr(chan, 1);
2580 __set_monitor_timer(chan);
2581 chan->retry_count++;
2582 } else {
2583 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2584 }
2585 break;
2586 default:
2587 break;
2588 }
2589 }
2590
2591 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2592 struct sk_buff_head *skbs, u8 event)
2593 {
2594 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2595 chan, control, skbs, event, chan->tx_state);
2596
2597 switch (chan->tx_state) {
2598 case L2CAP_TX_STATE_XMIT:
2599 l2cap_tx_state_xmit(chan, control, skbs, event);
2600 break;
2601 case L2CAP_TX_STATE_WAIT_F:
2602 l2cap_tx_state_wait_f(chan, control, skbs, event);
2603 break;
2604 default:
2605 /* Ignore event */
2606 break;
2607 }
2608 }
2609
2610 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2611 struct l2cap_ctrl *control)
2612 {
2613 BT_DBG("chan %p, control %p", chan, control);
2614 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2615 }
2616
2617 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2618 struct l2cap_ctrl *control)
2619 {
2620 BT_DBG("chan %p, control %p", chan, control);
2621 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2622 }
2623
2624 /* Copy frame to all raw sockets on that connection */
2625 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2626 {
2627 struct sk_buff *nskb;
2628 struct l2cap_chan *chan;
2629
2630 BT_DBG("conn %p", conn);
2631
/* chan_lock protects the connection's channel list during the walk. */
2632 mutex_lock(&conn->chan_lock);
2633
2634 list_for_each_entry(chan, &conn->chan_l, list) {
2635 struct sock *sk = chan->sk;
2636 if (chan->chan_type != L2CAP_CHAN_RAW)
2637 continue;
2638
2639 /* Don't send frame to the socket it came from */
2640 if (skb->sk == sk)
2641 continue;
/* Each recipient gets its own clone; skip silently on allocation
 * failure (raw delivery is best-effort).
 */
2642 nskb = skb_clone(skb, GFP_KERNEL);
2643 if (!nskb)
2644 continue;
2645
/* recv() takes ownership on success; free the clone if it refuses. */
2646 if (chan->ops->recv(chan, nskb))
2647 kfree_skb(nskb);
2648 }
2649
2650 mutex_unlock(&conn->chan_lock);
2651 }
2652
2653 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU (basic header + command header
 * + dlen bytes of payload copied from data).  If the total exceeds the
 * connection MTU, the remainder is attached as continuation fragments
 * on the skb's frag_list.  Returns the skb, or NULL on allocation
 * failure (any partial fragment chain is freed).
 */
2654 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2655 u8 ident, u16 dlen, void *data)
2656 {
2657 struct sk_buff *skb, **frag;
2658 struct l2cap_cmd_hdr *cmd;
2659 struct l2cap_hdr *lh;
2660 int len, count;
2661
2662 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2663 conn, code, ident, dlen);
2664
2665 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2666 count = min_t(unsigned int, conn->mtu, len);
2667
2668 skb = bt_skb_alloc(count, GFP_KERNEL);
2669 if (!skb)
2670 return NULL;
2671
2672 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2673 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2674
/* LE links use a different fixed signalling channel CID. */
2675 if (conn->hcon->type == LE_LINK)
2676 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2677 else
2678 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2679
2680 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2681 cmd->code = code;
2682 cmd->ident = ident;
2683 cmd->len = cpu_to_le16(dlen);
2684
2685 if (dlen) {
/* Fill the first skb with as much payload as the MTU allows. */
2686 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2687 memcpy(skb_put(skb, count), data, count);
2688 data += count;
2689 }
2690
2691 len -= skb->len;
2692
2693 /* Continuation fragments (no L2CAP header) */
2694 frag = &skb_shinfo(skb)->frag_list;
2695 while (len) {
2696 count = min_t(unsigned int, conn->mtu, len);
2697
2698 *frag = bt_skb_alloc(count, GFP_KERNEL);
2699 if (!*frag)
2700 goto fail;
2701
2702 memcpy(skb_put(*frag, count), data, count);
2703
2704 len -= count;
2705 data += count;
2706
2707 frag = &(*frag)->next;
2708 }
2709
2710 return skb;
2711
2712 fail:
/* kfree_skb also releases any fragments already chained. */
2713 kfree_skb(skb);
2714 return NULL;
2715 }
2716
/* Decode one configuration option at *ptr, returning its type, declared
 * length and value, and advance *ptr past it.  Values of 1/2/4 bytes
 * are decoded inline; any other length is returned as a pointer to the
 * raw option payload.  Returns the total bytes consumed.
 *
 * NOTE(review): opt->len comes straight off the wire and is not checked
 * here against the remaining buffer length — callers only verify that
 * L2CAP_CONF_OPT_SIZE bytes remain before calling.  Presumably a
 * malformed option could read past the request buffer; confirm against
 * the callers' length accounting.
 */
2717 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2718 unsigned long *val)
2719 {
2720 struct l2cap_conf_opt *opt = *ptr;
2721 int len;
2722
2723 len = L2CAP_CONF_OPT_SIZE + opt->len;
2724 *ptr += len;
2725
2726 *type = opt->type;
2727 *olen = opt->len;
2728
2729 switch (opt->len) {
2730 case 1:
2731 *val = *((u8 *) opt->val);
2732 break;
2733
2734 case 2:
2735 *val = get_unaligned_le16(opt->val);
2736 break;
2737
2738 case 4:
2739 *val = get_unaligned_le32(opt->val);
2740 break;
2741
2742 default:
/* Larger options (e.g. RFC, EFS) are passed back by address. */
2743 *val = (unsigned long) opt->val;
2744 break;
2745 }
2746
2747 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2748 return len;
2749 }
2750
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  1/2/4-byte values are encoded little-endian inline;
 * for any other length, val is treated as a pointer to len bytes that
 * are copied verbatim.
 *
 * NOTE(review): no bound on the output buffer is passed in — callers
 * are responsible for sizing their response buffers; verify each call
 * site cannot overflow.
 */
2751 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2752 {
2753 struct l2cap_conf_opt *opt = *ptr;
2754
2755 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2756
2757 opt->type = type;
2758 opt->len = len;
2759
2760 switch (len) {
2761 case 1:
2762 *((u8 *) opt->val) = val;
2763 break;
2764
2765 case 2:
2766 put_unaligned_le16(val, opt->val);
2767 break;
2768
2769 case 4:
2770 put_unaligned_le32(val, opt->val);
2771 break;
2772
2773 default:
2774 memcpy(opt->val, (void *) val, len);
2775 break;
2776 }
2777
2778 *ptr += L2CAP_CONF_OPT_SIZE + len;
2779 }
2780
2781 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2782 {
2783 struct l2cap_conf_efs efs;
2784
2785 switch (chan->mode) {
2786 case L2CAP_MODE_ERTM:
2787 efs.id = chan->local_id;
2788 efs.stype = chan->local_stype;
2789 efs.msdu = cpu_to_le16(chan->local_msdu);
2790 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2791 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2792 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2793 break;
2794
2795 case L2CAP_MODE_STREAMING:
2796 efs.id = 1;
2797 efs.stype = L2CAP_SERV_BESTEFFORT;
2798 efs.msdu = cpu_to_le16(chan->local_msdu);
2799 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2800 efs.acc_lat = 0;
2801 efs.flush_to = 0;
2802 break;
2803
2804 default:
2805 return;
2806 }
2807
2808 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2809 (unsigned long) &efs);
2810 }
2811
/* Delayed-work handler for the ERTM acknowledgement timer: if any
 * received frames have not yet been acknowledged, send an RR (or RNR
 * when locally busy) without the poll bit.  Drops the channel reference
 * taken when the timer was armed.
 */
2812 static void l2cap_ack_timeout(struct work_struct *work)
2813 {
2814 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2815 ack_timer.work);
2816 u16 frames_to_ack;
2817
2818 BT_DBG("chan %p", chan);
2819
2820 l2cap_chan_lock(chan);
2821
/* Distance between what we've received and what we last acked. */
2822 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2823 chan->last_acked_seq);
2824
2825 if (frames_to_ack)
2826 l2cap_send_rr_or_rnr(chan, 0);
2827
2828 l2cap_chan_unlock(chan);
/* Balances the reference held for the pending timer. */
2829 l2cap_chan_put(chan);
2830 }
2831
/* Initialize per-channel ERTM/streaming state: reset all sequence
 * counters, SDU reassembly state, the tx queue and AMP move state.
 * For ERTM mode additionally set up the rx/tx state machines, the
 * retransmission/monitor/ack delayed work, and the SREJ/retransmit
 * sequence lists.  Returns 0 on success or a negative errno if a
 * sequence list allocation fails (partially allocated state is freed).
 */
2832 int l2cap_ertm_init(struct l2cap_chan *chan)
2833 {
2834 int err;
2835
2836 chan->next_tx_seq = 0;
2837 chan->expected_tx_seq = 0;
2838 chan->expected_ack_seq = 0;
2839 chan->unacked_frames = 0;
2840 chan->buffer_seq = 0;
2841 chan->frames_sent = 0;
2842 chan->last_acked_seq = 0;
2843 chan->sdu = NULL;
2844 chan->sdu_last_frag = NULL;
2845 chan->sdu_len = 0;
2846
2847 skb_queue_head_init(&chan->tx_q);
2848
/* Channel starts on the BR/EDR controller with no AMP move pending. */
2849 chan->local_amp_id = 0;
2850 chan->move_id = 0;
2851 chan->move_state = L2CAP_MOVE_STABLE;
2852 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2853
/* Everything below is ERTM-only. */
2854 if (chan->mode != L2CAP_MODE_ERTM)
2855 return 0;
2856
2857 chan->rx_state = L2CAP_RX_STATE_RECV;
2858 chan->tx_state = L2CAP_TX_STATE_XMIT;
2859
2860 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2861 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2862 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2863
2864 skb_queue_head_init(&chan->srej_q);
2865
2866 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2867 if (err < 0)
2868 return err;
2869
2870 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2871 if (err < 0)
/* Unwind the first list so a failed init leaks nothing. */
2872 l2cap_seq_list_free(&chan->srej_list);
2873
2874 return err;
2875 }
2876
2877 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2878 {
2879 switch (mode) {
2880 case L2CAP_MODE_STREAMING:
2881 case L2CAP_MODE_ERTM:
2882 if (l2cap_mode_supported(mode, remote_feat_mask))
2883 return mode;
2884 /* fall through */
2885 default:
2886 return L2CAP_MODE_BASIC;
2887 }
2888 }
2889
2890 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2891 {
2892 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2893 }
2894
2895 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2896 {
2897 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2898 }
2899
2900 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2901 {
2902 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2903 __l2cap_ews_supported(chan)) {
2904 /* use extended control field */
2905 set_bit(FLAG_EXT_CTRL, &chan->flags);
2906 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2907 } else {
2908 chan->tx_win = min_t(u16, chan->tx_win,
2909 L2CAP_DEFAULT_TX_WINDOW);
2910 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2911 }
2912 chan->ack_win = chan->tx_win;
2913 }
2914
/* Build an outgoing Configuration Request for chan into data.  On the
 * first request only, the channel mode is (re)negotiated against the
 * remote feature mask and EFS support is latched.  Options emitted
 * depend on the final mode: MTU (when non-default), RFC, and for
 * ERTM/streaming optionally EFS, FCS and EWS.  Returns the number of
 * bytes written.
 */
2915 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2916 {
2917 struct l2cap_conf_req *req = data;
2918 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2919 void *ptr = req->data;
2920 u16 size;
2921
2922 BT_DBG("chan %p", chan);
2923
/* Mode selection happens only on the very first config exchange. */
2924 if (chan->num_conf_req || chan->num_conf_rsp)
2925 goto done;
2926
2927 switch (chan->mode) {
2928 case L2CAP_MODE_STREAMING:
2929 case L2CAP_MODE_ERTM:
2930 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2931 break;
2932
2933 if (__l2cap_efs_supported(chan))
2934 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2935
2936 /* fall through */
2937 default:
2938 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2939 break;
2940 }
2941
2942 done:
2943 if (chan->imtu != L2CAP_DEFAULT_MTU)
2944 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2945
2946 switch (chan->mode) {
2947 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC if the peer understands
 * the option at all (i.e. supports ERTM or streaming).
 */
2948 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2949 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2950 break;
2951
2952 rfc.mode = L2CAP_MODE_BASIC;
2953 rfc.txwin_size = 0;
2954 rfc.max_transmit = 0;
2955 rfc.retrans_timeout = 0;
2956 rfc.monitor_timeout = 0;
2957 rfc.max_pdu_size = 0;
2958
2959 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2960 (unsigned long) &rfc);
2961 break;
2962
2963 case L2CAP_MODE_ERTM:
2964 rfc.mode = L2CAP_MODE_ERTM;
2965 rfc.max_transmit = chan->max_tx;
2966 rfc.retrans_timeout = 0;
2967 rfc.monitor_timeout = 0;
2968
/* Cap the PDU size so a full frame (headers + FCS) fits the
 * connection MTU.
 */
2969 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2970 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2971 L2CAP_FCS_SIZE);
2972 rfc.max_pdu_size = cpu_to_le16(size);
2973
2974 l2cap_txwin_setup(chan);
2975
2976 rfc.txwin_size = min_t(u16, chan->tx_win,
2977 L2CAP_DEFAULT_TX_WINDOW);
2978
2979 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2980 (unsigned long) &rfc);
2981
2982 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2983 l2cap_add_opt_efs(&ptr, chan);
2984
2985 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2986 break;
2987
2988 if (chan->fcs == L2CAP_FCS_NONE ||
2989 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2990 chan->fcs = L2CAP_FCS_NONE;
2991 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2992 }
2993
/* Full (unclamped) window travels in the EWS option. */
2994 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2995 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2996 chan->tx_win);
2997 break;
2998
2999 case L2CAP_MODE_STREAMING:
3000 l2cap_txwin_setup(chan);
3001 rfc.mode = L2CAP_MODE_STREAMING;
3002 rfc.txwin_size = 0;
3003 rfc.max_transmit = 0;
3004 rfc.retrans_timeout = 0;
3005 rfc.monitor_timeout = 0;
3006
3007 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3008 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3009 L2CAP_FCS_SIZE);
3010 rfc.max_pdu_size = cpu_to_le16(size);
3011
3012 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3013 (unsigned long) &rfc);
3014
3015 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3016 l2cap_add_opt_efs(&ptr, chan);
3017
3018 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3019 break;
3020
3021 if (chan->fcs == L2CAP_FCS_NONE ||
3022 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3023 chan->fcs = L2CAP_FCS_NONE;
3024 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3025 }
3026 break;
3027 }
3028
3029 req->dcid = cpu_to_le16(chan->dcid);
3030 req->flags = __constant_cpu_to_le16(0);
3031
3032 return ptr - data;
3033 }
3034
/* Parse the remote's Configuration Request (stashed in chan->conf_req /
 * chan->conf_len) and build our Configuration Response into data.
 * First pass decodes every option (unknown non-hint options produce an
 * UNKNOWN result); then the channel mode is reconciled with the RFC
 * option, and on success the response echoes the negotiated MTU, RFC,
 * and (when applicable) EFS parameters.  Returns the response length,
 * or -ECONNREFUSED when the request is unacceptable.
 */
3035 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3036 {
3037 struct l2cap_conf_rsp *rsp = data;
3038 void *ptr = rsp->data;
3039 void *req = chan->conf_req;
3040 int len = chan->conf_len;
3041 int type, hint, olen;
3042 unsigned long val;
3043 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3044 struct l2cap_conf_efs efs;
3045 u8 remote_efs = 0;
3046 u16 mtu = L2CAP_DEFAULT_MTU;
3047 u16 result = L2CAP_CONF_SUCCESS;
3048 u16 size;
3049
3050 BT_DBG("chan %p", chan);
3051
/* Pass 1: decode each option in the request. */
3052 while (len >= L2CAP_CONF_OPT_SIZE) {
3053 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3054
3055 hint = type & L2CAP_CONF_HINT;
3056 type &= L2CAP_CONF_MASK;
3057
3058 switch (type) {
3059 case L2CAP_CONF_MTU:
3060 mtu = val;
3061 break;
3062
3063 case L2CAP_CONF_FLUSH_TO:
3064 chan->flush_to = val;
3065 break;
3066
3067 case L2CAP_CONF_QOS:
3068 break;
3069
3070 case L2CAP_CONF_RFC:
3071 if (olen == sizeof(rfc))
3072 memcpy(&rfc, (void *) val, olen);
3073 break;
3074
3075 case L2CAP_CONF_FCS:
3076 if (val == L2CAP_FCS_NONE)
3077 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3078 break;
3079
3080 case L2CAP_CONF_EFS:
3081 remote_efs = 1;
3082 if (olen == sizeof(efs))
3083 memcpy(&efs, (void *) val, olen);
3084 break;
3085
3086 case L2CAP_CONF_EWS:
/* Peer asked for extended windows we can't do. */
3087 if (!enable_hs)
3088 return -ECONNREFUSED;
3089
3090 set_bit(FLAG_EXT_CTRL, &chan->flags);
3091 set_bit(CONF_EWS_RECV, &chan->conf_state);
3092 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3093 chan->remote_tx_win = val;
3094 break;
3095
3096 default:
3097 if (hint)
3098 break;
3099
/* Echo unknown option types back in the response. */
3100 result = L2CAP_CONF_UNKNOWN;
3101 *((u8 *) ptr++) = type;
3102 break;
3103 }
3104 }
3105
/* Mode reconciliation only on the first exchange. */
3106 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3107 goto done;
3108
3109 switch (chan->mode) {
3110 case L2CAP_MODE_STREAMING:
3111 case L2CAP_MODE_ERTM:
3112 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3113 chan->mode = l2cap_select_mode(rfc.mode,
3114 chan->conn->feat_mask);
3115 break;
3116 }
3117
3118 if (remote_efs) {
3119 if (__l2cap_efs_supported(chan))
3120 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3121 else
3122 return -ECONNREFUSED;
3123 }
3124
3125 if (chan->mode != rfc.mode)
3126 return -ECONNREFUSED;
3127
3128 break;
3129 }
3130
3131 done:
3132 if (chan->mode != rfc.mode) {
3133 result = L2CAP_CONF_UNACCEPT;
3134 rfc.mode = chan->mode;
3135
/* Refuse outright if we already rejected the mode once. */
3136 if (chan->num_conf_rsp == 1)
3137 return -ECONNREFUSED;
3138
3139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3140 (unsigned long) &rfc);
3141 }
3142
3143 if (result == L2CAP_CONF_SUCCESS) {
3144 /* Configure output options and let the other side know
3145 * which ones we don't like. */
3146
3147 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3148 result = L2CAP_CONF_UNACCEPT;
3149 else {
3150 chan->omtu = mtu;
3151 set_bit(CONF_MTU_DONE, &chan->conf_state);
3152 }
3153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3154
3155 if (remote_efs) {
/* Mismatched service types are only acceptable when either
 * side declared "no traffic".
 */
3156 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3157 efs.stype != L2CAP_SERV_NOTRAFIC &&
3158 efs.stype != chan->local_stype) {
3159
3160 result = L2CAP_CONF_UNACCEPT;
3161
3162 if (chan->num_conf_req >= 1)
3163 return -ECONNREFUSED;
3164
3165 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3166 sizeof(efs),
3167 (unsigned long) &efs);
3168 } else {
3169 /* Send PENDING Conf Rsp */
3170 result = L2CAP_CONF_PENDING;
3171 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3172 }
3173 }
3174
3175 switch (rfc.mode) {
3176 case L2CAP_MODE_BASIC:
3177 chan->fcs = L2CAP_FCS_NONE;
3178 set_bit(CONF_MODE_DONE, &chan->conf_state);
3179 break;
3180
3181 case L2CAP_MODE_ERTM:
3182 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3183 chan->remote_tx_win = rfc.txwin_size;
3184 else
/* Window already taken from EWS; RFC field reports default. */
3185 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3186
3187 chan->remote_max_tx = rfc.max_transmit;
3188
3189 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3190 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3191 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3192 rfc.max_pdu_size = cpu_to_le16(size);
3193 chan->remote_mps = size;
3194
3195 rfc.retrans_timeout =
3196 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3197 rfc.monitor_timeout =
3198 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3199
3200 set_bit(CONF_MODE_DONE, &chan->conf_state);
3201
3202 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3203 sizeof(rfc), (unsigned long) &rfc);
3204
3205 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3206 chan->remote_id = efs.id;
3207 chan->remote_stype = efs.stype;
3208 chan->remote_msdu = le16_to_cpu(efs.msdu);
3209 chan->remote_flush_to =
3210 le32_to_cpu(efs.flush_to);
3211 chan->remote_acc_lat =
3212 le32_to_cpu(efs.acc_lat);
3213 chan->remote_sdu_itime =
3214 le32_to_cpu(efs.sdu_itime);
3215 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3216 sizeof(efs),
3217 (unsigned long) &efs);
3218 }
3219 break;
3220
3221 case L2CAP_MODE_STREAMING:
3222 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3223 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3224 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3225 rfc.max_pdu_size = cpu_to_le16(size);
3226 chan->remote_mps = size;
3227
3228 set_bit(CONF_MODE_DONE, &chan->conf_state);
3229
3230 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3231 (unsigned long) &rfc);
3232
3233 break;
3234
3235 default:
3236 result = L2CAP_CONF_UNACCEPT;
3237
3238 memset(&rfc, 0, sizeof(rfc));
3239 rfc.mode = chan->mode;
3240 }
3241
3242 if (result == L2CAP_CONF_SUCCESS)
3243 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3244 }
3245 rsp->scid = cpu_to_le16(chan->dcid);
3246 rsp->result = cpu_to_le16(result);
3247 rsp->flags = __constant_cpu_to_le16(0);
3248
3249 return ptr - data;
3250 }
3251
/* Parse the remote's Configuration Response (rsp/len) and build the
 * follow-up Configuration Request into data, echoing the parameters we
 * accept.  *result may be downgraded (e.g. to UNACCEPT for a too-small
 * MTU); on SUCCESS/PENDING the negotiated ERTM/streaming timeouts, MPS,
 * ack window and EFS values are committed to chan.  Returns the request
 * length, or -ECONNREFUSED when the response is unacceptable.
 */
3252 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3253 void *data, u16 *result)
3254 {
3255 struct l2cap_conf_req *req = data;
3256 void *ptr = req->data;
3257 int type, olen;
3258 unsigned long val;
3259 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3260 struct l2cap_conf_efs efs;
3261
3262 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3263
3264 while (len >= L2CAP_CONF_OPT_SIZE) {
3265 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3266
3267 switch (type) {
3268 case L2CAP_CONF_MTU:
/* Enforce the minimum MTU; anything smaller is rejected. */
3269 if (val < L2CAP_DEFAULT_MIN_MTU) {
3270 *result = L2CAP_CONF_UNACCEPT;
3271 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3272 } else
3273 chan->imtu = val;
3274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3275 break;
3276
3277 case L2CAP_CONF_FLUSH_TO:
3278 chan->flush_to = val;
3279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3280 2, chan->flush_to);
3281 break;
3282
3283 case L2CAP_CONF_RFC:
3284 if (olen == sizeof(rfc))
3285 memcpy(&rfc, (void *)val, olen);
3286
/* State-2 devices never change mode mid-negotiation. */
3287 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3288 rfc.mode != chan->mode)
3289 return -ECONNREFUSED;
3290
3291 chan->fcs = 0;
3292
3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3294 sizeof(rfc), (unsigned long) &rfc);
3295 break;
3296
3297 case L2CAP_CONF_EWS:
3298 chan->ack_win = min_t(u16, val, chan->ack_win);
3299 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3300 chan->tx_win);
3301 break;
3302
3303 case L2CAP_CONF_EFS:
3304 if (olen == sizeof(efs))
3305 memcpy(&efs, (void *)val, olen);
3306
3307 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3308 efs.stype != L2CAP_SERV_NOTRAFIC &&
3309 efs.stype != chan->local_stype)
3310 return -ECONNREFUSED;
3311
3312 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3313 (unsigned long) &efs);
3314 break;
3315 }
3316 }
3317
3318 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3319 return -ECONNREFUSED;
3320
3321 chan->mode = rfc.mode;
3322
3323 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3324 switch (rfc.mode) {
3325 case L2CAP_MODE_ERTM:
3326 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3327 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3328 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Without extended control the RFC window bounds our ack window. */
3329 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3330 chan->ack_win = min_t(u16, chan->ack_win,
3331 rfc.txwin_size);
3332
3333 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3334 chan->local_msdu = le16_to_cpu(efs.msdu);
3335 chan->local_sdu_itime =
3336 le32_to_cpu(efs.sdu_itime);
3337 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3338 chan->local_flush_to =
3339 le32_to_cpu(efs.flush_to);
3340 }
3341 break;
3342
3343 case L2CAP_MODE_STREAMING:
3344 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3345 }
3346 }
3347
3348 req->dcid = cpu_to_le16(chan->dcid);
3349 req->flags = __constant_cpu_to_le16(0);
3350
3351 return ptr - data;
3352 }
3353
3354 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3355 u16 result, u16 flags)
3356 {
3357 struct l2cap_conf_rsp *rsp = data;
3358 void *ptr = rsp->data;
3359
3360 BT_DBG("chan %p", chan);
3361
3362 rsp->scid = cpu_to_le16(chan->dcid);
3363 rsp->result = cpu_to_le16(result);
3364 rsp->flags = cpu_to_le16(flags);
3365
3366 return ptr - data;
3367 }
3368
/* Complete a previously deferred Connection Response: send the success
 * response using the ident saved in chan->ident, then (once only) kick
 * off configuration by sending our first Configuration Request.
 */
3369 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3370 {
3371 struct l2cap_conn_rsp rsp;
3372 struct l2cap_conn *conn = chan->conn;
3373 u8 buf[128];
3374
3375 rsp.scid = cpu_to_le16(chan->dcid);
3376 rsp.dcid = cpu_to_le16(chan->scid);
3377 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3378 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3379 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3380
/* Only the first caller proceeds to send the config request. */
3381 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3382 return;
3383
3384 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3385 l2cap_build_conf_req(chan, buf), buf);
3386 chan->num_conf_req++;
3387 }
3388
/* Extract the RFC (and optional EWS) option from a final Configuration
 * Response and commit the negotiated ERTM/streaming parameters to chan.
 * Defaults are pre-loaded so a response missing these options still
 * leaves the channel with sane values.  Basic-mode channels are left
 * untouched.
 */
3389 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3390 {
3391 int type, olen;
3392 unsigned long val;
3393 /* Use sane default values in case a misbehaving remote device
3394 * did not send an RFC or extended window size option.
3395 */
3396 u16 txwin_ext = chan->ack_win;
3397 struct l2cap_conf_rfc rfc = {
3398 .mode = chan->mode,
3399 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3400 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3401 .max_pdu_size = cpu_to_le16(chan->imtu),
3402 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3403 };
3404
3405 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3406
3407 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3408 return;
3409
3410 while (len >= L2CAP_CONF_OPT_SIZE) {
3411 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3412
3413 switch (type) {
3414 case L2CAP_CONF_RFC:
3415 if (olen == sizeof(rfc))
3416 memcpy(&rfc, (void *)val, olen);
3417 break;
3418 case L2CAP_CONF_EWS:
3419 txwin_ext = val;
3420 break;
3421 }
3422 }
3423
3424 switch (rfc.mode) {
3425 case L2CAP_MODE_ERTM:
3426 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3427 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3428 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Ack window comes from EWS when extended control is in use,
 * otherwise from the RFC txwin field.
 */
3429 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3430 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3431 else
3432 chan->ack_win = min_t(u16, chan->ack_win,
3433 rfc.txwin_size);
3434 break;
3435 case L2CAP_MODE_STREAMING:
3436 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3437 }
3438 }
3439
3440 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3441 struct l2cap_cmd_hdr *cmd, u8 *data)
3442 {
3443 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3444
3445 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3446 return 0;
3447
3448 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3449 cmd->ident == conn->info_ident) {
3450 cancel_delayed_work(&conn->info_timer);
3451
3452 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3453 conn->info_ident = 0;
3454
3455 l2cap_conn_start(conn);
3456 }
3457
3458 return 0;
3459 }
3460
/* Handle an incoming Connection Request: look up a listening channel
 * for the requested PSM, perform security checks, create and register
 * the new child channel, and send the response (rsp_code) with the
 * resulting result/status.  amp_id selects the controller; a non-zero
 * amp_id forces a pending result until the physical link is up.  When
 * the feature exchange has not run yet, an Information Request is also
 * sent.  Returns the new channel or NULL.
 */
3461 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3462 struct l2cap_cmd_hdr *cmd,
3463 u8 *data, u8 rsp_code, u8 amp_id)
3464 {
3465 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3466 struct l2cap_conn_rsp rsp;
3467 struct l2cap_chan *chan = NULL, *pchan;
3468 struct sock *parent, *sk = NULL;
3469 int result, status = L2CAP_CS_NO_INFO;
3470
3471 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3472 __le16 psm = req->psm;
3473
3474 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3475
3476 /* Check if we have socket listening on psm */
3477 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3478 if (!pchan) {
3479 result = L2CAP_CR_BAD_PSM;
3480 goto sendresp;
3481 }
3482
3483 parent = pchan->sk;
3484
/* Lock order: connection channel list, then the parent socket. */
3485 mutex_lock(&conn->chan_lock);
3486 lock_sock(parent);
3487
3488 /* Check if the ACL is secure enough (if not SDP) */
3489 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3490 !hci_conn_check_link_mode(conn->hcon)) {
3491 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3492 result = L2CAP_CR_SEC_BLOCK;
3493 goto response;
3494 }
3495
3496 result = L2CAP_CR_NO_MEM;
3497
3498 /* Check if we already have channel with that dcid */
3499 if (__l2cap_get_chan_by_dcid(conn, scid))
3500 goto response;
3501
3502 chan = pchan->ops->new_connection(pchan);
3503 if (!chan)
3504 goto response;
3505
3506 sk = chan->sk;
3507
3508 hci_conn_hold(conn->hcon);
3509
3510 bacpy(&bt_sk(sk)->src, conn->src);
3511 bacpy(&bt_sk(sk)->dst, conn->dst);
3512 chan->psm = psm;
3513 chan->dcid = scid;
3514 chan->local_amp_id = amp_id;
3515
3516 __l2cap_chan_add(conn, chan);
3517
3518 dcid = chan->scid;
3519
3520 __set_chan_timer(chan, sk->sk_sndtimeo);
3521
3522 chan->ident = cmd->ident;
3523
3524 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3525 if (l2cap_chan_check_security(chan)) {
3526 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3527 __l2cap_state_change(chan, BT_CONNECT2);
3528 result = L2CAP_CR_PEND;
3529 status = L2CAP_CS_AUTHOR_PEND;
3530 chan->ops->defer(chan);
3531 } else {
3532 /* Force pending result for AMP controllers.
3533 * The connection will succeed after the
3534 * physical link is up.
3535 */
3536 if (amp_id) {
3537 __l2cap_state_change(chan, BT_CONNECT2);
3538 result = L2CAP_CR_PEND;
3539 } else {
3540 __l2cap_state_change(chan, BT_CONFIG);
3541 result = L2CAP_CR_SUCCESS;
3542 }
3543 status = L2CAP_CS_NO_INFO;
3544 }
3545 } else {
3546 __l2cap_state_change(chan, BT_CONNECT2);
3547 result = L2CAP_CR_PEND;
3548 status = L2CAP_CS_AUTHEN_PEND;
3549 }
3550 } else {
/* Feature exchange still pending; answer with PEND for now. */
3551 __l2cap_state_change(chan, BT_CONNECT2);
3552 result = L2CAP_CR_PEND;
3553 status = L2CAP_CS_NO_INFO;
3554 }
3555
3556 response:
3557 release_sock(parent);
3558 mutex_unlock(&conn->chan_lock);
3559
3560 sendresp:
3561 rsp.scid = cpu_to_le16(scid);
3562 rsp.dcid = cpu_to_le16(dcid);
3563 rsp.result = cpu_to_le16(result);
3564 rsp.status = cpu_to_le16(status);
3565 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3566
/* Kick off the feature-mask information exchange if not done yet. */
3567 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3568 struct l2cap_info_req info;
3569 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3570
3571 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3572 conn->info_ident = l2cap_get_ident(conn);
3573
3574 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3575
3576 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3577 sizeof(info), &info);
3578 }
3579
/* Successful immediate connect: start configuration right away. */
3580 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3581 result == L2CAP_CR_SUCCESS) {
3582 u8 buf[128];
3583 set_bit(CONF_REQ_SENT, &chan->conf_state);
3584 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3585 l2cap_build_conf_req(chan, buf), buf);
3586 chan->num_conf_req++;
3587 }
3588
3589 return chan;
3590 }
3591
3592 static int l2cap_connect_req(struct l2cap_conn *conn,
3593 struct l2cap_cmd_hdr *cmd, u8 *data)
3594 {
3595 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3596 return 0;
3597 }
3598
/* Handle an incoming Connection Response (or Create Channel Response).
 * The channel is found by our source CID when present, otherwise by
 * the command ident (the peer may answer PEND with scid 0).  SUCCESS
 * moves the channel to BT_CONFIG and sends the first Configuration
 * Request; PEND just marks the connect as pending; anything else
 * deletes the channel.  Returns 0 or -EFAULT if no channel matches.
 */
3599 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3600 struct l2cap_cmd_hdr *cmd, u8 *data)
3601 {
3602 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3603 u16 scid, dcid, result, status;
3604 struct l2cap_chan *chan;
3605 u8 req[128];
3606 int err;
3607
3608 scid = __le16_to_cpu(rsp->scid);
3609 dcid = __le16_to_cpu(rsp->dcid);
3610 result = __le16_to_cpu(rsp->result);
3611 status = __le16_to_cpu(rsp->status);
3612
3613 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3614 dcid, scid, result, status);
3615
3616 mutex_lock(&conn->chan_lock);
3617
3618 if (scid) {
3619 chan = __l2cap_get_chan_by_scid(conn, scid);
3620 if (!chan) {
3621 err = -EFAULT;
3622 goto unlock;
3623 }
3624 } else {
3625 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3626 if (!chan) {
3627 err = -EFAULT;
3628 goto unlock;
3629 }
3630 }
3631
3632 err = 0;
3633
3634 l2cap_chan_lock(chan);
3635
3636 switch (result) {
3637 case L2CAP_CR_SUCCESS:
3638 l2cap_state_change(chan, BT_CONFIG);
3639 chan->ident = 0;
3640 chan->dcid = dcid;
3641 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3642
/* Send the initial config request only once. */
3643 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3644 break;
3645
3646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3647 l2cap_build_conf_req(chan, req), req);
3648 chan->num_conf_req++;
3649 break;
3650
3651 case L2CAP_CR_PEND:
3652 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3653 break;
3654
3655 default:
/* Any refusal tears the channel down. */
3656 l2cap_chan_del(chan, ECONNREFUSED);
3657 break;
3658 }
3659
3660 l2cap_chan_unlock(chan);
3661
3662 unlock:
3663 mutex_unlock(&conn->chan_lock);
3664
3665 return err;
3666 }
3667
3668 static inline void set_default_fcs(struct l2cap_chan *chan)
3669 {
3670 /* FCS is enabled only in ERTM or streaming mode, if one or both
3671 * sides request it.
3672 */
3673 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3674 chan->fcs = L2CAP_FCS_NONE;
3675 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3676 chan->fcs = L2CAP_FCS_CRC16;
3677 }
3678
3679 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3680 u8 ident, u16 flags)
3681 {
3682 struct l2cap_conn *conn = chan->conn;
3683
3684 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3685 flags);
3686
3687 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3688 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3689
3690 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3691 l2cap_build_conf_rsp(chan, data,
3692 L2CAP_CONF_SUCCESS, flags), data);
3693 }
3694
/* Handle an incoming Configure Request.
 *
 * Configuration options may arrive split over several requests
 * (continuation flag set); they are accumulated in chan->conf_req until
 * a request without the continuation flag completes the set, which is
 * then parsed and answered.  Returns 0 or a negative errno.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; unlocked at the "unlock" label */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options: drop the connection */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel is ready (after ERTM
	 * state initialization where applicable).
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->ctrl_id)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3803
3804 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3805 struct l2cap_cmd_hdr *cmd, u8 *data)
3806 {
3807 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3808 u16 scid, flags, result;
3809 struct l2cap_chan *chan;
3810 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3811 int err = 0;
3812
3813 scid = __le16_to_cpu(rsp->scid);
3814 flags = __le16_to_cpu(rsp->flags);
3815 result = __le16_to_cpu(rsp->result);
3816
3817 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3818 result, len);
3819
3820 chan = l2cap_get_chan_by_scid(conn, scid);
3821 if (!chan)
3822 return 0;
3823
3824 switch (result) {
3825 case L2CAP_CONF_SUCCESS:
3826 l2cap_conf_rfc_get(chan, rsp->data, len);
3827 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3828 break;
3829
3830 case L2CAP_CONF_PENDING:
3831 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3832
3833 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3834 char buf[64];
3835
3836 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3837 buf, &result);
3838 if (len < 0) {
3839 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3840 goto done;
3841 }
3842
3843 /* check compatibility */
3844
3845 if (!chan->ctrl_id)
3846 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3847 0);
3848 else
3849 chan->ident = cmd->ident;
3850 }
3851 goto done;
3852
3853 case L2CAP_CONF_UNACCEPT:
3854 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3855 char req[64];
3856
3857 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3858 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3859 goto done;
3860 }
3861
3862 /* throw out any old stored conf requests */
3863 result = L2CAP_CONF_SUCCESS;
3864 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3865 req, &result);
3866 if (len < 0) {
3867 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3868 goto done;
3869 }
3870
3871 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3872 L2CAP_CONF_REQ, len, req);
3873 chan->num_conf_req++;
3874 if (result != L2CAP_CONF_SUCCESS)
3875 goto done;
3876 break;
3877 }
3878
3879 default:
3880 l2cap_chan_set_err(chan, ECONNRESET);
3881
3882 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3883 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3884 goto done;
3885 }
3886
3887 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3888 goto done;
3889
3890 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3891
3892 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3893 set_default_fcs(chan);
3894
3895 if (chan->mode == L2CAP_MODE_ERTM ||
3896 chan->mode == L2CAP_MODE_STREAMING)
3897 err = l2cap_ertm_init(chan);
3898
3899 if (err < 0)
3900 l2cap_send_disconn_req(chan->conn, chan, -err);
3901 else
3902 l2cap_chan_ready(chan);
3903 }
3904
3905 done:
3906 l2cap_chan_unlock(chan);
3907 return err;
3908 }
3909
/* Handle an incoming Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, shuts down the socket and
 * removes the channel.  An unknown CID is silently ignored (returns 0).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold an extra reference so the channel survives l2cap_chan_del()
	 * long enough for the close callback below; put when done.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3956
/* Handle an incoming Disconnection Response.
 *
 * Completes a disconnect we initiated: removes the channel and closes
 * it.  An unknown CID is silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive across chan_del for the
	 * close callback; released with the put below.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3991
/* Handle an incoming Information Request.
 *
 * Answers feature-mask and fixed-channel queries; any other type gets a
 * NOTSUPP response.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled by module
		 * parameter, and the extended features when high speed
		 * (AMP) support is enabled.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel is only present with high speed
		 * support; keep the global mask in sync with the setting.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4042
/* Handle an incoming Information Response.
 *
 * Part of the connection-setup handshake: after receiving the feature
 * mask we may chase it with a fixed-channel query; once the exchange is
 * done, pending channels are started via l2cap_conn_start().
 *
 * NOTE(review): rsp->data is read (4 bytes / 1 byte) without an explicit
 * length check against cmd->len here — relies on the caller's cmd_len
 * validation; confirm upstream hardening applies.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat the exchange as complete and start
		 * pending channels anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4101
4102 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4103 struct l2cap_cmd_hdr *cmd,
4104 u16 cmd_len, void *data)
4105 {
4106 struct l2cap_create_chan_req *req = data;
4107 struct l2cap_chan *chan;
4108 u16 psm, scid;
4109
4110 if (cmd_len != sizeof(*req))
4111 return -EPROTO;
4112
4113 if (!enable_hs)
4114 return -EINVAL;
4115
4116 psm = le16_to_cpu(req->psm);
4117 scid = le16_to_cpu(req->scid);
4118
4119 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4120
4121 if (req->amp_id) {
4122 struct hci_dev *hdev;
4123
4124 /* Validate AMP controller id */
4125 hdev = hci_dev_get(req->amp_id);
4126 if (!hdev || hdev->dev_type != HCI_AMP ||
4127 !test_bit(HCI_UP, &hdev->flags)) {
4128 struct l2cap_create_chan_rsp rsp;
4129
4130 rsp.dcid = 0;
4131 rsp.scid = cpu_to_le16(scid);
4132 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4133 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4134
4135 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4136 sizeof(rsp), &rsp);
4137
4138 if (hdev)
4139 hci_dev_put(hdev);
4140
4141 return 0;
4142 }
4143
4144 hci_dev_put(hdev);
4145 }
4146
4147 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4148 req->amp_id);
4149
4150 return 0;
4151 }
4152
4153 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4154 u16 icid, u16 result)
4155 {
4156 struct l2cap_move_chan_rsp rsp;
4157
4158 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4159
4160 rsp.icid = cpu_to_le16(icid);
4161 rsp.result = cpu_to_le16(result);
4162
4163 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4164 }
4165
4166 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4167 struct l2cap_chan *chan,
4168 u16 icid, u16 result)
4169 {
4170 struct l2cap_move_chan_cfm cfm;
4171 u8 ident;
4172
4173 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4174
4175 ident = l2cap_get_ident(conn);
4176 if (chan)
4177 chan->ident = ident;
4178
4179 cfm.icid = cpu_to_le16(icid);
4180 cfm.result = cpu_to_le16(result);
4181
4182 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4183 }
4184
4185 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4186 u16 icid)
4187 {
4188 struct l2cap_move_chan_cfm_rsp rsp;
4189
4190 BT_DBG("icid 0x%4.4x", icid);
4191
4192 rsp.icid = cpu_to_le16(icid);
4193 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4194 }
4195
/* Handle an incoming Move Channel Request (AMP channel move).
 *
 * Validates that the channel is movable (dynamic CID, not BR/EDR-only
 * policy, ERTM or streaming mode), that the destination controller
 * exists, and resolves move collisions by bd_addr comparison before
 * entering the responder side of the move state machine.  Always
 * answers with a Move Channel Response.  Returns 0 or -EPROTO/-EINVAL.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Returns the channel locked; unlocked after the response below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_rsp(conn, cmd->ident, icid,
					 L2CAP_MR_NOT_ALLOWED);
		return 0;
	}

	/* Only dynamic, ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		/* Destination must be a powered-up AMP controller */
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->ident = cmd->ident;
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4289
4290 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4291 struct l2cap_cmd_hdr *cmd,
4292 u16 cmd_len, void *data)
4293 {
4294 struct l2cap_move_chan_rsp *rsp = data;
4295 u16 icid, result;
4296
4297 if (cmd_len != sizeof(*rsp))
4298 return -EPROTO;
4299
4300 icid = le16_to_cpu(rsp->icid);
4301 result = le16_to_cpu(rsp->result);
4302
4303 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4304
4305 /* Placeholder: Always unconfirmed */
4306 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4307
4308 return 0;
4309 }
4310
4311 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4312 struct l2cap_cmd_hdr *cmd,
4313 u16 cmd_len, void *data)
4314 {
4315 struct l2cap_move_chan_cfm *cfm = data;
4316 u16 icid, result;
4317
4318 if (cmd_len != sizeof(*cfm))
4319 return -EPROTO;
4320
4321 icid = le16_to_cpu(cfm->icid);
4322 result = le16_to_cpu(cfm->result);
4323
4324 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4325
4326 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4327
4328 return 0;
4329 }
4330
4331 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4332 struct l2cap_cmd_hdr *cmd,
4333 u16 cmd_len, void *data)
4334 {
4335 struct l2cap_move_chan_cfm_rsp *rsp = data;
4336 u16 icid;
4337
4338 if (cmd_len != sizeof(*rsp))
4339 return -EPROTO;
4340
4341 icid = le16_to_cpu(rsp->icid);
4342
4343 BT_DBG("icid 0x%4.4x", icid);
4344
4345 return 0;
4346 }
4347
4348 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4349 u16 to_multiplier)
4350 {
4351 u16 max_latency;
4352
4353 if (min > max || min < 6 || max > 3200)
4354 return -EINVAL;
4355
4356 if (to_multiplier < 10 || to_multiplier > 3200)
4357 return -EINVAL;
4358
4359 if (max >= to_multiplier * 8)
4360 return -EINVAL;
4361
4362 max_latency = (to_multiplier * 8 / max) - 1;
4363 if (latency > 499 || latency > max_latency)
4364 return -EINVAL;
4365
4366 return 0;
4367 }
4368
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the master of the link.  Validates the
 * requested parameters, answers with accepted/rejected, and on accept
 * asks the controller to update the connection.  Returns 0, -EINVAL
 * when not master, or -EPROTO on a malformed command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only push the new parameters to the controller when accepted */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4411
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Returns the handler's result; unknown opcodes yield -EINVAL, which
 * makes the caller send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	/* Connection and Create Channel responses share a handler */
	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4491
4492 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4493 struct l2cap_cmd_hdr *cmd, u8 *data)
4494 {
4495 switch (cmd->code) {
4496 case L2CAP_COMMAND_REJ:
4497 return 0;
4498
4499 case L2CAP_CONN_PARAM_UPDATE_REQ:
4500 return l2cap_conn_param_update_req(conn, cmd, data);
4501
4502 case L2CAP_CONN_PARAM_UPDATE_RSP:
4503 return 0;
4504
4505 default:
4506 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4507 return -EINVAL;
4508 }
4509 }
4510
/* Process an skb received on the L2CAP signaling channel.
 *
 * Walks every command packed into the skb, dispatching each to the LE
 * or BR/EDR handler; a handler error triggers a Command Reject back to
 * the peer.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Commands must fit in the remaining data and carry a
		 * non-zero ident; stop parsing on corruption.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4559
4560 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4561 {
4562 u16 our_fcs, rcv_fcs;
4563 int hdr_size;
4564
4565 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4566 hdr_size = L2CAP_EXT_HDR_SIZE;
4567 else
4568 hdr_size = L2CAP_ENH_HDR_SIZE;
4569
4570 if (chan->fcs == L2CAP_FCS_CRC16) {
4571 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4572 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4573 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4574
4575 if (our_fcs != rcv_fcs)
4576 return -EBADMSG;
4577 }
4578 return 0;
4579 }
4580
/* Send a frame carrying the final (F) bit after a poll from the peer.
 *
 * Prefers to piggy-back the F-bit on an RNR (when locally busy) or on
 * pending I-frames; falls back to an explicit RR when nothing else
 * carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: tell the peer with an RNR (carries the F-bit) */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy; restart retransmission timing if we
	 * still have unacknowledged frames outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4614
/* Append new_frag to skb's fragment list and update accounting.
 *
 * *last_frag tracks the tail of the chain (initially the parent skb
 * itself in the caller); it is advanced to new_frag.  The parent skb's
 * length fields are updated to cover the new fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4633
/* Reassemble a (possibly segmented) SDU from an incoming I-frame.
 *
 * Per the SAR bits: unsegmented frames are delivered directly; START
 * frames begin (or, if short enough, complete) an SDU; CONTINUE/END
 * frames are appended to chan->sdu.  On success the skb's ownership is
 * transferred (skb is set to NULL before returning 0); on any error
 * both the frame and any partially assembled SDU are freed.  Returns 0
 * or a negative errno (-EINVAL for SAR sequence violations, -EMSGSIZE
 * when the SDU exceeds the inbound MTU).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A pending SDU means the peer violated SAR sequencing */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame claiming the full SDU already is invalid
		 * (err stays -EINVAL and the frame is freed below).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END frame must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb is NULL here when ownership already moved to
		 * chan->sdu; kfree_skb() accepts NULL.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4715
4716 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4717 {
4718 u8 event;
4719
4720 if (chan->mode != L2CAP_MODE_ERTM)
4721 return;
4722
4723 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4724 l2cap_tx(chan, NULL, NULL, event);
4725 }
4726
/* Drain in-sequence frames from the SREJ queue after a gap was filled.
 *
 * Returns 0 or the first reassembly error.  When the queue empties, the
 * receiver leaves SREJ_SENT state and acknowledges.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected sequence number isn't queued */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4760
/* Handle a received Selective Reject (SREJ) S-frame.
 *
 * Retransmits the single requested I-frame, honouring the poll/final
 * bits and the per-frame retry limit; an invalid reqseq or exceeded
 * retry limit disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq requests a frame never sent: protocol
	 * violation, disconnect.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: we owe the peer an F-bit on the retransmit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only when this final answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4818
/* Process a received REJ (Reject) S-frame: the peer requests
 * retransmission of all unacknowledged I-frames starting at
 * control->reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for next_tx_seq rejects a frame that was never sent;
	 * protocol violation, disconnect.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Give up if the first rejected frame already hit the retry
	 * limit (max_tx == 0 means unlimited retries).
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F-bit set: only retransmit if this does not answer a
		 * REJ that was already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4855
/* Classify a received I-frame sequence number relative to the receive
 * window and any outstanding SREJ state.  Returns one of the
 * L2CAP_TXSEQ_* classifications that drive the rx state machines:
 * expected, duplicate, unexpected (gap -> send SREJ), or invalid
 * (optionally ignorable, see the "double poll" note below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only apply while SREJs are outstanding */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4941
/* ERTM rx state machine handler for the normal RECV state.  Dispatches
 * on the rx event (I-frame or one of the S-frame types).  The skb is
 * consumed here: it is either queued (skb_in_use set) or freed at the
 * end.  Returns 0 or a negative error from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once skb ownership passes to a queue */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* In local busy state the in-order frame is simply
			 * dropped; the peer will retransmit it later.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					/* F-bit did not answer an acted-on
					 * REJ; clear it and retransmit.
					 */
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the piggybacked ack in the
			 * control field is of interest.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy state: restart the retrans
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: stop sending and pending retransmissions */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* skb was not queued anywhere above, so it is ours to free */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5076
/* ERTM rx state machine handler for the SREJ_SENT state, entered after
 * a sequence gap was detected and SREJs were issued.  Incoming I-frames
 * are buffered in srej_q until the missing frames arrive; the skb is
 * consumed (queued or freed).  Returns 0 or a negative error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;	/* set once skb ownership passes to srej_q */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; queue it and try
			 * to reassemble any now-contiguous frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F-bit SREJ for the tail
			 * of the outstanding SREJ list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* skb was not queued anywhere above, so it is ours to free */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5220
5221 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5222 {
5223 /* Make sure reqseq is for a packet that has been sent but not acked */
5224 u16 unacked;
5225
5226 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5227 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5228 }
5229
5230 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5231 struct sk_buff *skb, u8 event)
5232 {
5233 int err = 0;
5234
5235 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5236 control, skb, event, chan->rx_state);
5237
5238 if (__valid_reqseq(chan, control->reqseq)) {
5239 switch (chan->rx_state) {
5240 case L2CAP_RX_STATE_RECV:
5241 err = l2cap_rx_state_recv(chan, control, skb, event);
5242 break;
5243 case L2CAP_RX_STATE_SREJ_SENT:
5244 err = l2cap_rx_state_srej_sent(chan, control, skb,
5245 event);
5246 break;
5247 default:
5248 /* shut it down */
5249 break;
5250 }
5251 } else {
5252 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5253 control->reqseq, chan->next_tx_seq,
5254 chan->expected_ack_seq);
5255 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5256 }
5257
5258 return err;
5259 }
5260
/* Receive an I-frame in streaming mode.  No retransmissions: in-order
 * frames are reassembled, anything else discards the partial SDU and
 * the frame.  The receive window then advances past txseq regardless.
 * Always returns 0 (err is never set).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* skb consumed by reassembly; reassembly errors are
		 * ignored in streaming mode.
		 */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: drop the SDU assembled so far
		 * along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize the window on the received txseq */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5298
5299 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5300 {
5301 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5302 u16 len;
5303 u8 event;
5304
5305 __unpack_control(chan, skb);
5306
5307 len = skb->len;
5308
5309 /*
5310 * We can just drop the corrupted I-frame here.
5311 * Receiver will miss it and start proper recovery
5312 * procedures and ask for retransmission.
5313 */
5314 if (l2cap_check_fcs(chan, skb))
5315 goto drop;
5316
5317 if (!control->sframe && control->sar == L2CAP_SAR_START)
5318 len -= L2CAP_SDULEN_SIZE;
5319
5320 if (chan->fcs == L2CAP_FCS_CRC16)
5321 len -= L2CAP_FCS_SIZE;
5322
5323 if (len > chan->mps) {
5324 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5325 goto drop;
5326 }
5327
5328 if (!control->sframe) {
5329 int err;
5330
5331 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5332 control->sar, control->reqseq, control->final,
5333 control->txseq);
5334
5335 /* Validate F-bit - F=0 always valid, F=1 only
5336 * valid in TX WAIT_F
5337 */
5338 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5339 goto drop;
5340
5341 if (chan->mode != L2CAP_MODE_STREAMING) {
5342 event = L2CAP_EV_RECV_IFRAME;
5343 err = l2cap_rx(chan, control, skb, event);
5344 } else {
5345 err = l2cap_stream_rx(chan, control, skb);
5346 }
5347
5348 if (err)
5349 l2cap_send_disconn_req(chan->conn, chan,
5350 ECONNRESET);
5351 } else {
5352 const u8 rx_func_to_event[4] = {
5353 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5354 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5355 };
5356
5357 /* Only I-frames are expected in streaming mode */
5358 if (chan->mode == L2CAP_MODE_STREAMING)
5359 goto drop;
5360
5361 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5362 control->reqseq, control->final, control->poll,
5363 control->super);
5364
5365 if (len != 0) {
5366 BT_ERR("%d", len);
5367 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5368 goto drop;
5369 }
5370
5371 /* Validate F and P bits */
5372 if (control->final && (control->poll ||
5373 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5374 goto drop;
5375
5376 event = rx_func_to_event[control->super];
5377 if (l2cap_rx(chan, control, skb, event))
5378 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5379 }
5380
5381 return 0;
5382
5383 drop:
5384 kfree_skb(skb);
5385 return 0;
5386 }
5387
/* Deliver an skb to the connection-oriented channel identified by its
 * source CID.  Takes the channel lock (via lookup or explicitly for a
 * newly created A2MP channel) and releases it before returning; the
 * skb is always consumed.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5446
/* Deliver a connectionless (G-frame) payload to a channel listening on
 * the given PSM.  The skb is consumed: passed to recv() or freed.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Enforce the channel's incoming MTU */
	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
5470
/* Deliver an LE ATT payload to a channel bound to the fixed ATT CID.
 * The skb is consumed: passed to recv() or freed.
 */
static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
			      struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Enforce the channel's incoming MTU */
	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
5494
/* Demultiplex a complete L2CAP frame by channel ID and hand it to the
 * matching channel handler.  Expects skb to start with the basic
 * L2CAP header; the skb is consumed by whichever handler is called.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh still points at the header bytes after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM after the header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* Security Manager failure terminates the connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5538
5539 /* ---- L2CAP interface with lower layer (HCI) ---- */
5540
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2).  Returns the
 * accumulated HCI_LM_* link-mode flags (0 rejects the connection).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
5571
5572 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5573 {
5574 struct l2cap_conn *conn;
5575
5576 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5577
5578 if (!status) {
5579 conn = l2cap_conn_add(hcon, status);
5580 if (conn)
5581 l2cap_conn_ready(conn);
5582 } else
5583 l2cap_conn_del(hcon, bt_to_errno(status));
5584
5585 }
5586
5587 int l2cap_disconn_ind(struct hci_conn *hcon)
5588 {
5589 struct l2cap_conn *conn = hcon->l2cap_data;
5590
5591 BT_DBG("hcon %p", hcon);
5592
5593 if (!conn)
5594 return HCI_ERROR_REMOTE_USER_TERM;
5595 return conn->disc_reason;
5596 }
5597
/* HCI callback: the ACL link went down; tear down the L2CAP
 * connection, converting the HCI reason to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
5604
5605 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5606 {
5607 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5608 return;
5609
5610 if (encrypt == 0x00) {
5611 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5612 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5613 } else if (chan->sec_level == BT_SECURITY_HIGH)
5614 l2cap_chan_close(chan, ECONNREFUSED);
5615 } else {
5616 if (chan->sec_level == BT_SECURITY_MEDIUM)
5617 __clear_chan_timer(chan);
5618 }
5619 }
5620
/* HCI callback: authentication/encryption state of the link changed
 * (status 0 = success).  Walks every channel on the connection under
 * chan_lock and advances each one according to its state: LE channels
 * become ready, BT_CONNECT channels proceed or time out, BT_CONNECT2
 * channels get their pending connect response sent.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encryption up on LE: distribute SMP keys and stop the
		 * security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel still waiting for its own security procedure */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Resume a socket that was suspended while the
			 * security procedure ran.
			 */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was waiting on security:
			 * send the deferred connect response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right away on success */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5736
/* HCI callback: receive one ACL data packet.  Reassembles fragmented
 * L2CAP frames across ACL packets using conn->rx_skb/rx_len, and hands
 * complete frames to l2cap_recv_frame() (which consumes them).  The
 * incoming skb itself is always freed here unless a complete start
 * fragment was forwarded directly.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the reassembly buffer */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
5837
/* debugfs: dump one line per L2CAP channel (addresses, state, PSM,
 * CIDs, MTUs, security level and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
5858
/* debugfs open: bind the seq_file single-record show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5863
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5872
/* Module init: register the L2CAP socket family and, when debugfs is
 * available, create the "l2cap" debug file.  Debugfs failure is only
 * logged, not fatal.  Returns 0 or a negative errno.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
						    NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;
}
5890
/* Module exit: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5896
/* Runtime-writable module parameter to force basic mode only */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");