/*
 * Bluetooth: Add implementation for retransmitting all unacked frames
 * Source: [GitHub/mt8127/android_kernel_alcatel_ttab.git] net/bluetooth/l2cap_core.c
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
/* Runtime switch for Enhanced Retransmission Mode.
 * NOTE(review): mainline initializes this to false (ERTM enabled); here it
 * is set to 1, which disables ERTM/streaming negotiation by default —
 * confirm this is an intentional vendor change.
 */
bool disable_ertm = 1;

/* Feature bits advertised in an L2CAP Information Response. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel, guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
/* Find channel with given SCID.
 * Returns a locked channel; the caller is responsible for releasing it
 * with l2cap_chan_unlock().  conn->chan_lock is held only for the lookup.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);	/* returned locked */
	mutex_unlock(&conn->chan_lock);

	return c;
}
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130 {
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138 }
139
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141 {
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168 done:
169 write_unlock(&chan_list_lock);
170 return err;
171 }
172
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174 {
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182 }
183
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185 {
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194 }
195
/* Update the channel state and notify the owner through the
 * state_change callback.  Callers are expected to hold the owning
 * socket lock (see l2cap_state_change() below).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
204
/* Locked wrapper around __l2cap_state_change() for callers that do
 * not already hold the socket lock.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
213
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
215 {
216 struct sock *sk = chan->sk;
217
218 sk->sk_err = err;
219 }
220
/* Locked wrapper around __l2cap_chan_set_err() for callers that do
 * not already hold the socket lock.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275 }
276
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR exactly when
 * its sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
								u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove a sequence number from the list.  Removing the head is O(1);
 * removing an interior number walks the singly-linked chain to find its
 * predecessor.  Returns the removed number, or L2CAP_SEQ_LIST_CLEAR
 * when the list is empty or seq is not a member.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* List became empty: reset the tail marker too. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the head of the list in constant time; returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
/* Append a sequence number at the tail in constant time.  Duplicate
 * appends are ignored because each slot doubles as a membership flag.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: seq becomes the head as well. */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for the channel timer: closes the channel with
 * an error reason derived from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: connection channel-list mutex first, then the
	 * channel lock.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Timing out while connected/configuring (or while connecting
	 * above SDP security) reads as a refused connection; anything
	 * else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* The owner's close callback runs without the channel lock held. */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	/* Drop a channel reference - presumably the one taken when the
	 * timer was armed; TODO confirm against __set_chan_timer().
	 */
	l2cap_chan_put(chan);
}
390
391 struct l2cap_chan *l2cap_chan_create(void)
392 {
393 struct l2cap_chan *chan;
394
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
396 if (!chan)
397 return NULL;
398
399 mutex_init(&chan->lock);
400
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
404
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406
407 chan->state = BT_OPEN;
408
409 atomic_set(&chan->refcnt, 1);
410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
414 BT_DBG("chan %p", chan);
415
416 return chan;
417 }
418
/* Unlink a channel from the global list and drop a reference; the
 * object is freed once the last reference goes away.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
/* Install the default ERTM/security parameters on a fresh channel:
 * CRC16 FCS, default retransmission count and transmit window, low
 * security, and force-active radio behaviour.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
438
/* Attach a channel to a connection and assign its CIDs and MTU
 * according to the channel type.  Caller must hold conn->chan_lock
 * (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is seen. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds its own reference. */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
487
/* Locked wrapper around __l2cap_chan_add(): takes conn->chan_lock for
 * callers that do not already hold it.
 */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
494
/* Detach a channel from its connection and mark it closed.  Unlinks it
 * from the connection's channel list (dropping the reference taken in
 * __l2cap_chan_add()), signals the owning socket or its listening
 * parent, and - unless configuration never completed - purges queued
 * transmit data and ERTM state.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Not yet accepted: detach from the listener and wake it. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* If configuration never completed there is no queued data or
	 * ERTM state to tear down.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
	}
}
547
/* Tear down every connection still sitting on a listening socket's
 * accept queue; each is closed with ECONNRESET.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Owner's close callback runs unlocked. */
		chan->ops->close(chan->data);
	}
}
566
/* Close a channel, driving the teardown appropriate for its current
 * state: listeners drop their backlog, established ACL channels send a
 * Disconnection Request first, half-open incoming channels get a
 * Connection Response rejection, and everything else is deleted
 * directly.  Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established ACL channel: request an orderly disconnect
		 * and arm the channel timer to bound the wait.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
630
631 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
632 {
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
639 default:
640 return HCI_AT_NO_BONDING;
641 }
642 } else if (chan->psm == cpu_to_le16(0x0001)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
645
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
648 else
649 return HCI_AT_NO_BONDING;
650 } else {
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
656 default:
657 return HCI_AT_NO_BONDING;
658 }
659 }
660 }
661
662 /* Service level security */
663 int l2cap_chan_check_security(struct l2cap_chan *chan)
664 {
665 struct l2cap_conn *conn = chan->conn;
666 __u8 auth_type;
667
668 auth_type = l2cap_get_auth_type(chan);
669
670 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
671 }
672
673 static u8 l2cap_get_ident(struct l2cap_conn *conn)
674 {
675 u8 id;
676
677 /* Get next available identificator.
678 * 1 - 128 are used by kernel.
679 * 129 - 199 are reserved.
680 * 200 - 254 are used by utilities like l2ping, etc.
681 */
682
683 spin_lock(&conn->lock);
684
685 if (++conn->tx_ident > 128)
686 conn->tx_ident = 1;
687
688 id = conn->tx_ident;
689
690 spin_unlock(&conn->lock);
691
692 return id;
693 }
694
695 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
696 {
697 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
698 u8 flags;
699
700 BT_DBG("code 0x%2.2x", code);
701
702 if (!skb)
703 return;
704
705 if (lmp_no_flush_capable(conn->hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
707 else
708 flags = ACL_START;
709
710 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
711 skb->priority = HCI_PRIO_MAX;
712
713 hci_send_acl(conn->hchan, skb, flags);
714 }
715
716 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
717 {
718 struct hci_conn *hcon = chan->conn->hcon;
719 u16 flags;
720
721 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
722 skb->priority);
723
724 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
725 lmp_no_flush_capable(hcon->hdev))
726 flags = ACL_START_NO_FLUSH;
727 else
728 flags = ACL_START;
729
730 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
731 hci_send_acl(chan->conn->hchan, skb, flags);
732 }
733
734 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
735 {
736 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
737 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
738
739 if (enh & L2CAP_CTRL_FRAME_TYPE) {
740 /* S-Frame */
741 control->sframe = 1;
742 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
743 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
744
745 control->sar = 0;
746 control->txseq = 0;
747 } else {
748 /* I-Frame */
749 control->sframe = 0;
750 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
751 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
752
753 control->poll = 0;
754 control->super = 0;
755 }
756 }
757
758 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
759 {
760 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
761 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
762
763 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
764 /* S-Frame */
765 control->sframe = 1;
766 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
767 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
768
769 control->sar = 0;
770 control->txseq = 0;
771 } else {
772 /* I-Frame */
773 control->sframe = 0;
774 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
775 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
776
777 control->poll = 0;
778 control->super = 0;
779 }
780 }
781
/* Strip and decode the control field at the start of skb->data into
 * bt_cb(skb)->control, choosing the 32-bit extended or 16-bit enhanced
 * layout based on the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
						&bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
						&bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
795
796 static u32 __pack_extended_control(struct l2cap_ctrl *control)
797 {
798 u32 packed;
799
800 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
802
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
807 } else {
808 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
810 }
811
812 return packed;
813 }
814
815 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
816 {
817 u16 packed;
818
819 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
820 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
821
822 if (control->sframe) {
823 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
824 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
825 packed |= L2CAP_CTRL_FRAME_TYPE;
826 } else {
827 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
828 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
829 }
830
831 return packed;
832 }
833
/* Encode *control into the skb immediately after the basic L2CAP
 * header, using the layout selected by the channel's FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
	}
}
846
/* Allocate and fill a complete S-frame PDU carrying the already-packed
 * control word.  The header size depends on whether extended control
 * fields are in use, and a CRC16 FCS over header plus control is
 * appended when the channel uses it.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the basic header itself. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
884
/* Build and transmit the supervisory (S) frame described by *control.
 * Sets control->final when an F-bit is pending and this is not a poll,
 * tracks RNR-sent bookkeeping, and treats any non-SREJ S-frame as an
 * acknowledgment (updates last_acked_seq and stops the ack timer).
 * Silently ignores non-S-frame control blocks and PDU build failures.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
			!control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges up to reqseq, so a delayed ack
		 * is no longer needed.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
			control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
922
923 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
924 {
925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
926 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
927 set_bit(CONN_RNR_SENT, &chan->conn_state);
928 } else
929 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
930
931 control |= __set_reqseq(chan, chan->buffer_seq);
932 }
933
/* True when no Connection Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
938
939 static void l2cap_send_conn_req(struct l2cap_chan *chan)
940 {
941 struct l2cap_conn *conn = chan->conn;
942 struct l2cap_conn_req req;
943
944 req.scid = cpu_to_le16(chan->scid);
945 req.psm = chan->psm;
946
947 chan->ident = l2cap_get_ident(conn);
948
949 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
950
951 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
952 }
953
/* Transition a channel to BT_CONNECTED: clear all configuration flags
 * and the channel timer, then wake the owning socket (and a listening
 * parent, if any).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
977
/* Begin establishing a channel on an existing link.  LE channels become
 * ready immediately.  BR/EDR channels first need the remote feature
 * mask: if it has not been requested yet an Information Request is sent
 * (with a timeout); once available, a Connection Request goes out
 * provided security passes and none is already pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1007
1008 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1009 {
1010 u32 local_feat_mask = l2cap_feat_mask;
1011 if (!disable_ertm)
1012 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1013
1014 switch (mode) {
1015 case L2CAP_MODE_ERTM:
1016 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1017 case L2CAP_MODE_STREAMING:
1018 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1019 default:
1020 return 0x00;
1021 }
1022 }
1023
/* Send an L2CAP Disconnection Request for this channel, stop any ERTM
 * timers, and move the channel to BT_DISCONN with the given error
 * recorded on the socket.  A NULL conn is silently ignored.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1048
1049 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and push its
 * signalling forward: outgoing channels (BT_CONNECT) get a Connection
 * Request once security passes (or are closed if their mode is
 * unsupported); incoming channels (BT_CONNECT2) get their pending
 * Connection Response, followed by the first Configure Request when
 * the response was a success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() may unlink the channel. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic mode:
			 * close rather than request an unsupported mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
						&bt_sk(sk)->flags)) {
					/* Userspace must accept first:
					 * answer "pending" and wake the
					 * listener.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Only a successful response proceeds directly to
			 * configuration, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1132
1133 /* Find socket with cid and source/destination bdaddr.
1134 * Returns closest match, locked.
1135 */
/* Find socket with cid and source/destination bdaddr.
 * Returns an exact address match immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match found.  NOTE(review): despite the
 * comment above the original, the returned channel is NOT locked -
 * chan_list_lock is released before returning.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
							bdaddr_t *src,
							bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches any state. */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
							(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1175
/* Accept an incoming LE link on the fixed LE data CID: find a listening
 * channel, spawn a child channel/socket and mark it connected.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the HCI link alive for the new channel. */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* No configuration phase on LE data channels: go straight to
	 * connected and wake up the listener.
	 */
	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}
1222
/* The underlying HCI link is up: advance every channel on this
 * connection according to its link type and channel type.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to an LE listener, if any. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: kick off SMP security elevation. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels are usable as soon
			 * as the link exists; no L2CAP handshake needed.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1261
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1264 {
1265 struct l2cap_chan *chan;
1266
1267 BT_DBG("conn %p", conn);
1268
1269 mutex_lock(&conn->chan_lock);
1270
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1274 }
1275
1276 mutex_unlock(&conn->chan_lock);
1277 }
1278
1279 static void l2cap_info_timeout(struct work_struct *work)
1280 {
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1282 info_timer.work);
1283
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0;
1286
1287 l2cap_conn_start(conn);
1288 }
1289
/* Tear down the L2CAP connection attached to @hcon: kill all channels,
 * cancel pending timers and free the connection data.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close() can still run after
		 * l2cap_chan_del() has unlinked it from the connection.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only cancel the info timer if it was ever armed. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1332
/* SMP security procedure took too long: tear down the whole link. */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1340
1341 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1342 {
1343 struct l2cap_conn *conn = hcon->l2cap_data;
1344 struct hci_chan *hchan;
1345
1346 if (conn || status)
1347 return conn;
1348
1349 hchan = hci_chan_create(hcon);
1350 if (!hchan)
1351 return NULL;
1352
1353 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1354 if (!conn) {
1355 hci_chan_del(hchan);
1356 return NULL;
1357 }
1358
1359 hcon->l2cap_data = conn;
1360 conn->hcon = hcon;
1361 conn->hchan = hchan;
1362
1363 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1364
1365 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1366 conn->mtu = hcon->hdev->le_mtu;
1367 else
1368 conn->mtu = hcon->hdev->acl_mtu;
1369
1370 conn->src = &hcon->hdev->bdaddr;
1371 conn->dst = &hcon->dst;
1372
1373 conn->feat_mask = 0;
1374
1375 spin_lock_init(&conn->lock);
1376 mutex_init(&conn->chan_lock);
1377
1378 INIT_LIST_HEAD(&conn->chan_l);
1379
1380 if (hcon->type == LE_LINK)
1381 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1382 else
1383 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1384
1385 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1386
1387 return conn;
1388 }
1389
1390 /* ---- Socket interface ---- */
1391
1392 /* Find socket with psm and source / destination bdaddr.
1393 * Returns closest match.
1394 */
1395 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1396 bdaddr_t *src,
1397 bdaddr_t *dst)
1398 {
1399 struct l2cap_chan *c, *c1 = NULL;
1400
1401 read_lock(&chan_list_lock);
1402
1403 list_for_each_entry(c, &chan_list, global_l) {
1404 struct sock *sk = c->sk;
1405
1406 if (state && c->state != state)
1407 continue;
1408
1409 if (c->psm == psm) {
1410 int src_match, dst_match;
1411 int src_any, dst_any;
1412
1413 /* Exact match. */
1414 src_match = !bacmp(&bt_sk(sk)->src, src);
1415 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1416 if (src_match && dst_match) {
1417 read_unlock(&chan_list_lock);
1418 return c;
1419 }
1420
1421 /* Closest match */
1422 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1423 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1424 if ((src_match && dst_any) || (src_any && dst_match) ||
1425 (src_any && dst_any))
1426 c1 = c;
1427 }
1428 }
1429
1430 read_unlock(&chan_list_lock);
1431
1432 return c1;
1433 }
1434
/* Initiate an outgoing connection on @chan to @dst: resolve the route,
 * validate PSM/CID and mode, create or reuse the ACL/LE link and start
 * the L2CAP handshake. Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The fixed LE data CID selects an LE link, anything else ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel may use an LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which must be acquired
	 * before the channel lock, so drop and re-take the channel lock.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1578
/* Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acked by the peer, or the channel loses its connection. Called with
 * the socket locked; the lock is dropped while sleeping. Returns 0 on
 * success or a negative error (signal or socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while asleep so the receive path
		 * can process acks and make progress.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1610
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1612 {
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 monitor_timer.work);
1615
1616 BT_DBG("chan %p", chan);
1617
1618 l2cap_chan_lock(chan);
1619
1620 if (chan->retry_count >= chan->remote_max_tx) {
1621 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1624 return;
1625 }
1626
1627 chan->retry_count++;
1628 __set_monitor_timer(chan);
1629
1630 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1631 l2cap_chan_unlock(chan);
1632 l2cap_chan_put(chan);
1633 }
1634
1635 static void l2cap_retrans_timeout(struct work_struct *work)
1636 {
1637 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1638 retrans_timer.work);
1639
1640 BT_DBG("chan %p", chan);
1641
1642 l2cap_chan_lock(chan);
1643
1644 chan->retry_count = 1;
1645 __set_monitor_timer(chan);
1646
1647 set_bit(CONN_WAIT_F, &chan->conn_state);
1648
1649 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1650
1651 l2cap_chan_unlock(chan);
1652 l2cap_chan_put(chan);
1653 }
1654
1655 static int l2cap_streaming_send(struct l2cap_chan *chan,
1656 struct sk_buff_head *skbs)
1657 {
1658 struct sk_buff *skb;
1659 struct l2cap_ctrl *control;
1660
1661 BT_DBG("chan %p, skbs %p", chan, skbs);
1662
1663 if (chan->state != BT_CONNECTED)
1664 return -ENOTCONN;
1665
1666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1667
1668 while (!skb_queue_empty(&chan->tx_q)) {
1669
1670 skb = skb_dequeue(&chan->tx_q);
1671
1672 bt_cb(skb)->control.retries = 1;
1673 control = &bt_cb(skb)->control;
1674
1675 control->reqseq = 0;
1676 control->txseq = chan->next_tx_seq;
1677
1678 __pack_control(chan, control, skb);
1679
1680 if (chan->fcs == L2CAP_FCS_CRC16) {
1681 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1682 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1683 }
1684
1685 l2cap_do_send(chan, skb);
1686
1687 BT_DBG("Sent txseq %d", (int)control->txseq);
1688
1689 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1690 chan->frames_sent++;
1691 }
1692
1693 return 0;
1694 }
1695
/* Transmit queued I-frames while the remote transmit window has room.
 * Returns the number of frames sent, 0 if nothing could go out, or
 * -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled receiver-not-ready: hold all transmissions. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggybacks an ack for received frames. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame; the
		 * original skb stays on tx_q for retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1762
/* Retransmit every frame whose sequence number sits on retrans_list.
 * Disconnects the channel when a frame exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
				seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a local copy of the control block so the
		 * header stored with the queued frame stays untouched.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* The control field changed, so the FCS must be recomputed. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1838
/* Queue every sent-but-unacked frame, starting at control->reqseq, for
 * retransmission and resend them.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer must be answered with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip past frames the peer has already acknowledged. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect every sent frame up to the first unsent one. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1872
/* Acknowledge received I-frames: send an RNR when locally busy, an
 * immediate RR once the un-acked backlog crosses 3/4 of the tx window,
 * or arm the ack timer to batch the ack with later traffic.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Backlog below threshold: defer the ack via the timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1922
/* Copy user data from @msg into @skb (first @count bytes), then into
 * continuation fragments chained on skb's frag_list until @len bytes
 * have been copied. Returns bytes copied or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link first, so kfree_skb(skb) frees it on later errors. */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1967
1968 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1969 struct msghdr *msg, size_t len,
1970 u32 priority)
1971 {
1972 struct l2cap_conn *conn = chan->conn;
1973 struct sk_buff *skb;
1974 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1975 struct l2cap_hdr *lh;
1976
1977 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1978
1979 count = min_t(unsigned int, (conn->mtu - hlen), len);
1980
1981 skb = chan->ops->alloc_skb(chan, count + hlen,
1982 msg->msg_flags & MSG_DONTWAIT);
1983 if (IS_ERR(skb))
1984 return skb;
1985
1986 skb->priority = priority;
1987
1988 /* Create L2CAP header */
1989 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1990 lh->cid = cpu_to_le16(chan->dcid);
1991 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1992 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1993
1994 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1995 if (unlikely(err < 0)) {
1996 kfree_skb(skb);
1997 return ERR_PTR(err);
1998 }
1999 return skb;
2000 }
2001
2002 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2003 struct msghdr *msg, size_t len,
2004 u32 priority)
2005 {
2006 struct l2cap_conn *conn = chan->conn;
2007 struct sk_buff *skb;
2008 int err, count;
2009 struct l2cap_hdr *lh;
2010
2011 BT_DBG("chan %p len %d", chan, (int)len);
2012
2013 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2014
2015 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2016 msg->msg_flags & MSG_DONTWAIT);
2017 if (IS_ERR(skb))
2018 return skb;
2019
2020 skb->priority = priority;
2021
2022 /* Create L2CAP header */
2023 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2024 lh->cid = cpu_to_le16(chan->dcid);
2025 lh->len = cpu_to_le16(len);
2026
2027 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2028 if (unlikely(err < 0)) {
2029 kfree_skb(skb);
2030 return ERR_PTR(err);
2031 }
2032 return skb;
2033 }
2034
/* Allocate and fill one ERTM/streaming I-frame PDU from user data.
 * @sdulen: nonzero only for the first (SAR start) PDU of a segmented
 * SDU, in which case an SDU-length field is added after the control
 * field. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve tail room for the FCS appended at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2091
/* Split one SDU from user space into a queue of I-frame PDUs, applying
 * L2CAP segmentation (SAR) when the SDU exceeds a single PDU payload.
 * On success the PDUs are on @seg_queue; on failure it is purged and a
 * negative error is returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SDU-length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* The START segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START PDU carries the SDU length;
			 * subsequent PDUs regain that header space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2157
/* Entry point for sending user data on @chan. Dispatches on channel
 * type and mode: connectionless, basic, or ERTM/streaming (which are
 * segmented first). Returns bytes sent or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_tx(chan, 0, &seg_queue,
				       L2CAP_EV_DATA_REQUEST);
		else
			err = l2cap_streaming_send(chan, &seg_queue);

		if (!err)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2239
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	/* Placeholder: SREJ transmission is not implemented yet. */
}
2244
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	/* Placeholder: SREJ-tail transmission is not implemented yet. */
}
2249
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	/* Placeholder: SREJ-list transmission is not implemented yet. */
}
2254
2255 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2256 {
2257 struct sk_buff *acked_skb;
2258 u16 ackseq;
2259
2260 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2261
2262 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2263 return;
2264
2265 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2266 chan->expected_ack_seq, chan->unacked_frames);
2267
2268 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2269 ackseq = __next_seq(chan, ackseq)) {
2270
2271 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2272 if (acked_skb) {
2273 skb_unlink(acked_skb, &chan->tx_q);
2274 kfree_skb(acked_skb);
2275 chan->unacked_frames--;
2276 }
2277 }
2278
2279 chan->expected_ack_seq = reqseq;
2280
2281 if (chan->unacked_frames == 0)
2282 __clear_retrans_timer(chan);
2283
2284 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2285 }
2286
2287 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2288 {
2289 BT_DBG("chan %p", chan);
2290
2291 chan->expected_tx_seq = chan->buffer_seq;
2292 l2cap_seq_list_clear(&chan->srej_list);
2293 skb_queue_purge(&chan->srej_q);
2294 chan->rx_state = L2CAP_RX_STATE_RECV;
2295 }
2296
/* ERTM transmit state machine handler for the XMIT (normal) state. */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new PDUs and send as the window allows. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We sent an RNR while busy: poll the peer with
			 * an RR so both sides resynchronize.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer's receive
		 * state and wait for the F-bit response.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2372
2373 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2374 struct l2cap_ctrl *control,
2375 struct sk_buff_head *skbs, u8 event)
2376 {
2377 int err = 0;
2378
2379 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2380 event);
2381
2382 switch (event) {
2383 case L2CAP_EV_DATA_REQUEST:
2384 if (chan->tx_send_head == NULL)
2385 chan->tx_send_head = skb_peek(skbs);
2386 /* Queue data, but don't send. */
2387 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2388 break;
2389 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2390 BT_DBG("Enter LOCAL_BUSY");
2391 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2392
2393 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2394 /* The SREJ_SENT state must be aborted if we are to
2395 * enter the LOCAL_BUSY state.
2396 */
2397 l2cap_abort_rx_srej_sent(chan);
2398 }
2399
2400 l2cap_send_ack(chan);
2401
2402 break;
2403 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2404 BT_DBG("Exit LOCAL_BUSY");
2405 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2406
2407 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2408 struct l2cap_ctrl local_control;
2409 memset(&local_control, 0, sizeof(local_control));
2410 local_control.sframe = 1;
2411 local_control.super = L2CAP_SUPER_RR;
2412 local_control.poll = 1;
2413 local_control.reqseq = chan->buffer_seq;
2414 l2cap_send_sframe(chan, &local_control);
2415
2416 chan->retry_count = 1;
2417 __set_monitor_timer(chan);
2418 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2419 }
2420 break;
2421 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2422 l2cap_process_reqseq(chan, control->reqseq);
2423
2424 /* Fall through */
2425
2426 case L2CAP_EV_RECV_FBIT:
2427 if (control && control->final) {
2428 __clear_monitor_timer(chan);
2429 if (chan->unacked_frames > 0)
2430 __set_retrans_timer(chan);
2431 chan->retry_count = 0;
2432 chan->tx_state = L2CAP_TX_STATE_XMIT;
2433 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2434 }
2435 break;
2436 case L2CAP_EV_EXPLICIT_POLL:
2437 /* Ignore */
2438 break;
2439 case L2CAP_EV_MONITOR_TO:
2440 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2441 l2cap_send_rr_or_rnr(chan, 1);
2442 __set_monitor_timer(chan);
2443 chan->retry_count++;
2444 } else {
2445 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2446 }
2447 break;
2448 default:
2449 break;
2450 }
2451
2452 return err;
2453 }
2454
2455 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2456 struct sk_buff_head *skbs, u8 event)
2457 {
2458 int err = 0;
2459
2460 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2461 chan, control, skbs, event, chan->tx_state);
2462
2463 switch (chan->tx_state) {
2464 case L2CAP_TX_STATE_XMIT:
2465 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2466 break;
2467 case L2CAP_TX_STATE_WAIT_F:
2468 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2469 break;
2470 default:
2471 /* Ignore event */
2472 break;
2473 }
2474
2475 return err;
2476 }
2477
2478 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2479 struct l2cap_ctrl *control)
2480 {
2481 BT_DBG("chan %p, control %p", chan, control);
2482 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2483 }
2484
2485 /* Copy frame to all raw sockets on that connection */
2486 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2487 {
2488 struct sk_buff *nskb;
2489 struct l2cap_chan *chan;
2490
2491 BT_DBG("conn %p", conn);
2492
2493 mutex_lock(&conn->chan_lock);
2494
2495 list_for_each_entry(chan, &conn->chan_l, list) {
2496 struct sock *sk = chan->sk;
2497 if (chan->chan_type != L2CAP_CHAN_RAW)
2498 continue;
2499
2500 /* Don't send frame to the socket it came from */
2501 if (skb->sk == sk)
2502 continue;
2503 nskb = skb_clone(skb, GFP_ATOMIC);
2504 if (!nskb)
2505 continue;
2506
2507 if (chan->ops->recv(chan->data, nskb))
2508 kfree_skb(nskb);
2509 }
2510
2511 mutex_unlock(&conn->chan_lock);
2512 }
2513
2514 /* ---- L2CAP signalling commands ---- */
/* Build an skb chain for an L2CAP signalling command.
 *
 * The first skb carries the L2CAP header, the command header and as
 * much of the payload as fits within conn->mtu; any remaining payload
 * is placed in frag_list continuation skbs with no additional headers.
 *
 * Returns the head skb, or NULL on allocation failure (any fragments
 * already allocated are freed with the head).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; the first skb is capped at the link MTU. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel ID. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy the payload portion that fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to be carried in continuation fragments. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb releases the head and its whole frag_list. */
	kfree_skb(skb);
	return NULL;
}
2577
/* Extract the next configuration option from the buffer at *ptr and
 * advance *ptr past it.
 *
 * 1-, 2- and 4-byte option values are returned inline in *val;
 * any other length is returned as a pointer into the option buffer.
 *
 * NOTE(review): opt->len comes from the wire and is not checked here
 * against the space remaining in the buffer, so a malformed option can
 * make the caller's loop read past the end -- confirm every caller
 * bounds its remaining length before trusting *val.
 *
 * Returns the number of bytes consumed (header plus value).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length value: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2610
/* Append a configuration option (type/len/value) at *ptr and advance
 * *ptr past it.
 *
 * 1-, 2- and 4-byte values are stored inline from val; for any other
 * length, val is interpreted as a pointer to len bytes of data.
 *
 * NOTE(review): the output buffer size is not passed in, so the caller
 * must guarantee enough space remains -- verify call sites size their
 * buffers for the worst-case option set.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val holds a pointer to the option payload. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2640
2641 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2642 {
2643 struct l2cap_conf_efs efs;
2644
2645 switch (chan->mode) {
2646 case L2CAP_MODE_ERTM:
2647 efs.id = chan->local_id;
2648 efs.stype = chan->local_stype;
2649 efs.msdu = cpu_to_le16(chan->local_msdu);
2650 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2651 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2652 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2653 break;
2654
2655 case L2CAP_MODE_STREAMING:
2656 efs.id = 1;
2657 efs.stype = L2CAP_SERV_BESTEFFORT;
2658 efs.msdu = cpu_to_le16(chan->local_msdu);
2659 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2660 efs.acc_lat = 0;
2661 efs.flush_to = 0;
2662 break;
2663
2664 default:
2665 return;
2666 }
2667
2668 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2669 (unsigned long) &efs);
2670 }
2671
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * Sends any pending acknowledgment for the channel, then drops the
 * channel reference that was taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	/* Balances the hold taken when the ack timer was scheduled. */
	l2cap_chan_put(chan);
}
2687
/* Reset sequence-number state and initialize ERTM machinery.
 *
 * For all modes the counters, SDU reassembly state and tx queue are
 * reset.  For ERTM this additionally sets the initial rx/tx states,
 * initializes the retransmission, monitor and ack timer work items,
 * and allocates the SREJ and retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-only. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej list when the second init fails. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2727
2728 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2729 {
2730 switch (mode) {
2731 case L2CAP_MODE_STREAMING:
2732 case L2CAP_MODE_ERTM:
2733 if (l2cap_mode_supported(mode, remote_feat_mask))
2734 return mode;
2735 /* fall through */
2736 default:
2737 return L2CAP_MODE_BASIC;
2738 }
2739 }
2740
2741 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2742 {
2743 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2744 }
2745
2746 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2747 {
2748 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2749 }
2750
2751 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2752 {
2753 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2754 __l2cap_ews_supported(chan)) {
2755 /* use extended control field */
2756 set_bit(FLAG_EXT_CTRL, &chan->flags);
2757 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2758 } else {
2759 chan->tx_win = min_t(u16, chan->tx_win,
2760 L2CAP_DEFAULT_TX_WINDOW);
2761 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2762 }
2763 }
2764
/* Build an outgoing configuration request for the channel into data.
 *
 * On the first request (no prior conf req/rsp exchanged) the channel
 * mode may be downgraded via l2cap_select_mode() based on the remote
 * feature mask.  Options emitted depend on the resulting mode: MTU,
 * RFC, optionally EFS, FCS and extended window size.
 *
 * Returns the number of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any exchange took place. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices keep their configured mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when differing from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Skip the RFC option entirely if the peer supports
		 * neither ERTM nor streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Bound the PDU size by what fits in the link MTU after
		 * the extended header, SDU length and FCS fields.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Advertise the full window via EWS when extended control
		 * fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2885
/* Parse the accumulated configuration request in chan->conf_req and
 * build the response into data.
 *
 * First pass: walk the options, recording MTU, flush timeout, RFC, FCS,
 * EFS and EWS values; unknown non-hint options are listed back with
 * result L2CAP_CONF_UNKNOWN.  Then, on the first exchange only, the
 * local mode may be adapted (or the connection refused) based on what
 * the remote proposed.  Finally the output options are emitted and the
 * result code (SUCCESS / UNACCEPT / PENDING / UNKNOWN) chosen.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the negotiation cannot converge.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: collect all options the remote sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon. */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown mandatory
			 * options are echoed back as unsupported.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode adaptation only happens on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* STATE2 devices refuse any mode other than their own. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed mode renegotiation. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must be compatible unless one
			 * side declared no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) overrides the RFC window. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* We set the timeouts the remote must honour. */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3105
/* Parse a configuration response from the remote and build the
 * follow-up request into data.
 *
 * Each option the remote returned is examined, local state is adjusted
 * (imtu, flush timeout, mode/RFC parameters, extended window, EFS) and
 * the agreed value is echoed into the new request.  On success the
 * negotiated ERTM/streaming timeouts and MPS are latched.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the remote's response is incompatible with the local configuration.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse MTUs below the spec minimum but keep
			 * negotiating with the minimum instead.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices cannot change mode mid-flight. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
							chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible unless one
			 * side declared no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode must stay basic; anything else is a protocol error. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3204
3205 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3206 {
3207 struct l2cap_conf_rsp *rsp = data;
3208 void *ptr = rsp->data;
3209
3210 BT_DBG("chan %p", chan);
3211
3212 rsp->scid = cpu_to_le16(chan->dcid);
3213 rsp->result = cpu_to_le16(result);
3214 rsp->flags = cpu_to_le16(flags);
3215
3216 return ptr - data;
3217 }
3218
/* Send the deferred connection response for a channel whose setup was
 * held back (e.g. for authorization), then kick off configuration by
 * sending our first config request if one has not been sent yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the config request. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3239
/* Extract the RFC option from a successful configuration response and
 * apply its timeouts/MPS to the channel.
 *
 * Only relevant for ERTM and streaming mode.  If the remote failed to
 * include an RFC option, sane defaults are used instead of leaving the
 * channel with uninitialized parameters.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the options for the first RFC entry. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3283
3284 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3285 {
3286 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3287
3288 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3289 return 0;
3290
3291 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3292 cmd->ident == conn->info_ident) {
3293 cancel_delayed_work(&conn->info_timer);
3294
3295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3296 conn->info_ident = 0;
3297
3298 l2cap_conn_start(conn);
3299 }
3300
3301 return 0;
3302 }
3303
/* Handle an incoming L2CAP connection request.
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security and backlog, creates a new child channel, and answers with
 * a connection response (success, pending or an error result).  If the
 * feature-mask exchange has not happened yet, an information request
 * is sent and the connection left pending; otherwise configuration is
 * started immediately on success.
 *
 * Always returns 0; errors are reported to the peer in the response.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: connection channel list, then the parent socket. */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	/* Decide between immediate success and pending states,
	 * depending on feature exchange, security and deferred setup.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Start the feature-mask information exchange if needed. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On success, immediately move on to configuration. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3433
/* Handle an incoming L2CAP connection response.
 *
 * The channel is located by source CID (or by command ident when no
 * CID was assigned yet).  On success the channel moves to BT_CONFIG
 * and the first configuration request is sent; a pending result just
 * marks the channel; any other result tears the channel down.
 *
 * Returns 0 on success or -EFAULT if no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means the peer has not echoed our CID; fall back
	 * to matching on the command identifier.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first path to set CONF_REQ_SENT sends it. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3501
3502 static inline void set_default_fcs(struct l2cap_chan *chan)
3503 {
3504 /* FCS is enabled only in ERTM or streaming mode, if one or both
3505 * sides request it.
3506 */
3507 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3508 chan->fcs = L2CAP_FCS_NONE;
3509 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3510 chan->fcs = L2CAP_FCS_CRC16;
3511 }
3512
/* Handle an incoming L2CAP configuration request.
 *
 * Request fragments (continuation flag set) are accumulated in
 * chan->conf_req until the final fragment arrives, then parsed as a
 * whole.  On completion a response is sent; when both directions are
 * configured the channel goes to BT_CONNECTED and ERTM/streaming state
 * is initialized.
 *
 * Returns 0 normally, -ENOENT if the CID is unknown, or a negative
 * errno bubbled up from ERTM initialization.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG or BT_CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We answered but have not configured our own direction yet. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3622
/* Handle an incoming L2CAP Configure Response on the signaling channel.
 *
 * Looks up the channel by the source CID carried in the response and
 * acts on the result code: SUCCESS stores the remote's RFC option,
 * PENDING may complete our side of the negotiation, UNACCEPT
 * renegotiates with a fresh Configure Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts), and anything else tears the
 * channel down.  Once both directions are configured the channel is
 * moved to BT_CONNECTED and ERTM/streaming state is initialized.
 *
 * Returns 0, or a negative error propagated from l2cap_ertm_init().
 * The channel lock acquired by the lookup is released at "done".
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING earlier, finish our side of
		 * the negotiation now that the peer has responded.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				       l2cap_build_conf_rsp(chan, buf,
					       L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Reject-list too large to renegotiate safely. */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation attempts - fall through and
		 * give up on the channel.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Bit 0 of flags is the continuation flag: more options follow
	 * in another Configure Response, so input is not done yet.
	 */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3729
/* Handle an incoming Disconnect Request.
 *
 * Looks up the local channel by the DCID in the request, acknowledges
 * with a Disconnect Response, marks the owning socket as shut down and
 * deletes the channel.  An unknown DCID is silently ignored.  Always
 * returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID, hence the lookup
	 * by dcid here.
	 */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel outlives l2cap_chan_del()
	 * until the close callback has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	/* close() is called without the channel lock held. */
	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3775
/* Handle an incoming Disconnect Response.
 *
 * Completes a disconnect we initiated: the channel is looked up by our
 * SCID and deleted with no error.  An unknown SCID is silently
 * ignored.  Always returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep a reference across chan_del so the close callback can
	 * still run safely below.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3809
/* Handle an incoming Information Request.
 *
 * Serves two query types: L2CAP_IT_FEAT_MASK answers with the extended
 * feature mask (ERTM/streaming/FCS and high-speed bits adjusted from
 * the disable_ertm and enable_hs module parameters), and
 * L2CAP_IT_FIXED_CHAN answers with the fixed channel map.  Any other
 * type gets L2CAP_IR_NOTSUPP.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				     | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				     | L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only with high speed
		 * enabled.  Note this modifies the global map in place.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3859
/* Handle an incoming Information Response during connection setup.
 *
 * Responses that do not match the outstanding request identifier, or
 * that arrive after the feature exchange already finished, are dropped.
 * A successful feature-mask response may trigger a follow-up query for
 * the fixed channel map; in every other terminal case the exchange is
 * marked done and pending channels are started.  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid response arrived before the info request timed out. */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* The peer supports fixed channels - query its map before
		 * declaring the exchange finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3917
3918 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3919 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3920 void *data)
3921 {
3922 struct l2cap_create_chan_req *req = data;
3923 struct l2cap_create_chan_rsp rsp;
3924 u16 psm, scid;
3925
3926 if (cmd_len != sizeof(*req))
3927 return -EPROTO;
3928
3929 if (!enable_hs)
3930 return -EINVAL;
3931
3932 psm = le16_to_cpu(req->psm);
3933 scid = le16_to_cpu(req->scid);
3934
3935 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3936
3937 /* Placeholder: Always reject */
3938 rsp.dcid = 0;
3939 rsp.scid = cpu_to_le16(scid);
3940 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3941 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3942
3943 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3944 sizeof(rsp), &rsp);
3945
3946 return 0;
3947 }
3948
/* Handle a Create Channel Response by delegating to the Connect
 * Response handler (the fields consumed there are presumed to be
 * layout-compatible - the code relies on that).
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3956
3957 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3958 u16 icid, u16 result)
3959 {
3960 struct l2cap_move_chan_rsp rsp;
3961
3962 BT_DBG("icid %d, result %d", icid, result);
3963
3964 rsp.icid = cpu_to_le16(icid);
3965 rsp.result = cpu_to_le16(result);
3966
3967 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3968 }
3969
3970 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3971 struct l2cap_chan *chan, u16 icid, u16 result)
3972 {
3973 struct l2cap_move_chan_cfm cfm;
3974 u8 ident;
3975
3976 BT_DBG("icid %d, result %d", icid, result);
3977
3978 ident = l2cap_get_ident(conn);
3979 if (chan)
3980 chan->ident = ident;
3981
3982 cfm.icid = cpu_to_le16(icid);
3983 cfm.result = cpu_to_le16(result);
3984
3985 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3986 }
3987
3988 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3989 u16 icid)
3990 {
3991 struct l2cap_move_chan_cfm_rsp rsp;
3992
3993 BT_DBG("icid %d", icid);
3994
3995 rsp.icid = cpu_to_le16(icid);
3996 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3997 }
3998
3999 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4000 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4001 {
4002 struct l2cap_move_chan_req *req = data;
4003 u16 icid = 0;
4004 u16 result = L2CAP_MR_NOT_ALLOWED;
4005
4006 if (cmd_len != sizeof(*req))
4007 return -EPROTO;
4008
4009 icid = le16_to_cpu(req->icid);
4010
4011 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4012
4013 if (!enable_hs)
4014 return -EINVAL;
4015
4016 /* Placeholder: Always refuse */
4017 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4018
4019 return 0;
4020 }
4021
4022 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4023 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4024 {
4025 struct l2cap_move_chan_rsp *rsp = data;
4026 u16 icid, result;
4027
4028 if (cmd_len != sizeof(*rsp))
4029 return -EPROTO;
4030
4031 icid = le16_to_cpu(rsp->icid);
4032 result = le16_to_cpu(rsp->result);
4033
4034 BT_DBG("icid %d, result %d", icid, result);
4035
4036 /* Placeholder: Always unconfirmed */
4037 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4038
4039 return 0;
4040 }
4041
4042 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4043 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4044 {
4045 struct l2cap_move_chan_cfm *cfm = data;
4046 u16 icid, result;
4047
4048 if (cmd_len != sizeof(*cfm))
4049 return -EPROTO;
4050
4051 icid = le16_to_cpu(cfm->icid);
4052 result = le16_to_cpu(cfm->result);
4053
4054 BT_DBG("icid %d, result %d", icid, result);
4055
4056 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4057
4058 return 0;
4059 }
4060
4061 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4062 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4063 {
4064 struct l2cap_move_chan_cfm_rsp *rsp = data;
4065 u16 icid;
4066
4067 if (cmd_len != sizeof(*rsp))
4068 return -EPROTO;
4069
4070 icid = le16_to_cpu(rsp->icid);
4071
4072 BT_DBG("icid %d", icid);
4073
4074 return 0;
4075 }
4076
/* Validate LE connection parameter update values.
 *
 * min/max are the connection interval bounds (units of 1.25 ms,
 * allowed range 6..3200), to_multiplier is the supervision timeout
 * (units of 10 ms, allowed range 10..3200) and latency is the slave
 * latency (capped at 499 and at the value that still lets the
 * supervision timeout cover at least one event at the maximum
 * interval).
 *
 * Returns 0 when all parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 lat_limit;

	/* Interval bounds must be ordered and within 7.5 ms .. 4 s. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout must lie within 100 ms .. 32 s. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must be strictly longer than the max interval. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	lat_limit = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > lat_limit)
		return -EINVAL;

	return 0;
}
4097
4098 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4099 struct l2cap_cmd_hdr *cmd, u8 *data)
4100 {
4101 struct hci_conn *hcon = conn->hcon;
4102 struct l2cap_conn_param_update_req *req;
4103 struct l2cap_conn_param_update_rsp rsp;
4104 u16 min, max, latency, to_multiplier, cmd_len;
4105 int err;
4106
4107 if (!(hcon->link_mode & HCI_LM_MASTER))
4108 return -EINVAL;
4109
4110 cmd_len = __le16_to_cpu(cmd->len);
4111 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4112 return -EPROTO;
4113
4114 req = (struct l2cap_conn_param_update_req *) data;
4115 min = __le16_to_cpu(req->min);
4116 max = __le16_to_cpu(req->max);
4117 latency = __le16_to_cpu(req->latency);
4118 to_multiplier = __le16_to_cpu(req->to_multiplier);
4119
4120 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4121 min, max, latency, to_multiplier);
4122
4123 memset(&rsp, 0, sizeof(rsp));
4124
4125 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4126 if (err)
4127 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4128 else
4129 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4130
4131 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4132 sizeof(rsp), &rsp);
4133
4134 if (!err)
4135 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4136
4137 return 0;
4138 }
4139
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by reflecting the payload, and
 * echo responses are ignored.  Returns 0 or a handler's negative
 * error; an unknown opcode yields -EINVAL, which causes the caller to
 * send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4221
4222 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4223 struct l2cap_cmd_hdr *cmd, u8 *data)
4224 {
4225 switch (cmd->code) {
4226 case L2CAP_COMMAND_REJ:
4227 return 0;
4228
4229 case L2CAP_CONN_PARAM_UPDATE_REQ:
4230 return l2cap_conn_param_update_req(conn, cmd, data);
4231
4232 case L2CAP_CONN_PARAM_UPDATE_RSP:
4233 return 0;
4234
4235 default:
4236 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4237 return -EINVAL;
4238 }
4239 }
4240
/* Process a frame received on the signaling channel.
 *
 * A single C-frame may carry several commands; each is parsed and
 * dispatched in order to the LE or BR/EDR handler depending on the
 * link type.  A truncated command or a zero identifier aborts the
 * loop.  When a handler fails, a Command Reject with reason "not
 * understood" is sent back to the peer.
 *
 * NOTE(review): the "Wrong link type" error string is printed for any
 * handler failure, not just link-type mismatches - misleading but
 * preserved (see the FIXME below).
 *
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffing) sockets a copy first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same C-frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4287
/* Verify and strip the FCS (CRC16) trailer of a received ERTM frame.
 *
 * The basic L2CAP header has already been pulled, so the checksum is
 * computed starting at skb->data - hdr_size to cover the full
 * over-the-air frame.  Returns 0 when the FCS matches (or FCS is not
 * in use on this channel), -EBADMSG on a corrupted frame.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* skb_trim() only shortens skb->len; the FCS bytes remain
		 * readable just past the new end, which the next line
		 * relies on.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4308
/* Answer a poll (P=1) from the peer with a frame carrying F=1.
 *
 * If we are locally busy an RNR is sent immediately.  Pending i-frames
 * are then transmitted (one of which may carry the F-bit, clearing
 * CONN_SEND_FBIT); if the F-bit still has not gone out and we are not
 * busy, a final RR is sent.  Also restarts the retransmission timer
 * when the remote-busy condition clears with unacked frames pending.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4342
/* Chain new_frag onto skb's frag_list, tracked via *last_frag.
 *
 * *last_frag must point at the current tail of the chain (or at skb
 * itself while the list is still empty).  skb's length accounting is
 * updated so skb->len covers the head plus all fragments.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4361
/* Reassemble an SDU from received i-frames according to their SAR bits.
 *
 * Ownership: when a fragment is stored for later (SAR start/continue/
 * end) the local skb pointer is set to NULL so only the partial SDU is
 * freed on error, not the already-linked fragment twice.  On any error
 * the current skb and any partial SDU are discarded and the reassembly
 * state is reset.
 *
 * Returns 0 on success (including "fragment stored, waiting for
 * more"), or a negative error: -EINVAL for SAR sequence violations
 * (err's initial value), -EMSGSIZE when the announced SDU length
 * exceeds the MTU, or whatever the recv callback returned.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stored partial SDU means the peer violated SAR order. */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Fragment stored - do not free it below. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4443
4444 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4445 {
4446 u8 event;
4447
4448 if (chan->mode != L2CAP_MODE_ERTM)
4449 return;
4450
4451 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4452 l2cap_tx(chan, 0, 0, event);
4453 }
4454
/* Placeholder: processing of i-frames buffered in srej_q is not
 * implemented yet; always reports success.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
4460
/* Placeholder: handling of received SREJ s-frames is not implemented
 * yet; the frame is silently dropped.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	/* Placeholder */
}
4466
/* Placeholder: handling of received REJ s-frames is not implemented
 * yet; the frame is silently dropped.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	/* Placeholder */
}
4472
/* Classify a received i-frame's txseq relative to the channel's
 * expected and last-acked sequence state.
 *
 * The result tells the rx state machine how to treat the frame:
 * expected (in order), unexpected (a gap - triggers SREJ), duplicate,
 * one of the SREJ-specific variants while in the SREJ_SENT state, or
 * invalid (outside the tx window; ignorable only for small windows).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq falling before expected_tx_seq (modulo the sequence
	 * space) means this frame was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq,
			 chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4559
/* ERTM receive state machine: RECV (normal, in-sequence) state.
 *
 * Handles incoming i-frames and supervisory events while no SREJ is
 * outstanding.  An in-order i-frame is reassembled and acked; a gap in
 * the sequence queues the frame, issues an SREJ and moves the channel
 * to the SREJ_SENT state; an invalid sequence number disconnects.
 *
 * The skb is always consumed: either queued/passed on (skb_in_use) or
 * freed before returning.  Returns 0 or a negative error from SDU
 * reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Let the tx side process the piggybacked ack. */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* An F=1 frame completes an outstanding REJ
			 * recovery unless one was already in progress.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already have this frame; still process the ack. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote busy cleared: restart the retransmission
			 * timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control is used unconditionally above, so
		 * the NULL test here looks redundant - preserved as-is.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4694
/* ERTM receive state machine: SREJ_SENT state (gap recovery).
 *
 * While SREJs are outstanding, received i-frames are buffered in
 * srej_q: the frame matching the head of the SREJ list pops it and
 * triggers processing of the queued frames, new gaps produce
 * additional SREJs, and duplicates are dropped.  Supervisory frames
 * are handled much like in the RECV state, except a poll is answered
 * via the SREJ tail.
 *
 * The skb is always consumed: either queued (skb_in_use) or freed
 * before returning.  Returns 0 or a negative error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This answers the head of the SREJ list. */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 completes an outstanding REJ recovery unless
			 * one was already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the SREJ tail. */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4838
4839 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4840 {
4841 /* Make sure reqseq is for a packet that has been sent but not acked */
4842 u16 unacked;
4843
4844 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4845 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4846 }
4847
4848 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4849 struct sk_buff *skb, u8 event)
4850 {
4851 int err = 0;
4852
4853 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4854 control, skb, event, chan->rx_state);
4855
4856 if (__valid_reqseq(chan, control->reqseq)) {
4857 switch (chan->rx_state) {
4858 case L2CAP_RX_STATE_RECV:
4859 err = l2cap_rx_state_recv(chan, control, skb, event);
4860 break;
4861 case L2CAP_RX_STATE_SREJ_SENT:
4862 err = l2cap_rx_state_srej_sent(chan, control, skb,
4863 event);
4864 break;
4865 default:
4866 /* shut it down */
4867 break;
4868 }
4869 } else {
4870 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4871 control->reqseq, chan->next_tx_seq,
4872 chan->expected_ack_seq);
4873 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4874 }
4875
4876 return err;
4877 }
4878
4879 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4880 struct sk_buff *skb)
4881 {
4882 int err = 0;
4883
4884 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
4885 chan->rx_state);
4886
4887 if (l2cap_classify_txseq(chan, control->txseq) ==
4888 L2CAP_TXSEQ_EXPECTED) {
4889 l2cap_pass_to_tx(chan, control);
4890
4891 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4892 __next_seq(chan, chan->buffer_seq));
4893
4894 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4895
4896 l2cap_reassemble_sdu(chan, skb, control);
4897 } else {
4898 if (chan->sdu) {
4899 kfree_skb(chan->sdu);
4900 chan->sdu = NULL;
4901 }
4902 chan->sdu_last_frag = NULL;
4903 chan->sdu_len = 0;
4904
4905 if (skb) {
4906 BT_DBG("Freeing %p", skb);
4907 kfree_skb(skb);
4908 }
4909 }
4910
4911 chan->last_acked_seq = control->txseq;
4912 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4913
4914 return err;
4915 }
4916
/* Entry point for a single received ERTM/streaming-mode PDU.
 *
 * Unpacks the control field, verifies the FCS and payload length, then
 * dispatches I-frames and S-frames into the RX state machine (or the
 * simplified streaming receive path).
 *
 * Consumes @skb on every path: it is either queued downstream or freed
 * here.  Always returns 0; protocol errors trigger a disconnect request
 * rather than propagating an error code upward.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field in a START fragment is not payload */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	/* Nor is the trailing checksum, when FCS is in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload exceeding the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field onto RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames must carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5005
/* Deliver one frame received on a connection-oriented data channel.
 *
 * NOTE(review): chan appears to be returned locked by
 * l2cap_get_chan_by_scid() - every exit path below funnels through the
 * l2cap_chan_unlock() at "done"; confirm against its definition.
 *
 * Consumes @skb on all paths; always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed; otherwise
		 * fall through to drop and free it here.
		 */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on every path */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5055
5056 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5057 {
5058 struct l2cap_chan *chan;
5059
5060 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5061 if (!chan)
5062 goto drop;
5063
5064 BT_DBG("chan %p, len %d", chan, skb->len);
5065
5066 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5067 goto drop;
5068
5069 if (chan->imtu < skb->len)
5070 goto drop;
5071
5072 if (!chan->ops->recv(chan->data, skb))
5073 return 0;
5074
5075 drop:
5076 kfree_skb(skb);
5077
5078 return 0;
5079 }
5080
5081 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5082 struct sk_buff *skb)
5083 {
5084 struct l2cap_chan *chan;
5085
5086 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5087 if (!chan)
5088 goto drop;
5089
5090 BT_DBG("chan %p, len %d", chan, skb->len);
5091
5092 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5093 goto drop;
5094
5095 if (chan->imtu < skb->len)
5096 goto drop;
5097
5098 if (!chan->ops->recv(chan->data, skb))
5099 return 0;
5100
5101 drop:
5102 kfree_skb(skb);
5103
5104 return 0;
5105 }
5106
/* Demultiplex one complete L2CAP frame to its channel handler based on
 * the CID in the Basic header.  Consumes @skb on every path (the
 * channel handlers each take ownership of it).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh was captured before the pull and still points at the header
	 * bytes; skb->data now starts at the payload.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payloads are prefixed with a 2-byte PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* An SMP protocol violation kills the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5150
5151 /* ---- L2CAP interface with lower layer (HCI) ---- */
5152
5153 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5154 {
5155 int exact = 0, lm1 = 0, lm2 = 0;
5156 struct l2cap_chan *c;
5157
5158 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5159
5160 /* Find listening sockets and check their link_mode */
5161 read_lock(&chan_list_lock);
5162 list_for_each_entry(c, &chan_list, global_l) {
5163 struct sock *sk = c->sk;
5164
5165 if (c->state != BT_LISTEN)
5166 continue;
5167
5168 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5169 lm1 |= HCI_LM_ACCEPT;
5170 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5171 lm1 |= HCI_LM_MASTER;
5172 exact++;
5173 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5174 lm2 |= HCI_LM_ACCEPT;
5175 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5176 lm2 |= HCI_LM_MASTER;
5177 }
5178 }
5179 read_unlock(&chan_list_lock);
5180
5181 return exact ? lm1 : lm2;
5182 }
5183
5184 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5185 {
5186 struct l2cap_conn *conn;
5187
5188 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5189
5190 if (!status) {
5191 conn = l2cap_conn_add(hcon, status);
5192 if (conn)
5193 l2cap_conn_ready(conn);
5194 } else
5195 l2cap_conn_del(hcon, bt_to_errno(status));
5196
5197 return 0;
5198 }
5199
5200 int l2cap_disconn_ind(struct hci_conn *hcon)
5201 {
5202 struct l2cap_conn *conn = hcon->l2cap_data;
5203
5204 BT_DBG("hcon %p", hcon);
5205
5206 if (!conn)
5207 return HCI_ERROR_REMOTE_USER_TERM;
5208 return conn->disc_reason;
5209 }
5210
/* HCI callback: the ACL link is gone; tear down the L2CAP connection
 * with an errno derived from the HCI reason code.  Always returns 0.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5218
5219 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5220 {
5221 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5222 return;
5223
5224 if (encrypt == 0x00) {
5225 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5226 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5227 } else if (chan->sec_level == BT_SECURITY_HIGH)
5228 l2cap_chan_close(chan, ECONNREFUSED);
5229 } else {
5230 if (chan->sec_level == BT_SECURITY_MEDIUM)
5231 __clear_chan_timer(chan);
5232 }
5233 }
5234
/* HCI callback: authentication/encryption state changed on an ACL link.
 *
 * Walks every channel on the connection and advances or aborts its
 * setup according to @status (0 on success) and @encrypt (new link
 * encryption state).  Always returns 0.
 *
 * Locking: takes conn->chan_lock for the walk, l2cap_chan_lock() per
 * channel, and additionally lock_sock() while building a deferred
 * Connect Response.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Newly encrypted LE link: kick off SMP key distribution
		 * and stop the pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE data channel becomes ready once the link is
			 * encrypted; it inherits the link security level.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect request is already in flight for this channel */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Established channel: resume writers that were
			 * suspended during the security upgrade.
			 */
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was deferred pending security:
			 * answer the remote's Connect Request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize;
					 * wake the listening parent.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection and
				 * schedule the channel for disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5335
/* HCI callback: reassemble incoming ACL fragments into L2CAP frames.
 *
 * A start fragment (ACL_CONT clear) carries the Basic L2CAP header,
 * from which the total frame length is derived; continuation fragments
 * are appended into conn->rx_skb until conn->rx_len reaches zero, at
 * which point the complete frame goes to l2cap_recv_frame().
 *
 * Consumes @skb on all paths; always returns 0.  Framing violations
 * mark the connection unreliable (ECOMM) and reset reassembly state.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the announced frame length */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5427
5428 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5429 {
5430 struct l2cap_chan *c;
5431
5432 read_lock(&chan_list_lock);
5433
5434 list_for_each_entry(c, &chan_list, global_l) {
5435 struct sock *sk = c->sk;
5436
5437 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5438 batostr(&bt_sk(sk)->src),
5439 batostr(&bt_sk(sk)->dst),
5440 c->state, __le16_to_cpu(c->psm),
5441 c->scid, c->dcid, c->imtu, c->omtu,
5442 c->sec_level, c->mode);
5443 }
5444
5445 read_unlock(&chan_list_lock);
5446
5447 return 0;
5448 }
5449
/* debugfs open hook: bind the single-record seq_file show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5454
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry; created in l2cap_init(), removed in
 * l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
5463
5464 int __init l2cap_init(void)
5465 {
5466 int err;
5467
5468 err = l2cap_init_sockets();
5469 if (err < 0)
5470 return err;
5471
5472 if (bt_debugfs) {
5473 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5474 bt_debugfs, NULL, &l2cap_debugfs_fops);
5475 if (!l2cap_debugfs)
5476 BT_ERR("Failed to create L2CAP debug file");
5477 }
5478
5479 return 0;
5480 }
5481
/* Module teardown: remove the debugfs entry, then unregister the
 * socket layer (reverse of l2cap_init()).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5487
/* Runtime-writable knob (mode 0644) to disable ERTM negotiation */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");