bb42d95f4f41552835ea80288da7115ec9235235
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 struct l2cap_chan *c;
90
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
93 if (c)
94 l2cap_chan_lock(c);
95 mutex_unlock(&conn->chan_lock);
96
97 return c;
98 }
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
123 {
124 int err;
125
126 write_lock(&chan_list_lock);
127
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
129 err = -EADDRINUSE;
130 goto done;
131 }
132
133 if (psm) {
134 chan->psm = psm;
135 chan->sport = psm;
136 err = 0;
137 } else {
138 u16 p;
139
140 err = -EINVAL;
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
145 err = 0;
146 break;
147 }
148 }
149
150 done:
151 write_unlock(&chan_list_lock);
152 return err;
153 }
154
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 {
157 write_lock(&chan_list_lock);
158
159 chan->scid = scid;
160
161 write_unlock(&chan_list_lock);
162
163 return 0;
164 }
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179 {
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
182
183 chan->state = state;
184 chan->ops->state_change(chan, state);
185 }
186
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 {
189 struct sock *sk = chan->sk;
190
191 lock_sock(sk);
192 __l2cap_state_change(chan, state);
193 release_sock(sk);
194 }
195
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 {
198 struct sock *sk = chan->sk;
199
200 sk->sk_err = err;
201 }
202
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 {
205 struct sock *sk = chan->sk;
206
207 lock_sock(sk);
208 __l2cap_chan_set_err(chan, err);
209 release_sock(sk);
210 }
211
212 static void __set_retrans_timer(struct l2cap_chan *chan)
213 {
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
218 }
219 }
220
221 static void __set_monitor_timer(struct l2cap_chan *chan)
222 {
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
227 }
228 }
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275 }
276
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
278 {
279 kfree(seq_list->list);
280 }
281
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
283 u16 seq)
284 {
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
287 }
288
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
290 {
291 u16 mask = seq_list->mask;
292
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
300
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
304 }
305 } else {
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
312 }
313
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
319 }
320 return seq;
321 }
322
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
324 {
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
327 }
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
344 {
345 u16 mask = seq_list->mask;
346
347 /* All appends happen in constant time */
348
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
350 return;
351
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
354 else
355 seq_list->list[seq_list->tail & mask] = seq;
356
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
359 }
360
/* Channel timer expiry handler: close the channel with an errno that
 * reflects the state it timed out in.  Locking order matters here:
 * conn->chan_lock, then the chan lock; ops->close() runs after the chan
 * lock is dropped but still under conn->chan_lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
390
391 struct l2cap_chan *l2cap_chan_create(void)
392 {
393 struct l2cap_chan *chan;
394
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
396 if (!chan)
397 return NULL;
398
399 mutex_init(&chan->lock);
400
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
404
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406
407 chan->state = BT_OPEN;
408
409 kref_init(&chan->kref);
410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
414 BT_DBG("chan %p", chan);
415
416 return chan;
417 }
418
419 static void l2cap_chan_destroy(struct kref *kref)
420 {
421 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
422
423 BT_DBG("chan %p", chan);
424
425 write_lock(&chan_list_lock);
426 list_del(&chan->global_l);
427 write_unlock(&chan_list_lock);
428
429 kfree(chan);
430 }
431
432 void l2cap_chan_hold(struct l2cap_chan *c)
433 {
434 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
435
436 kref_get(&c->kref);
437 }
438
439 void l2cap_chan_put(struct l2cap_chan *c)
440 {
441 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
442
443 kref_put(&c->kref, l2cap_chan_destroy);
444 }
445
446 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447 {
448 chan->fcs = L2CAP_FCS_CRC16;
449 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
453 chan->sec_level = BT_SECURITY_LOW;
454
455 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
456 }
457
458 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
459 {
460 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
461 __le16_to_cpu(chan->psm), chan->dcid);
462
463 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
464
465 chan->conn = conn;
466
467 switch (chan->chan_type) {
468 case L2CAP_CHAN_CONN_ORIENTED:
469 if (conn->hcon->type == LE_LINK) {
470 /* LE connection */
471 chan->omtu = L2CAP_DEFAULT_MTU;
472 chan->scid = L2CAP_CID_LE_DATA;
473 chan->dcid = L2CAP_CID_LE_DATA;
474 } else {
475 /* Alloc CID for connection-oriented socket */
476 chan->scid = l2cap_alloc_cid(conn);
477 chan->omtu = L2CAP_DEFAULT_MTU;
478 }
479 break;
480
481 case L2CAP_CHAN_CONN_LESS:
482 /* Connectionless socket */
483 chan->scid = L2CAP_CID_CONN_LESS;
484 chan->dcid = L2CAP_CID_CONN_LESS;
485 chan->omtu = L2CAP_DEFAULT_MTU;
486 break;
487
488 case L2CAP_CHAN_CONN_FIX_A2MP:
489 chan->scid = L2CAP_CID_A2MP;
490 chan->dcid = L2CAP_CID_A2MP;
491 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
492 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
493 break;
494
495 default:
496 /* Raw socket can send/recv signalling messages only */
497 chan->scid = L2CAP_CID_SIGNALING;
498 chan->dcid = L2CAP_CID_SIGNALING;
499 chan->omtu = L2CAP_DEFAULT_MTU;
500 }
501
502 chan->local_id = L2CAP_BESTEFFORT_ID;
503 chan->local_stype = L2CAP_SERV_BESTEFFORT;
504 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
505 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
506 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
507 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
508
509 l2cap_chan_hold(chan);
510
511 list_add(&chan->list, &conn->chan_l);
512 }
513
514 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
515 {
516 mutex_lock(&conn->chan_lock);
517 __l2cap_chan_add(conn, chan);
518 mutex_unlock(&conn->chan_lock);
519 }
520
521 void l2cap_chan_del(struct l2cap_chan *chan, int err)
522 {
523 struct l2cap_conn *conn = chan->conn;
524
525 __clear_chan_timer(chan);
526
527 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
528
529 if (conn) {
530 /* Delete from channel list */
531 list_del(&chan->list);
532
533 l2cap_chan_put(chan);
534
535 chan->conn = NULL;
536
537 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
538 hci_conn_put(conn->hcon);
539 }
540
541 if (chan->ops->teardown)
542 chan->ops->teardown(chan, err);
543
544 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
545 return;
546
547 switch(chan->mode) {
548 case L2CAP_MODE_BASIC:
549 break;
550
551 case L2CAP_MODE_ERTM:
552 __clear_retrans_timer(chan);
553 __clear_monitor_timer(chan);
554 __clear_ack_timer(chan);
555
556 skb_queue_purge(&chan->srej_q);
557
558 l2cap_seq_list_free(&chan->srej_list);
559 l2cap_seq_list_free(&chan->retrans_list);
560
561 /* fall through */
562
563 case L2CAP_MODE_STREAMING:
564 skb_queue_purge(&chan->tx_q);
565 break;
566 }
567
568 return;
569 }
570
571 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
572 {
573 struct l2cap_conn *conn = chan->conn;
574 struct sock *sk = chan->sk;
575
576 BT_DBG("chan %p state %s sk %p", chan,
577 state_to_string(chan->state), sk);
578
579 switch (chan->state) {
580 case BT_LISTEN:
581 if (chan->ops->teardown)
582 chan->ops->teardown(chan, 0);
583 break;
584
585 case BT_CONNECTED:
586 case BT_CONFIG:
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason);
591 } else
592 l2cap_chan_del(chan, reason);
593 break;
594
595 case BT_CONNECT2:
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp;
599 __u16 result;
600
601 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
602 result = L2CAP_CR_SEC_BLOCK;
603 else
604 result = L2CAP_CR_BAD_PSM;
605 l2cap_state_change(chan, BT_DISCONN);
606
607 rsp.scid = cpu_to_le16(chan->dcid);
608 rsp.dcid = cpu_to_le16(chan->scid);
609 rsp.result = cpu_to_le16(result);
610 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
612 sizeof(rsp), &rsp);
613 }
614
615 l2cap_chan_del(chan, reason);
616 break;
617
618 case BT_CONNECT:
619 case BT_DISCONN:
620 l2cap_chan_del(chan, reason);
621 break;
622
623 default:
624 if (chan->ops->teardown)
625 chan->ops->teardown(chan, 0);
626 break;
627 }
628 }
629
630 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
631 {
632 if (chan->chan_type == L2CAP_CHAN_RAW) {
633 switch (chan->sec_level) {
634 case BT_SECURITY_HIGH:
635 return HCI_AT_DEDICATED_BONDING_MITM;
636 case BT_SECURITY_MEDIUM:
637 return HCI_AT_DEDICATED_BONDING;
638 default:
639 return HCI_AT_NO_BONDING;
640 }
641 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
642 if (chan->sec_level == BT_SECURITY_LOW)
643 chan->sec_level = BT_SECURITY_SDP;
644
645 if (chan->sec_level == BT_SECURITY_HIGH)
646 return HCI_AT_NO_BONDING_MITM;
647 else
648 return HCI_AT_NO_BONDING;
649 } else {
650 switch (chan->sec_level) {
651 case BT_SECURITY_HIGH:
652 return HCI_AT_GENERAL_BONDING_MITM;
653 case BT_SECURITY_MEDIUM:
654 return HCI_AT_GENERAL_BONDING;
655 default:
656 return HCI_AT_NO_BONDING;
657 }
658 }
659 }
660
661 /* Service level security */
662 int l2cap_chan_check_security(struct l2cap_chan *chan)
663 {
664 struct l2cap_conn *conn = chan->conn;
665 __u8 auth_type;
666
667 auth_type = l2cap_get_auth_type(chan);
668
669 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
670 }
671
672 static u8 l2cap_get_ident(struct l2cap_conn *conn)
673 {
674 u8 id;
675
676 /* Get next available identificator.
677 * 1 - 128 are used by kernel.
678 * 129 - 199 are reserved.
679 * 200 - 254 are used by utilities like l2ping, etc.
680 */
681
682 spin_lock(&conn->lock);
683
684 if (++conn->tx_ident > 128)
685 conn->tx_ident = 1;
686
687 id = conn->tx_ident;
688
689 spin_unlock(&conn->lock);
690
691 return id;
692 }
693
694 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
695 {
696 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
697 u8 flags;
698
699 BT_DBG("code 0x%2.2x", code);
700
701 if (!skb)
702 return;
703
704 if (lmp_no_flush_capable(conn->hcon->hdev))
705 flags = ACL_START_NO_FLUSH;
706 else
707 flags = ACL_START;
708
709 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
710 skb->priority = HCI_PRIO_MAX;
711
712 hci_send_acl(conn->hchan, skb, flags);
713 }
714
715 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
716 {
717 struct hci_conn *hcon = chan->conn->hcon;
718 u16 flags;
719
720 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
721 skb->priority);
722
723 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
724 lmp_no_flush_capable(hcon->hdev))
725 flags = ACL_START_NO_FLUSH;
726 else
727 flags = ACL_START;
728
729 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
730 hci_send_acl(chan->conn->hchan, skb, flags);
731 }
732
733 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
734 {
735 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
736 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
737
738 if (enh & L2CAP_CTRL_FRAME_TYPE) {
739 /* S-Frame */
740 control->sframe = 1;
741 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
742 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
743
744 control->sar = 0;
745 control->txseq = 0;
746 } else {
747 /* I-Frame */
748 control->sframe = 0;
749 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
750 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
751
752 control->poll = 0;
753 control->super = 0;
754 }
755 }
756
757 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
758 {
759 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
760 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
761
762 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
763 /* S-Frame */
764 control->sframe = 1;
765 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
766 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
767
768 control->sar = 0;
769 control->txseq = 0;
770 } else {
771 /* I-Frame */
772 control->sframe = 0;
773 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
774 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
775
776 control->poll = 0;
777 control->super = 0;
778 }
779 }
780
781 static inline void __unpack_control(struct l2cap_chan *chan,
782 struct sk_buff *skb)
783 {
784 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
785 __unpack_extended_control(get_unaligned_le32(skb->data),
786 &bt_cb(skb)->control);
787 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
788 } else {
789 __unpack_enhanced_control(get_unaligned_le16(skb->data),
790 &bt_cb(skb)->control);
791 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
792 }
793 }
794
795 static u32 __pack_extended_control(struct l2cap_ctrl *control)
796 {
797 u32 packed;
798
799 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
800 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
801
802 if (control->sframe) {
803 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
804 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
805 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
806 } else {
807 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
808 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
809 }
810
811 return packed;
812 }
813
814 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
815 {
816 u16 packed;
817
818 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
819 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
820
821 if (control->sframe) {
822 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
823 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
824 packed |= L2CAP_CTRL_FRAME_TYPE;
825 } else {
826 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
827 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
828 }
829
830 return packed;
831 }
832
833 static inline void __pack_control(struct l2cap_chan *chan,
834 struct l2cap_ctrl *control,
835 struct sk_buff *skb)
836 {
837 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
838 put_unaligned_le32(__pack_extended_control(control),
839 skb->data + L2CAP_HDR_SIZE);
840 } else {
841 put_unaligned_le16(__pack_enhanced_control(control),
842 skb->data + L2CAP_HDR_SIZE);
843 }
844 }
845
846 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
847 {
848 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
849 return L2CAP_EXT_HDR_SIZE;
850 else
851 return L2CAP_ENH_HDR_SIZE;
852 }
853
854 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
855 u32 control)
856 {
857 struct sk_buff *skb;
858 struct l2cap_hdr *lh;
859 int hlen = __ertm_hdr_size(chan);
860
861 if (chan->fcs == L2CAP_FCS_CRC16)
862 hlen += L2CAP_FCS_SIZE;
863
864 skb = bt_skb_alloc(hlen, GFP_KERNEL);
865
866 if (!skb)
867 return ERR_PTR(-ENOMEM);
868
869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
870 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
871 lh->cid = cpu_to_le16(chan->dcid);
872
873 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
874 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
875 else
876 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
877
878 if (chan->fcs == L2CAP_FCS_CRC16) {
879 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
880 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
881 }
882
883 skb->priority = HCI_PRIO_MAX;
884 return skb;
885 }
886
887 static void l2cap_send_sframe(struct l2cap_chan *chan,
888 struct l2cap_ctrl *control)
889 {
890 struct sk_buff *skb;
891 u32 control_field;
892
893 BT_DBG("chan %p, control %p", chan, control);
894
895 if (!control->sframe)
896 return;
897
898 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
899 !control->poll)
900 control->final = 1;
901
902 if (control->super == L2CAP_SUPER_RR)
903 clear_bit(CONN_RNR_SENT, &chan->conn_state);
904 else if (control->super == L2CAP_SUPER_RNR)
905 set_bit(CONN_RNR_SENT, &chan->conn_state);
906
907 if (control->super != L2CAP_SUPER_SREJ) {
908 chan->last_acked_seq = control->reqseq;
909 __clear_ack_timer(chan);
910 }
911
912 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
913 control->final, control->poll, control->super);
914
915 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
916 control_field = __pack_extended_control(control);
917 else
918 control_field = __pack_enhanced_control(control);
919
920 skb = l2cap_create_sframe_pdu(chan, control_field);
921 if (!IS_ERR(skb))
922 l2cap_do_send(chan, skb);
923 }
924
925 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
926 {
927 struct l2cap_ctrl control;
928
929 BT_DBG("chan %p, poll %d", chan, poll);
930
931 memset(&control, 0, sizeof(control));
932 control.sframe = 1;
933 control.poll = poll;
934
935 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
936 control.super = L2CAP_SUPER_RNR;
937 else
938 control.super = L2CAP_SUPER_RR;
939
940 control.reqseq = chan->buffer_seq;
941 l2cap_send_sframe(chan, &control);
942 }
943
944 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
945 {
946 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
947 }
948
949 static bool __amp_capable(struct l2cap_chan *chan)
950 {
951 struct l2cap_conn *conn = chan->conn;
952
953 if (enable_hs &&
954 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
955 conn->fixed_chan_mask & L2CAP_FC_A2MP)
956 return true;
957 else
958 return false;
959 }
960
961 void l2cap_send_conn_req(struct l2cap_chan *chan)
962 {
963 struct l2cap_conn *conn = chan->conn;
964 struct l2cap_conn_req req;
965
966 req.scid = cpu_to_le16(chan->scid);
967 req.psm = chan->psm;
968
969 chan->ident = l2cap_get_ident(conn);
970
971 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
972
973 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
974 }
975
976 static void l2cap_chan_ready(struct l2cap_chan *chan)
977 {
978 /* This clears all conf flags, including CONF_NOT_COMPLETE */
979 chan->conf_state = 0;
980 __clear_chan_timer(chan);
981
982 chan->state = BT_CONNECTED;
983
984 chan->ops->ready(chan);
985 }
986
/* Begin channel setup: discover AMP controllers first when the channel
 * prefers AMP, otherwise send the Connection Request immediately.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
996
997 static void l2cap_do_start(struct l2cap_chan *chan)
998 {
999 struct l2cap_conn *conn = chan->conn;
1000
1001 if (conn->hcon->type == LE_LINK) {
1002 l2cap_chan_ready(chan);
1003 return;
1004 }
1005
1006 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1007 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1008 return;
1009
1010 if (l2cap_chan_check_security(chan) &&
1011 __l2cap_no_conn_pending(chan)) {
1012 l2cap_start_connection(chan);
1013 }
1014 } else {
1015 struct l2cap_info_req req;
1016 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1017
1018 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1019 conn->info_ident = l2cap_get_ident(conn);
1020
1021 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1022
1023 l2cap_send_cmd(conn, conn->info_ident,
1024 L2CAP_INFO_REQ, sizeof(req), &req);
1025 }
1026 }
1027
1028 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1029 {
1030 u32 local_feat_mask = l2cap_feat_mask;
1031 if (!disable_ertm)
1032 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1033
1034 switch (mode) {
1035 case L2CAP_MODE_ERTM:
1036 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1037 case L2CAP_MODE_STREAMING:
1038 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1039 default:
1040 return 0x00;
1041 }
1042 }
1043
1044 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1045 {
1046 struct sock *sk = chan->sk;
1047 struct l2cap_disconn_req req;
1048
1049 if (!conn)
1050 return;
1051
1052 if (chan->mode == L2CAP_MODE_ERTM) {
1053 __clear_retrans_timer(chan);
1054 __clear_monitor_timer(chan);
1055 __clear_ack_timer(chan);
1056 }
1057
1058 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1059 __l2cap_state_change(chan, BT_DISCONN);
1060 return;
1061 }
1062
1063 req.dcid = cpu_to_le16(chan->dcid);
1064 req.scid = cpu_to_le16(chan->scid);
1065 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1066 L2CAP_DISCONN_REQ, sizeof(req), &req);
1067
1068 lock_sock(sk);
1069 __l2cap_state_change(chan, BT_DISCONN);
1070 __l2cap_chan_set_err(chan, err);
1071 release_sock(sk);
1072 }
1073
1074 /* ---- L2CAP connections ---- */
1075 static void l2cap_conn_start(struct l2cap_conn *conn)
1076 {
1077 struct l2cap_chan *chan, *tmp;
1078
1079 BT_DBG("conn %p", conn);
1080
1081 mutex_lock(&conn->chan_lock);
1082
1083 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1084 struct sock *sk = chan->sk;
1085
1086 l2cap_chan_lock(chan);
1087
1088 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1089 l2cap_chan_unlock(chan);
1090 continue;
1091 }
1092
1093 if (chan->state == BT_CONNECT) {
1094 if (!l2cap_chan_check_security(chan) ||
1095 !__l2cap_no_conn_pending(chan)) {
1096 l2cap_chan_unlock(chan);
1097 continue;
1098 }
1099
1100 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1101 && test_bit(CONF_STATE2_DEVICE,
1102 &chan->conf_state)) {
1103 l2cap_chan_close(chan, ECONNRESET);
1104 l2cap_chan_unlock(chan);
1105 continue;
1106 }
1107
1108 l2cap_start_connection(chan);
1109
1110 } else if (chan->state == BT_CONNECT2) {
1111 struct l2cap_conn_rsp rsp;
1112 char buf[128];
1113 rsp.scid = cpu_to_le16(chan->dcid);
1114 rsp.dcid = cpu_to_le16(chan->scid);
1115
1116 if (l2cap_chan_check_security(chan)) {
1117 lock_sock(sk);
1118 if (test_bit(BT_SK_DEFER_SETUP,
1119 &bt_sk(sk)->flags)) {
1120 struct sock *parent = bt_sk(sk)->parent;
1121 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1122 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1123 if (parent)
1124 parent->sk_data_ready(parent, 0);
1125
1126 } else {
1127 __l2cap_state_change(chan, BT_CONFIG);
1128 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1129 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1130 }
1131 release_sock(sk);
1132 } else {
1133 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1134 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1135 }
1136
1137 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1138 sizeof(rsp), &rsp);
1139
1140 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1141 rsp.result != L2CAP_CR_SUCCESS) {
1142 l2cap_chan_unlock(chan);
1143 continue;
1144 }
1145
1146 set_bit(CONF_REQ_SENT, &chan->conf_state);
1147 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1148 l2cap_build_conf_req(chan, buf), buf);
1149 chan->num_conf_req++;
1150 }
1151
1152 l2cap_chan_unlock(chan);
1153 }
1154
1155 mutex_unlock(&conn->chan_lock);
1156 }
1157
1158 /* Find socket with cid and source/destination bdaddr.
1159 * Returns closest match, locked.
1160 */
1161 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1162 bdaddr_t *src,
1163 bdaddr_t *dst)
1164 {
1165 struct l2cap_chan *c, *c1 = NULL;
1166
1167 read_lock(&chan_list_lock);
1168
1169 list_for_each_entry(c, &chan_list, global_l) {
1170 struct sock *sk = c->sk;
1171
1172 if (state && c->state != state)
1173 continue;
1174
1175 if (c->scid == cid) {
1176 int src_match, dst_match;
1177 int src_any, dst_any;
1178
1179 /* Exact match. */
1180 src_match = !bacmp(&bt_sk(sk)->src, src);
1181 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1182 if (src_match && dst_match) {
1183 read_unlock(&chan_list_lock);
1184 return c;
1185 }
1186
1187 /* Closest match */
1188 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1189 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1190 if ((src_match && dst_any) || (src_any && dst_match) ||
1191 (src_any && dst_any))
1192 c1 = c;
1193 }
1194 }
1195
1196 read_unlock(&chan_list_lock);
1197
1198 return c1;
1199 }
1200
/* Handle an incoming LE connection: if a listening channel exists for
 * the LE data CID, spawn a child channel on it and mark it ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	/* The parent socket is locked while the child is created and
	 * enqueued on its accept queue.
	 */
	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the HCI link alive while this channel exists. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* The child inherits the link's addresses. */
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1239
/* The underlying link is up: start or complete connection setup for
 * every channel attached to this L2CAP connection.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: accept on a listening LE channel if any. */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP with the pending security level. */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels have their own setup path. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channel is ready once security is in place. */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * signalling: connected as soon as the link is.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1284
1285 /* Notify sockets that we cannot guaranty reliability anymore */
1286 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1287 {
1288 struct l2cap_chan *chan;
1289
1290 BT_DBG("conn %p", conn);
1291
1292 mutex_lock(&conn->chan_lock);
1293
1294 list_for_each_entry(chan, &conn->chan_l, list) {
1295 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1296 __l2cap_chan_set_err(chan, err);
1297 }
1298
1299 mutex_unlock(&conn->chan_lock);
1300 }
1301
1302 static void l2cap_info_timeout(struct work_struct *work)
1303 {
1304 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1305 info_timer.work);
1306
1307 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1308 conn->info_ident = 0;
1309
1310 l2cap_conn_start(conn);
1311 }
1312
/* Tear down an L2CAP connection after the underlying HCI link went
 * away: delete every channel (notifying its owner via ->close), stop
 * pending delayed work and free the connection object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Any partially reassembled frame is useless now. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra ref so the channel outlives l2cap_chan_del()
		 * until ->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* ->close() is invoked after the channel lock is
		 * dropped (NOTE(review): presumably because it takes
		 * the sock lock — confirm).
		 */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE links with pairing in progress also have the security
	 * timer armed and an SMP context to free.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1355
1356 static void security_timeout(struct work_struct *work)
1357 {
1358 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1359 security_timer.work);
1360
1361 BT_DBG("conn %p", conn);
1362
1363 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1364 smp_chan_destroy(conn);
1365 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1366 }
1367 }
1368
1369 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1370 {
1371 struct l2cap_conn *conn = hcon->l2cap_data;
1372 struct hci_chan *hchan;
1373
1374 if (conn || status)
1375 return conn;
1376
1377 hchan = hci_chan_create(hcon);
1378 if (!hchan)
1379 return NULL;
1380
1381 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1382 if (!conn) {
1383 hci_chan_del(hchan);
1384 return NULL;
1385 }
1386
1387 hcon->l2cap_data = conn;
1388 conn->hcon = hcon;
1389 conn->hchan = hchan;
1390
1391 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1392
1393 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1394 conn->mtu = hcon->hdev->le_mtu;
1395 else
1396 conn->mtu = hcon->hdev->acl_mtu;
1397
1398 conn->src = &hcon->hdev->bdaddr;
1399 conn->dst = &hcon->dst;
1400
1401 conn->feat_mask = 0;
1402
1403 spin_lock_init(&conn->lock);
1404 mutex_init(&conn->chan_lock);
1405
1406 INIT_LIST_HEAD(&conn->chan_l);
1407
1408 if (hcon->type == LE_LINK)
1409 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1410 else
1411 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1412
1413 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1414
1415 return conn;
1416 }
1417
1418 /* ---- Socket interface ---- */
1419
/* Find channel with given PSM and source/destination bdaddr.
 * Returns closest match.
 */
1423 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1424 bdaddr_t *src,
1425 bdaddr_t *dst)
1426 {
1427 struct l2cap_chan *c, *c1 = NULL;
1428
1429 read_lock(&chan_list_lock);
1430
1431 list_for_each_entry(c, &chan_list, global_l) {
1432 struct sock *sk = c->sk;
1433
1434 if (state && c->state != state)
1435 continue;
1436
1437 if (c->psm == psm) {
1438 int src_match, dst_match;
1439 int src_any, dst_any;
1440
1441 /* Exact match. */
1442 src_match = !bacmp(&bt_sk(sk)->src, src);
1443 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1444 if (src_match && dst_match) {
1445 read_unlock(&chan_list_lock);
1446 return c;
1447 }
1448
1449 /* Closest match */
1450 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1451 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1452 if ((src_match && dst_any) || (src_any && dst_match) ||
1453 (src_any && dst_any))
1454 c1 = c;
1455 }
1456 }
1457
1458 read_unlock(&chan_list_lock);
1459
1460 return c1;
1461 }
1462
/* Initiate an outgoing L2CAP connection on @chan to @dst, identified
 * either by @psm (connection-oriented) or a fixed @cid.  Resolves the
 * route, creates the ACL or LE link as needed and kicks off channel
 * setup.  Returns 0 on success (or if already connecting) and a
 * negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming modes can be disabled via module parameter. */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* LE links carry at most one channel here. */
	if (hcon->type == LE_LINK) {
		err = 0;

		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* Drop and re-take the channel lock around l2cap_chan_add()
	 * (NOTE(review): presumably to respect the chan_lock -> chan
	 * locking order — confirm).
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No L2CAP signalling needed; connected as soon
			 * as security allows.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1601
/* Sleep until every transmitted ERTM frame has been acked, the channel
 * loses its connection, a signal arrives or the socket reports an
 * error.  Called with the socket locked; the lock is dropped around
 * each sleep.  Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval once it has fully elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the sock lock while sleeping so acks can be
		 * processed, then re-check the condition.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1633
1634 static void l2cap_monitor_timeout(struct work_struct *work)
1635 {
1636 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1637 monitor_timer.work);
1638
1639 BT_DBG("chan %p", chan);
1640
1641 l2cap_chan_lock(chan);
1642
1643 if (!chan->conn) {
1644 l2cap_chan_unlock(chan);
1645 l2cap_chan_put(chan);
1646 return;
1647 }
1648
1649 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1650
1651 l2cap_chan_unlock(chan);
1652 l2cap_chan_put(chan);
1653 }
1654
1655 static void l2cap_retrans_timeout(struct work_struct *work)
1656 {
1657 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1658 retrans_timer.work);
1659
1660 BT_DBG("chan %p", chan);
1661
1662 l2cap_chan_lock(chan);
1663
1664 if (!chan->conn) {
1665 l2cap_chan_unlock(chan);
1666 l2cap_chan_put(chan);
1667 return;
1668 }
1669
1670 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1671 l2cap_chan_unlock(chan);
1672 l2cap_chan_put(chan);
1673 }
1674
1675 static void l2cap_streaming_send(struct l2cap_chan *chan,
1676 struct sk_buff_head *skbs)
1677 {
1678 struct sk_buff *skb;
1679 struct l2cap_ctrl *control;
1680
1681 BT_DBG("chan %p, skbs %p", chan, skbs);
1682
1683 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1684
1685 while (!skb_queue_empty(&chan->tx_q)) {
1686
1687 skb = skb_dequeue(&chan->tx_q);
1688
1689 bt_cb(skb)->control.retries = 1;
1690 control = &bt_cb(skb)->control;
1691
1692 control->reqseq = 0;
1693 control->txseq = chan->next_tx_seq;
1694
1695 __pack_control(chan, control, skb);
1696
1697 if (chan->fcs == L2CAP_FCS_CRC16) {
1698 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1699 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1700 }
1701
1702 l2cap_do_send(chan, skb);
1703
1704 BT_DBG("Sent txseq %u", control->txseq);
1705
1706 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1707 chan->frames_sent++;
1708 }
1709 }
1710
/* Transmit queued I-frames in ERTM mode, limited by the remote's TX
 * window and the current TX state.  Returns the number of frames
 * newly sent, 0 when sending is blocked, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* The peer signalled Receiver-Not-Ready; hold all I-frames. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* A pending F-bit rides out on the next I-frame. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggybacks an ack (reqseq). */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original skb stays on
		 * tx_q for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1777
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list.  Enforces the max_tx retry limit and aborts the
 * connection when it is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* The peer signalled Receiver-Not-Ready; hold all I-frames. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a copy of the stored control block so the
		 * updated reqseq/final bits apply to this transmission.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS since the control field changed. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1853
/* Queue a single frame (control->reqseq) for retransmission and kick
 * the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1862
/* Retransmit all unacked frames starting at control->reqseq (typically
 * after receiving a REJ).  Rebuilds the retrans_list from the TX queue
 * and hands off to l2cap_ertm_resend().
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A P-bit in the trigger frame requires an F-bit in response. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit: either reqseq or,
		 * failing that, the first never-sent frame.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to (excluding) the
		 * first frame that has not yet been sent.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1896
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on outgoing I-frames, send an explicit RR
 * when the ack backlog reaches 3/4 of the window, or re-arm the ack
 * timer for later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop with RNR. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be picked up when the
		 * ack timer fires.
		 */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1946
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder is chained onto
 * skb's frag_list in pieces of at most conn->mtu bytes.  Returns the
 * total number of bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the new fragment before copying so the caller's
		 * kfree_skb(skb) frees it on a later failure.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1991
1992 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1993 struct msghdr *msg, size_t len,
1994 u32 priority)
1995 {
1996 struct l2cap_conn *conn = chan->conn;
1997 struct sk_buff *skb;
1998 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1999 struct l2cap_hdr *lh;
2000
2001 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2002
2003 count = min_t(unsigned int, (conn->mtu - hlen), len);
2004
2005 skb = chan->ops->alloc_skb(chan, count + hlen,
2006 msg->msg_flags & MSG_DONTWAIT);
2007 if (IS_ERR(skb))
2008 return skb;
2009
2010 skb->priority = priority;
2011
2012 /* Create L2CAP header */
2013 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2014 lh->cid = cpu_to_le16(chan->dcid);
2015 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2016 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2017
2018 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2019 if (unlikely(err < 0)) {
2020 kfree_skb(skb);
2021 return ERR_PTR(err);
2022 }
2023 return skb;
2024 }
2025
2026 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2027 struct msghdr *msg, size_t len,
2028 u32 priority)
2029 {
2030 struct l2cap_conn *conn = chan->conn;
2031 struct sk_buff *skb;
2032 int err, count;
2033 struct l2cap_hdr *lh;
2034
2035 BT_DBG("chan %p len %zu", chan, len);
2036
2037 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2038
2039 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2040 msg->msg_flags & MSG_DONTWAIT);
2041 if (IS_ERR(skb))
2042 return skb;
2043
2044 skb->priority = priority;
2045
2046 /* Create L2CAP header */
2047 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2048 lh->cid = cpu_to_le16(chan->dcid);
2049 lh->len = cpu_to_le16(len);
2050
2051 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2052 if (unlikely(err < 0)) {
2053 kfree_skb(skb);
2054 return ERR_PTR(err);
2055 }
2056 return skb;
2057 }
2058
/* Build one ERTM/streaming I-frame PDU from user data.  The control
 * field is zero-filled here and populated at transmit time; room for
 * the optional SDU length and FCS is reserved as negotiated.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field. */
	hlen = __ertm_hdr_size(chan);

	/* Only the first PDU of a segmented SDU carries the SDU length. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2112
/* Split an SDU of @len bytes into I-frame PDUs, tagging each with the
 * appropriate SAR value (unsegmented, or start/continue/end), and
 * append them to @seg_queue.  Returns 0 or a negative errno (on error
 * the queue is purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start PDU also carries the 2-byte SDU length. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length; later PDUs
		 * regain that room for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2180
/* Send user data on @chan, dispatching on channel type and mode:
 * connectionless, basic (single PDU), or ERTM/streaming (segmented).
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2260
2261 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2262 {
2263 struct l2cap_ctrl control;
2264 u16 seq;
2265
2266 BT_DBG("chan %p, txseq %u", chan, txseq);
2267
2268 memset(&control, 0, sizeof(control));
2269 control.sframe = 1;
2270 control.super = L2CAP_SUPER_SREJ;
2271
2272 for (seq = chan->expected_tx_seq; seq != txseq;
2273 seq = __next_seq(chan, seq)) {
2274 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2275 control.reqseq = seq;
2276 l2cap_send_sframe(chan, &control);
2277 l2cap_seq_list_append(&chan->srej_list, seq);
2278 }
2279 }
2280
2281 chan->expected_tx_seq = __next_seq(chan, txseq);
2282 }
2283
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list), if any request is outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2299
/* Re-send SREJ frames for every outstanding missing sequence number up
 * to (but not including) @txseq, preserving the srej_list contents by
 * re-appending each popped entry.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Put the entry back; it is still outstanding. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2325
2326 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2327 {
2328 struct sk_buff *acked_skb;
2329 u16 ackseq;
2330
2331 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2332
2333 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2334 return;
2335
2336 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2337 chan->expected_ack_seq, chan->unacked_frames);
2338
2339 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2340 ackseq = __next_seq(chan, ackseq)) {
2341
2342 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2343 if (acked_skb) {
2344 skb_unlink(acked_skb, &chan->tx_q);
2345 kfree_skb(acked_skb);
2346 chan->unacked_frames--;
2347 }
2348 }
2349
2350 chan->expected_ack_seq = reqseq;
2351
2352 if (chan->unacked_frames == 0)
2353 __clear_retrans_timer(chan);
2354
2355 BT_DBG("unacked_frames %u", chan->unacked_frames);
2356 }
2357
/* Abandon selective-reject recovery: forget all requested frames, drop
 * out-of-order buffers and fall back to the plain receive state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2367
/* ERTM TX state machine handler for the XMIT state: frames may be
 * transmitted freely (subject to the TX window).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new PDUs and transmit as the window allows. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while locally busy. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it (RR with
			 * P-bit) and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer and wait
		 * for the F-bit before retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2439
2440 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2441 struct l2cap_ctrl *control,
2442 struct sk_buff_head *skbs, u8 event)
2443 {
2444 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2445 event);
2446
2447 switch (event) {
2448 case L2CAP_EV_DATA_REQUEST:
2449 if (chan->tx_send_head == NULL)
2450 chan->tx_send_head = skb_peek(skbs);
2451 /* Queue data, but don't send. */
2452 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2453 break;
2454 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2455 BT_DBG("Enter LOCAL_BUSY");
2456 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2457
2458 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2459 /* The SREJ_SENT state must be aborted if we are to
2460 * enter the LOCAL_BUSY state.
2461 */
2462 l2cap_abort_rx_srej_sent(chan);
2463 }
2464
2465 l2cap_send_ack(chan);
2466
2467 break;
2468 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2469 BT_DBG("Exit LOCAL_BUSY");
2470 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2471
2472 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2473 struct l2cap_ctrl local_control;
2474 memset(&local_control, 0, sizeof(local_control));
2475 local_control.sframe = 1;
2476 local_control.super = L2CAP_SUPER_RR;
2477 local_control.poll = 1;
2478 local_control.reqseq = chan->buffer_seq;
2479 l2cap_send_sframe(chan, &local_control);
2480
2481 chan->retry_count = 1;
2482 __set_monitor_timer(chan);
2483 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2484 }
2485 break;
2486 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2487 l2cap_process_reqseq(chan, control->reqseq);
2488
2489 /* Fall through */
2490
2491 case L2CAP_EV_RECV_FBIT:
2492 if (control && control->final) {
2493 __clear_monitor_timer(chan);
2494 if (chan->unacked_frames > 0)
2495 __set_retrans_timer(chan);
2496 chan->retry_count = 0;
2497 chan->tx_state = L2CAP_TX_STATE_XMIT;
2498 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2499 }
2500 break;
2501 case L2CAP_EV_EXPLICIT_POLL:
2502 /* Ignore */
2503 break;
2504 case L2CAP_EV_MONITOR_TO:
2505 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2506 l2cap_send_rr_or_rnr(chan, 1);
2507 __set_monitor_timer(chan);
2508 chan->retry_count++;
2509 } else {
2510 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2511 }
2512 break;
2513 default:
2514 break;
2515 }
2516 }
2517
2518 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2519 struct sk_buff_head *skbs, u8 event)
2520 {
2521 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2522 chan, control, skbs, event, chan->tx_state);
2523
2524 switch (chan->tx_state) {
2525 case L2CAP_TX_STATE_XMIT:
2526 l2cap_tx_state_xmit(chan, control, skbs, event);
2527 break;
2528 case L2CAP_TX_STATE_WAIT_F:
2529 l2cap_tx_state_wait_f(chan, control, skbs, event);
2530 break;
2531 default:
2532 /* Ignore event */
2533 break;
2534 }
2535 }
2536
2537 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2538 struct l2cap_ctrl *control)
2539 {
2540 BT_DBG("chan %p, control %p", chan, control);
2541 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2542 }
2543
2544 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2545 struct l2cap_ctrl *control)
2546 {
2547 BT_DBG("chan %p, control %p", chan, control);
2548 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2549 }
2550
2551 /* Copy frame to all raw sockets on that connection */
2552 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2553 {
2554 struct sk_buff *nskb;
2555 struct l2cap_chan *chan;
2556
2557 BT_DBG("conn %p", conn);
2558
2559 mutex_lock(&conn->chan_lock);
2560
2561 list_for_each_entry(chan, &conn->chan_l, list) {
2562 struct sock *sk = chan->sk;
2563 if (chan->chan_type != L2CAP_CHAN_RAW)
2564 continue;
2565
2566 /* Don't send frame to the socket it came from */
2567 if (skb->sk == sk)
2568 continue;
2569 nskb = skb_clone(skb, GFP_ATOMIC);
2570 if (!nskb)
2571 continue;
2572
2573 if (chan->ops->recv(chan, nskb))
2574 kfree_skb(nskb);
2575 }
2576
2577 mutex_unlock(&conn->chan_lock);
2578 }
2579
2580 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header +
 * payload), fragmenting the payload across frag_list skbs when it
 * exceeds the connection MTU.
 *
 * @conn:  connection the command will be sent on (selects MTU and CID)
 * @code:  L2CAP command code
 * @ident: command identifier used to match request/response
 * @dlen:  payload length in bytes
 * @data:  payload bytes to copy
 *
 * Returns the assembled skb, or NULL on allocation failure (any
 * partially built skb chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total on-air length; the first skb holds at most one MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to be placed into continuation fragments */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb releases the head and its whole frag_list */
	kfree_skb(skb);
	return NULL;
}
2643
/* Decode one configuration option from a config request/response
 * buffer and advance *ptr past it.
 *
 * @ptr:  in/out cursor into the option stream
 * @type: out - option type byte (hint bit included)
 * @olen: out - option payload length as declared by the peer
 * @val:  out - option value; widened for 1/2/4-byte options, otherwise
 *        a pointer to the in-buffer payload is returned
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * here against the bytes remaining in the buffer; callers only bound
 * the loop by `len >= L2CAP_CONF_OPT_SIZE`, so a crafted option length
 * can make *ptr walk past the received data. Later upstream kernels
 * pass an explicit buffer size into this helper — verify against the
 * current upstream fix before relying on this parser with untrusted
 * input.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a copy */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2676
/* Append one configuration option to an outgoing config PDU and
 * advance *ptr past it.
 *
 * @ptr:  in/out cursor into the output buffer
 * @type: option type byte
 * @len:  option payload length
 * @val:  value for 1/2/4-byte options; otherwise a pointer to @len
 *        bytes to copy (cast to unsigned long)
 *
 * NOTE(review): there is no bound on the output buffer here — callers
 * must guarantee their stack buffers (typically 64 or 128 bytes) are
 * large enough for every option they emit. Later upstream kernels add
 * an explicit size argument; confirm buffer sizing when modifying any
 * caller.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is really a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2706
2707 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2708 {
2709 struct l2cap_conf_efs efs;
2710
2711 switch (chan->mode) {
2712 case L2CAP_MODE_ERTM:
2713 efs.id = chan->local_id;
2714 efs.stype = chan->local_stype;
2715 efs.msdu = cpu_to_le16(chan->local_msdu);
2716 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2717 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2718 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2719 break;
2720
2721 case L2CAP_MODE_STREAMING:
2722 efs.id = 1;
2723 efs.stype = L2CAP_SERV_BESTEFFORT;
2724 efs.msdu = cpu_to_le16(chan->local_msdu);
2725 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2726 efs.acc_lat = 0;
2727 efs.flush_to = 0;
2728 break;
2729
2730 default:
2731 return;
2732 }
2733
2734 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2735 (unsigned long) &efs);
2736 }
2737
2738 static void l2cap_ack_timeout(struct work_struct *work)
2739 {
2740 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2741 ack_timer.work);
2742 u16 frames_to_ack;
2743
2744 BT_DBG("chan %p", chan);
2745
2746 l2cap_chan_lock(chan);
2747
2748 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2749 chan->last_acked_seq);
2750
2751 if (frames_to_ack)
2752 l2cap_send_rr_or_rnr(chan, 0);
2753
2754 l2cap_chan_unlock(chan);
2755 l2cap_chan_put(chan);
2756 }
2757
/* Initialize per-channel transmit/receive state for ERTM or streaming
 * mode. For non-ERTM channels only the sequence counters and tx queue
 * are reset.
 *
 * Returns 0 on success or a negative errno if the SREJ/retransmission
 * sequence lists cannot be allocated (on partial failure the srej
 * list is freed again so no allocation leaks).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset all sequence-number bookkeeping to a fresh channel */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs no timers or SREJ machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2797
2798 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2799 {
2800 switch (mode) {
2801 case L2CAP_MODE_STREAMING:
2802 case L2CAP_MODE_ERTM:
2803 if (l2cap_mode_supported(mode, remote_feat_mask))
2804 return mode;
2805 /* fall through */
2806 default:
2807 return L2CAP_MODE_BASIC;
2808 }
2809 }
2810
2811 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2812 {
2813 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2814 }
2815
2816 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2817 {
2818 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2819 }
2820
2821 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2822 {
2823 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2824 __l2cap_ews_supported(chan)) {
2825 /* use extended control field */
2826 set_bit(FLAG_EXT_CTRL, &chan->flags);
2827 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2828 } else {
2829 chan->tx_win = min_t(u16, chan->tx_win,
2830 L2CAP_DEFAULT_TX_WINDOW);
2831 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2832 }
2833 chan->ack_win = chan->tx_win;
2834 }
2835
/* Build an outgoing configuration request for @chan into @data.
 *
 * On the first request (no prior config exchange) the channel mode may
 * be downgraded based on the remote's feature mask. Options emitted
 * depend on the final mode: MTU, RFC, optionally EFS, FCS and EWS.
 *
 * Returns the number of bytes written into @data.
 *
 * NOTE(review): @data is a fixed-size caller buffer (64/128 bytes in
 * the visible callers) and no bound is passed down — option emission
 * must stay within it.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices insist on their configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send MTU when it differs from the spec default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Explicit basic-mode RFC only if the peer knows about
		 * ERTM/streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a full frame (with extended header,
		 * SDU length and FCS) still fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* RFC carries at most the classic window; the full window
		 * goes in the EWS option when extended control is on.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2957
/* Parse the accumulated configuration request (chan->conf_req /
 * chan->conf_len) from the remote and build the response into @data.
 *
 * Walks every option, records MTU/flush timeout/RFC/FCS/EFS/EWS, then
 * negotiates the channel mode and emits accept/unaccept options.
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the
 * request is unacceptable (mode mismatch after retries, EFS without
 * support, incompatible service type).
 *
 * NOTE(review): option values parsed here come straight from the
 * remote via l2cap_get_conf_opt(), whose declared lengths are not
 * bounds-checked against the buffer; see the note on that helper.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: collect every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hints are
			 * echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* STATE2 devices refuse anything but their own mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed renegotiation attempt */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* A non-matching, non-"no traffic" service type
			 * is a hard conflict.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) overrides the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3177
/* Parse a configuration response from the remote and build a follow-up
 * configuration request into @data, echoing/adjusting the options the
 * remote commented on.
 *
 * @chan:   channel under configuration
 * @rsp:    remote's response option stream
 * @len:    length of @rsp in bytes
 * @data:   output buffer for the new request
 * @result: in/out negotiation result; set to UNACCEPT when the remote
 *          proposed an MTU below the minimum
 *
 * Returns the request length in bytes, or -ECONNREFUSED on an
 * unacceptable mode or service-type conflict.
 *
 * NOTE(review): the output cursor `ptr` grows by one option per parsed
 * input option with no bound on @data — the visible caller passes a
 * 128-byte stack buffer, so a remote sending many options can overrun
 * it (this is the CVE-2017-1000251 "BlueBorne" pattern; the upstream
 * fix adds an explicit output-size parameter). Verify against the
 * current upstream code before trusting this with hostile peers.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Enforce the minimum MTU, flagging the response */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices refuse any mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the remote */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window caps acks */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3278
3279 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3280 {
3281 struct l2cap_conf_rsp *rsp = data;
3282 void *ptr = rsp->data;
3283
3284 BT_DBG("chan %p", chan);
3285
3286 rsp->scid = cpu_to_le16(chan->dcid);
3287 rsp->result = cpu_to_le16(result);
3288 rsp->flags = cpu_to_le16(flags);
3289
3290 return ptr - data;
3291 }
3292
3293 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3294 {
3295 struct l2cap_conn_rsp rsp;
3296 struct l2cap_conn *conn = chan->conn;
3297 u8 buf[128];
3298
3299 rsp.scid = cpu_to_le16(chan->dcid);
3300 rsp.dcid = cpu_to_le16(chan->scid);
3301 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3302 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3303 l2cap_send_cmd(conn, chan->ident,
3304 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3305
3306 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3307 return;
3308
3309 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3310 l2cap_build_conf_req(chan, buf), buf);
3311 chan->num_conf_req++;
3312 }
3313
/* Extract the final RFC (and extended window) parameters from a
 * successful configuration response and commit them to the channel.
 * Only meaningful for ERTM/streaming channels; basic mode returns
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick out just the RFC and EWS options; ignore everything else */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS value wins over the RFC one */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3364
3365 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3366 {
3367 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3368
3369 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3370 return 0;
3371
3372 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3373 cmd->ident == conn->info_ident) {
3374 cancel_delayed_work(&conn->info_timer);
3375
3376 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3377 conn->info_ident = 0;
3378
3379 l2cap_conn_start(conn);
3380 }
3381
3382 return 0;
3383 }
3384
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security, create the child channel, and send a
 * Connection Response (success, pending, or an error result). May also
 * kick off the information-request exchange and/or the first config
 * request.
 *
 * Note the CID naming is from the remote's perspective: the peer's
 * scid becomes our dcid and vice versa.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Hold the ACL while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our source CID is the remote's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must accept before we proceed */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask information exchange */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3505
/* Handle an incoming Connection Response for a connection we
 * initiated. On success move to BT_CONFIG and send the first config
 * request; on pending just mark the state; on any error tear the
 * channel down.
 *
 * Returns 0, or -EFAULT when no matching channel is found (by scid,
 * or by command ident when the peer reported scid 0).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* scid 0: the peer could not allocate a channel; find ours
		 * by the identifier of the original request.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3573
3574 static inline void set_default_fcs(struct l2cap_chan *chan)
3575 {
3576 /* FCS is enabled only in ERTM or streaming mode, if one or both
3577 * sides request it.
3578 */
3579 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3580 chan->fcs = L2CAP_FCS_NONE;
3581 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3582 chan->fcs = L2CAP_FCS_CRC16;
3583 }
3584
/* Handle an incoming L2CAP Configure Request for one of our channels.
 *
 * Options may be split across several requests (CONTINUATION flag);
 * they are accumulated in chan->conf_req until the final fragment
 * arrives, then parsed and answered in one response.
 *
 * Returns 0 on success, -ENOENT if dcid matches no channel.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; released at "unlock" below. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only legal while the channel is being set up. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. A negative len means the
	 * command payload is shorter than the fixed header.
	 */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. Parse the accumulated options and build the
	 * response into rsp; a negative result is a fatal option error.
	 */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our outgoing config is not settled yet; wait for more. */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own Configure Request yet; do so now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3692
3693 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3694 {
3695 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3696 u16 scid, flags, result;
3697 struct l2cap_chan *chan;
3698 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3699 int err = 0;
3700
3701 scid = __le16_to_cpu(rsp->scid);
3702 flags = __le16_to_cpu(rsp->flags);
3703 result = __le16_to_cpu(rsp->result);
3704
3705 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3706 result, len);
3707
3708 chan = l2cap_get_chan_by_scid(conn, scid);
3709 if (!chan)
3710 return 0;
3711
3712 switch (result) {
3713 case L2CAP_CONF_SUCCESS:
3714 l2cap_conf_rfc_get(chan, rsp->data, len);
3715 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3716 break;
3717
3718 case L2CAP_CONF_PENDING:
3719 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3720
3721 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3722 char buf[64];
3723
3724 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3725 buf, &result);
3726 if (len < 0) {
3727 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3728 goto done;
3729 }
3730
3731 /* check compatibility */
3732
3733 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3734 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3735
3736 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3737 l2cap_build_conf_rsp(chan, buf,
3738 L2CAP_CONF_SUCCESS, 0x0000), buf);
3739 }
3740 goto done;
3741
3742 case L2CAP_CONF_UNACCEPT:
3743 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3744 char req[64];
3745
3746 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3747 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3748 goto done;
3749 }
3750
3751 /* throw out any old stored conf requests */
3752 result = L2CAP_CONF_SUCCESS;
3753 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3754 req, &result);
3755 if (len < 0) {
3756 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3757 goto done;
3758 }
3759
3760 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3761 L2CAP_CONF_REQ, len, req);
3762 chan->num_conf_req++;
3763 if (result != L2CAP_CONF_SUCCESS)
3764 goto done;
3765 break;
3766 }
3767
3768 default:
3769 l2cap_chan_set_err(chan, ECONNRESET);
3770
3771 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3772 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3773 goto done;
3774 }
3775
3776 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3777 goto done;
3778
3779 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3780
3781 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3782 set_default_fcs(chan);
3783
3784 if (chan->mode == L2CAP_MODE_ERTM ||
3785 chan->mode == L2CAP_MODE_STREAMING)
3786 err = l2cap_ertm_init(chan);
3787
3788 if (err < 0)
3789 l2cap_send_disconn_req(chan->conn, chan, -err);
3790 else
3791 l2cap_chan_ready(chan);
3792 }
3793
3794 done:
3795 l2cap_chan_unlock(chan);
3796 return err;
3797 }
3798
/* Handle an incoming Disconnect Request: acknowledge it, shut down the
 * socket, and delete the channel.
 *
 * Lock order: conn->chan_lock guards the channel list across lookup
 * and l2cap_chan_del(); a temporary hold keeps the channel alive so
 * ops->close() can run after the channel lock is dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CID pair back, swapped into the peer's view. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a ref so the channel survives until ops->close(). */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3844
/* Handle a Disconnect Response: the peer has acknowledged our
 * Disconnect Request, so delete the channel with no error.
 *
 * Same locking/refcount pattern as l2cap_disconnect_req(): hold the
 * channel across l2cap_chan_del() so ops->close() runs safely after
 * the channel lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* err == 0: this is a clean, locally initiated shutdown. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3878
/* Handle an Information Request: report our feature mask, our fixed
 * channel map, or "not supported" for anything else.
 *
 * Always returns 0; every request type gets some response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* buf = info_rsp header + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features according to module state. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* buf = info_rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* A2MP fixed channel tracks the high-speed setting. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3928
/* Handle an Information Response during the initial feature exchange.
 *
 * A successful feature-mask response may trigger a follow-up
 * fixed-channels request; once the exchange completes (or fails),
 * pending channel setup is kicked via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; proceed without extended features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channels query before declaring
			 * the exchange done.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3986
3987 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3988 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3989 void *data)
3990 {
3991 struct l2cap_create_chan_req *req = data;
3992 struct l2cap_create_chan_rsp rsp;
3993 u16 psm, scid;
3994
3995 if (cmd_len != sizeof(*req))
3996 return -EPROTO;
3997
3998 if (!enable_hs)
3999 return -EINVAL;
4000
4001 psm = le16_to_cpu(req->psm);
4002 scid = le16_to_cpu(req->scid);
4003
4004 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4005
4006 /* Placeholder: Always reject */
4007 rsp.dcid = 0;
4008 rsp.scid = cpu_to_le16(scid);
4009 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4010 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4011
4012 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4013 sizeof(rsp), &rsp);
4014
4015 return 0;
4016 }
4017
/* A Create Channel Response carries the same payload as a Connect
 * Response, so reuse that handler unchanged.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4025
4026 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4027 u16 icid, u16 result)
4028 {
4029 struct l2cap_move_chan_rsp rsp;
4030
4031 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4032
4033 rsp.icid = cpu_to_le16(icid);
4034 rsp.result = cpu_to_le16(result);
4035
4036 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4037 }
4038
4039 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4040 struct l2cap_chan *chan,
4041 u16 icid, u16 result)
4042 {
4043 struct l2cap_move_chan_cfm cfm;
4044 u8 ident;
4045
4046 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4047
4048 ident = l2cap_get_ident(conn);
4049 if (chan)
4050 chan->ident = ident;
4051
4052 cfm.icid = cpu_to_le16(icid);
4053 cfm.result = cpu_to_le16(result);
4054
4055 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4056 }
4057
4058 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4059 u16 icid)
4060 {
4061 struct l2cap_move_chan_cfm_rsp rsp;
4062
4063 BT_DBG("icid 0x%4.4x", icid);
4064
4065 rsp.icid = cpu_to_le16(icid);
4066 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4067 }
4068
4069 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4070 struct l2cap_cmd_hdr *cmd,
4071 u16 cmd_len, void *data)
4072 {
4073 struct l2cap_move_chan_req *req = data;
4074 u16 icid = 0;
4075 u16 result = L2CAP_MR_NOT_ALLOWED;
4076
4077 if (cmd_len != sizeof(*req))
4078 return -EPROTO;
4079
4080 icid = le16_to_cpu(req->icid);
4081
4082 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4083
4084 if (!enable_hs)
4085 return -EINVAL;
4086
4087 /* Placeholder: Always refuse */
4088 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4089
4090 return 0;
4091 }
4092
4093 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd,
4095 u16 cmd_len, void *data)
4096 {
4097 struct l2cap_move_chan_rsp *rsp = data;
4098 u16 icid, result;
4099
4100 if (cmd_len != sizeof(*rsp))
4101 return -EPROTO;
4102
4103 icid = le16_to_cpu(rsp->icid);
4104 result = le16_to_cpu(rsp->result);
4105
4106 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4107
4108 /* Placeholder: Always unconfirmed */
4109 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4110
4111 return 0;
4112 }
4113
4114 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4115 struct l2cap_cmd_hdr *cmd,
4116 u16 cmd_len, void *data)
4117 {
4118 struct l2cap_move_chan_cfm *cfm = data;
4119 u16 icid, result;
4120
4121 if (cmd_len != sizeof(*cfm))
4122 return -EPROTO;
4123
4124 icid = le16_to_cpu(cfm->icid);
4125 result = le16_to_cpu(cfm->result);
4126
4127 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4128
4129 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4130
4131 return 0;
4132 }
4133
4134 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4135 struct l2cap_cmd_hdr *cmd,
4136 u16 cmd_len, void *data)
4137 {
4138 struct l2cap_move_chan_cfm_rsp *rsp = data;
4139 u16 icid;
4140
4141 if (cmd_len != sizeof(*rsp))
4142 return -EPROTO;
4143
4144 icid = le16_to_cpu(rsp->icid);
4145
4146 BT_DBG("icid 0x%4.4x", icid);
4147
4148 return 0;
4149 }
4150
4151 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4152 u16 to_multiplier)
4153 {
4154 u16 max_latency;
4155
4156 if (min > max || min < 6 || max > 3200)
4157 return -EINVAL;
4158
4159 if (to_multiplier < 10 || to_multiplier > 3200)
4160 return -EINVAL;
4161
4162 if (max >= to_multiplier * 8)
4163 return -EINVAL;
4164
4165 max_latency = (to_multiplier * 8 / max) - 1;
4166 if (latency > 499 || latency > max_latency)
4167 return -EINVAL;
4168
4169 return 0;
4170 }
4171
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only legal when we are the master. The parameters are sanity
 * checked; a response is always sent, and accepted parameters are
 * pushed to the controller via hci_le_conn_update().
 *
 * Returns 0 when handled, -EINVAL when not master, -EPROTO on a
 * malformed command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Fixed-size command: anything else is malformed. */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only forward parameters we accepted to the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4213
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 when the command was handled (or needs no action) and a
 * negative errno when the caller should send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo: bounce the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4295
4296 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4297 struct l2cap_cmd_hdr *cmd, u8 *data)
4298 {
4299 switch (cmd->code) {
4300 case L2CAP_COMMAND_REJ:
4301 return 0;
4302
4303 case L2CAP_CONN_PARAM_UPDATE_REQ:
4304 return l2cap_conn_param_update_req(conn, cmd, data);
4305
4306 case L2CAP_CONN_PARAM_UPDATE_RSP:
4307 return 0;
4308
4309 default:
4310 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4311 return -EINVAL;
4312 }
4313 }
4314
/* Process an skb received on the L2CAP signaling channel.
 *
 * A single frame may carry several concatenated commands; walk them
 * one header at a time. A failing handler is answered with a generic
 * "not understood" Command Reject. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror raw signaling traffic to any listening raw sockets. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4361
/* Verify (and strip) the trailing CRC16 FCS of a received frame.
 *
 * The CRC covers the L2CAP header (which sits immediately before
 * skb->data, hdr_size bytes back) plus the payload. skb_trim()
 * removes the FCS from skb->len first; the FCS bytes themselves are
 * still readable right at skb->data + skb->len.
 *
 * Returns 0 if the FCS matches or is not in use, -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the header longer. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4382
/* Report our local busy state to the peer with the F-bit set.
 *
 * When locally busy an RNR is sent immediately. Otherwise pending
 * I-frames are flushed (which may carry the F-bit themselves); if
 * nothing carried it, a bare RR goes out so the peer's poll is
 * always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared: restart the retransmission timer if
	 * frames are still waiting for acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4416
/* Chain new_frag onto skb's frag_list and update the head skb's
 * length accounting. *last_frag tracks the current tail so repeated
 * appends stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the very first append *last_frag is the head
	 * skb itself, so this also writes skb->next — appears harmless
	 * since the SDU head is not queued at that point; confirm.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4435
/* Feed one I-frame payload into SDU reassembly, driven by the frame's
 * SAR (segmentation and reassembly) bits.
 *
 * On success, ownership of skb passes to this function: it was either
 * delivered up via chan->ops->recv() or chained onto chan->sdu. On
 * error, both skb and any partially assembled SDU are freed.
 *
 * Returns 0 on success, -EINVAL on a SAR protocol violation,
 * -EMSGSIZE when the announced SDU exceeds our MTU, or an error from
 * chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Whole SDU in one frame; a reassembly already in
		 * progress makes this a protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be strictly smaller than the
		 * full SDU; otherwise segmentation was pointless.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete or exceed the SDU. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final fragment must make the length match exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU. kfree_skb(NULL)
		 * is a no-op for the paths that consumed skb above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4517
4518 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4519 {
4520 u8 event;
4521
4522 if (chan->mode != L2CAP_MODE_ERTM)
4523 return;
4524
4525 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4526 l2cap_tx(chan, NULL, NULL, event);
4527 }
4528
/* Drain in-order frames out of the SREJ hold queue.
 *
 * Pass sequential frames to l2cap_reassemble_sdu() until a gap is
 * encountered (or local busy kicks in). Once the queue is fully
 * drained the channel returns to normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected frame has not arrived yet. */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4562
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for, tracking poll/final bits so the same frame is not
 * retransmitted twice within one WAIT_F exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next unsent seq is a protocol violation. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
				control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions. */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll demands an F-bit in our reply. */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final bit
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4620
/* Handle a received REJ S-frame: the peer rejects everything from
 * reqseq onward, so retransmit all unacked frames from that point,
 * avoiding a duplicate retransmission when the REJ's final bit
 * answers an exchange already acted upon.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* REJ for the next unsent seq is a protocol violation. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions. */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4657
/* Classify the txseq of a received I-frame relative to the receive
 * window and any outstanding SREJs.
 *
 * Returns one of the L2CAP_TXSEQ_* dispositions consumed by the RX
 * state machines (expected, duplicate, unexpected gap, or invalid).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
			chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
				chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4744
/* ERTM receive-side state machine handler for the RECV state (no SREJ
 * outstanding).  Processes one receive event for the channel.
 *
 * @chan:    channel the event arrived on (expected to be locked by caller)
 * @control: unpacked control field of the received frame
 * @skb:     received frame payload, or NULL for events with no data
 * @event:   L2CAP_EV_RECV_* event to process
 *
 * Returns 0 on success or a negative error from SDU reassembly.
 * Ownership: if @skb is neither queued on srej_q nor consumed by
 * reassembly (skb_in_use stays false), it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once skb has been queued or reassembled */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Ack information (reqseq/final) always goes to the
			 * transmit side, even if the payload is dropped.
			 */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers an outstanding poll; if no REJ
				 * was pending, retransmit and resume sending.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			/* Stay in SREJ_SENT until the gap is filled */
			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Payload already received; still deliver the ack
			 * information to the transmit side.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote left the busy state; restart the retransmit
			 * timer if frames are still awaiting acknowledgment.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* No point retransmitting while the remote is busy */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither queued nor consumed above, so release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4879
/* ERTM receive-side state machine handler for the SREJ_SENT state
 * (one or more SREJ requests outstanding; out-of-order frames are
 * buffered on srej_q until the gaps are filled).
 *
 * @chan:    channel the event arrived on (expected to be locked by caller)
 * @control: unpacked control field of the received frame
 * @skb:     received frame payload, or NULL for events with no data
 * @event:   L2CAP_EV_RECV_* event to process
 *
 * Returns 0 on success or a negative error from processing queued
 * I-frames.  Ownership: if @skb is not queued on srej_q (skb_in_use
 * stays false), it is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;	/* set once skb has been queued on srej_q */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This frame fills the next outstanding SREJ gap;
			 * queued I-frames may now be deliverable in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			/* Answer the poll with the tail of the SREJ list */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR S-frame */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frame was not queued on srej_q, so release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5023
5024 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5025 {
5026 /* Make sure reqseq is for a packet that has been sent but not acked */
5027 u16 unacked;
5028
5029 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5030 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5031 }
5032
5033 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5034 struct sk_buff *skb, u8 event)
5035 {
5036 int err = 0;
5037
5038 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5039 control, skb, event, chan->rx_state);
5040
5041 if (__valid_reqseq(chan, control->reqseq)) {
5042 switch (chan->rx_state) {
5043 case L2CAP_RX_STATE_RECV:
5044 err = l2cap_rx_state_recv(chan, control, skb, event);
5045 break;
5046 case L2CAP_RX_STATE_SREJ_SENT:
5047 err = l2cap_rx_state_srej_sent(chan, control, skb,
5048 event);
5049 break;
5050 default:
5051 /* shut it down */
5052 break;
5053 }
5054 } else {
5055 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5056 control->reqseq, chan->next_tx_seq,
5057 chan->expected_ack_seq);
5058 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5059 }
5060
5061 return err;
5062 }
5063
5064 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5065 struct sk_buff *skb)
5066 {
5067 int err = 0;
5068
5069 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5070 chan->rx_state);
5071
5072 if (l2cap_classify_txseq(chan, control->txseq) ==
5073 L2CAP_TXSEQ_EXPECTED) {
5074 l2cap_pass_to_tx(chan, control);
5075
5076 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5077 __next_seq(chan, chan->buffer_seq));
5078
5079 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5080
5081 l2cap_reassemble_sdu(chan, skb, control);
5082 } else {
5083 if (chan->sdu) {
5084 kfree_skb(chan->sdu);
5085 chan->sdu = NULL;
5086 }
5087 chan->sdu_last_frag = NULL;
5088 chan->sdu_len = 0;
5089
5090 if (skb) {
5091 BT_DBG("Freeing %p", skb);
5092 kfree_skb(skb);
5093 }
5094 }
5095
5096 chan->last_acked_seq = control->txseq;
5097 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5098
5099 return err;
5100 }
5101
/* Entry point for ERTM/streaming data frames on a channel.  Unpacks the
 * control field, verifies the FCS and payload length, validates the
 * F/P bits, and dispatches I-frames and S-frames to the receive state
 * machine (or the streaming receiver).
 *
 * Ownership of @skb passes to the called receive path; frames that fail
 * validation are freed at the "drop" label.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the information payload length: exclude the SDU length
	 * field of a start fragment and the trailing FCS, if present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		/* Payload exceeds the negotiated MPS: fatal */
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Maps the 2-bit S-frame "super" field to a receive event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no information payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5190
/* Route an inbound data frame to the channel identified by @cid.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * already locked — this function only unlocks (at "done") and the A2MP
 * path locks explicitly.  Confirm against its definition before changing
 * the locking here.
 *
 * Ownership of @skb passes to the channel's receive path or is freed
 * at the "drop" label.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* First A2MP frame on this connection creates the
			 * fixed A2MP channel on demand.
			 */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5249
5250 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5251 struct sk_buff *skb)
5252 {
5253 struct l2cap_chan *chan;
5254
5255 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5256 if (!chan)
5257 goto drop;
5258
5259 BT_DBG("chan %p, len %d", chan, skb->len);
5260
5261 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5262 goto drop;
5263
5264 if (chan->imtu < skb->len)
5265 goto drop;
5266
5267 if (!chan->ops->recv(chan, skb))
5268 return;
5269
5270 drop:
5271 kfree_skb(skb);
5272 }
5273
5274 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5275 struct sk_buff *skb)
5276 {
5277 struct l2cap_chan *chan;
5278
5279 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5280 if (!chan)
5281 goto drop;
5282
5283 BT_DBG("chan %p, len %d", chan, skb->len);
5284
5285 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5286 goto drop;
5287
5288 if (chan->imtu < skb->len)
5289 goto drop;
5290
5291 if (!chan->ops->recv(chan, skb))
5292 return;
5293
5294 drop:
5295 kfree_skb(skb);
5296 }
5297
5298 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5299 {
5300 struct l2cap_hdr *lh = (void *) skb->data;
5301 u16 cid, len;
5302 __le16 psm;
5303
5304 skb_pull(skb, L2CAP_HDR_SIZE);
5305 cid = __le16_to_cpu(lh->cid);
5306 len = __le16_to_cpu(lh->len);
5307
5308 if (len != skb->len) {
5309 kfree_skb(skb);
5310 return;
5311 }
5312
5313 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5314
5315 switch (cid) {
5316 case L2CAP_CID_LE_SIGNALING:
5317 case L2CAP_CID_SIGNALING:
5318 l2cap_sig_channel(conn, skb);
5319 break;
5320
5321 case L2CAP_CID_CONN_LESS:
5322 psm = get_unaligned((__le16 *) skb->data);
5323 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5324 l2cap_conless_channel(conn, psm, skb);
5325 break;
5326
5327 case L2CAP_CID_LE_DATA:
5328 l2cap_att_channel(conn, cid, skb);
5329 break;
5330
5331 case L2CAP_CID_SMP:
5332 if (smp_sig_channel(conn, skb))
5333 l2cap_conn_del(conn->hcon, EACCES);
5334 break;
5335
5336 default:
5337 l2cap_data_channel(conn, cid, skb);
5338 break;
5339 }
5340 }
5341
5342 /* ---- L2CAP interface with lower layer (HCI) ---- */
5343
5344 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5345 {
5346 int exact = 0, lm1 = 0, lm2 = 0;
5347 struct l2cap_chan *c;
5348
5349 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5350
5351 /* Find listening sockets and check their link_mode */
5352 read_lock(&chan_list_lock);
5353 list_for_each_entry(c, &chan_list, global_l) {
5354 struct sock *sk = c->sk;
5355
5356 if (c->state != BT_LISTEN)
5357 continue;
5358
5359 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5360 lm1 |= HCI_LM_ACCEPT;
5361 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5362 lm1 |= HCI_LM_MASTER;
5363 exact++;
5364 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5365 lm2 |= HCI_LM_ACCEPT;
5366 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5367 lm2 |= HCI_LM_MASTER;
5368 }
5369 }
5370 read_unlock(&chan_list_lock);
5371
5372 return exact ? lm1 : lm2;
5373 }
5374
5375 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5376 {
5377 struct l2cap_conn *conn;
5378
5379 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5380
5381 if (!status) {
5382 conn = l2cap_conn_add(hcon, status);
5383 if (conn)
5384 l2cap_conn_ready(conn);
5385 } else
5386 l2cap_conn_del(hcon, bt_to_errno(status));
5387
5388 }
5389
5390 int l2cap_disconn_ind(struct hci_conn *hcon)
5391 {
5392 struct l2cap_conn *conn = hcon->l2cap_data;
5393
5394 BT_DBG("hcon %p", hcon);
5395
5396 if (!conn)
5397 return HCI_ERROR_REMOTE_USER_TERM;
5398 return conn->disc_reason;
5399 }
5400
/* HCI callback: the ACL link was disconnected; tear down the L2CAP
 * connection state attached to it.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
5407
5408 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5409 {
5410 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5411 return;
5412
5413 if (encrypt == 0x00) {
5414 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5415 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5416 } else if (chan->sec_level == BT_SECURITY_HIGH)
5417 l2cap_chan_close(chan, ECONNREFUSED);
5418 } else {
5419 if (chan->sec_level == BT_SECURITY_MEDIUM)
5420 __clear_chan_timer(chan);
5421 }
5422 }
5423
/* HCI callback: authentication/encryption state changed on @hcon.
 * Walks every channel on the connection (under chan_lock, locking each
 * channel in turn) and advances, closes, or answers pending connection
 * requests according to the new security state.
 *
 * @status:  0 on success, otherwise an HCI error code
 * @encrypt: non-zero when the link is now encrypted
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* LE link encrypted: start SMP key distribution and stop
		 * the pending security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE data channel becomes ready once encrypted */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect is already pending on this channel; leave it */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security change succeeded on an established
			 * channel: wake the socket and re-check encryption.
			 */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: send the
			 * deferred connection response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Success: immediately follow up with our config
			 * request if we have not sent one yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5541
/* HCI callback: an ACL data packet arrived.  Reassembles fragmented
 * L2CAP frames across ACL start/continuation packets in conn->rx_skb,
 * then hands complete frames to l2cap_recv_frame().
 *
 * @flags: ACL packet boundary flags; ACL_CONT marks a continuation
 *         fragment, otherwise this is a start fragment.
 *
 * Ownership: complete frames are consumed by l2cap_recv_frame();
 * fragments are copied into rx_skb and the original @skb is freed at
 * "drop".  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Create the L2CAP connection lazily on first data */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still expected */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5633
5634 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5635 {
5636 struct l2cap_chan *c;
5637
5638 read_lock(&chan_list_lock);
5639
5640 list_for_each_entry(c, &chan_list, global_l) {
5641 struct sock *sk = c->sk;
5642
5643 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5644 batostr(&bt_sk(sk)->src),
5645 batostr(&bt_sk(sk)->dst),
5646 c->state, __le16_to_cpu(c->psm),
5647 c->scid, c->dcid, c->imtu, c->omtu,
5648 c->sec_level, c->mode);
5649 }
5650
5651 read_unlock(&chan_list_lock);
5652
5653 return 0;
5654 }
5655
/* debugfs open callback: bind the seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5660
/* Read-only seq_file file operations for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5667
/* debugfs file created in l2cap_init() and removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5669
5670 int __init l2cap_init(void)
5671 {
5672 int err;
5673
5674 err = l2cap_init_sockets();
5675 if (err < 0)
5676 return err;
5677
5678 if (bt_debugfs) {
5679 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5680 bt_debugfs, NULL, &l2cap_debugfs_fops);
5681 if (!l2cap_debugfs)
5682 BT_ERR("Failed to create L2CAP debug file");
5683 }
5684
5685 return 0;
5686 }
5687
/* Module teardown: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5693
/* Writable module parameter to turn off ERTM support at runtime */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");